pax_global_header00006660000000000000000000000064143641141170014514gustar00rootroot0000000000000052 comment=73d0937e39ed01e011107b03e31e609429b3918a diff_cover-7.4.0/000077500000000000000000000000001436411411700136325ustar00rootroot00000000000000diff_cover-7.4.0/.coveragerc000066400000000000000000000000321436411411700157460ustar00rootroot00000000000000[run] source = diff_cover diff_cover-7.4.0/.flake8000066400000000000000000000005011436411411700150010ustar00rootroot00000000000000[flake8] max-line-length=100 select= C901, # flake8-mccabe E, # flake8-pycodestyle F, # flake8-pyflakes W, # flake8-pycodestyle ignore = W503,E203, # conflict with black formatter per-file-ignores = # supression for __init__ diff_cover/tests/*: E501 diff_cover/tests/fixtures/*: E,F,Wdiff_cover-7.4.0/.git-blame-ignore-revs000066400000000000000000000001071436411411700177300ustar00rootroot00000000000000# Migrate code style to Black e1db83a447700237a96be7a9db4413fe5024d328 diff_cover-7.4.0/.github/000077500000000000000000000000001436411411700151725ustar00rootroot00000000000000diff_cover-7.4.0/.github/dependabot.yml000066400000000000000000000003141436411411700200200ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "pip" directory: "/" schedule: interval: "weekly" - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly"diff_cover-7.4.0/.github/workflows/000077500000000000000000000000001436411411700172275ustar00rootroot00000000000000diff_cover-7.4.0/.github/workflows/codeql-analysis.yml000066400000000000000000000040221436411411700230400ustar00rootroot00000000000000# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. 
Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ main ] pull_request: # The branches below must be a subset of the branches above branches: [ main ] schedule: - cron: '18 10 * * 3' jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write steps: - name: Checkout repository uses: actions/checkout@v3 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v2 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v2 diff_cover-7.4.0/.github/workflows/verify.yaml000066400000000000000000000025441436411411700214240ustar00rootroot00000000000000name: PR Job on: [push, pull_request, workflow_dispatch] jobs: verify: runs-on: ubuntu-latest strategy: matrix: python-version: [ "3.7", "3.8", "3.9", "3.10", "3.11" ] steps: - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event." 
- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." - name: Check out repository code uses: actions/checkout@v3 with: fetch-depth: 0 - run: echo "💡 The ${{ github.repository }} repository has been cloned to the runner." - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install poetry uses: snok/install-poetry@v1 with: virtualenvs-create: true virtualenvs-in-project: true - name: Load cached venv id: cached-poetry-dependencies uses: actions/cache@v3 with: path: .venv key: venv-${{ runner.os }}-${{ hashFiles('**/poetry.lock') }} - name: Install dependencies run: poetry install if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' - name: Run The lints and tests run: poetry run bash -x verify.sh - run: echo "🍏 This job's status is ${{ job.status }}." diff_cover-7.4.0/.gitignore000066400000000000000000000006461436411411700156300ustar00rootroot00000000000000*.py[cod] .DS_Store .idea venv 27venv # C extensions *.so # Packages *.egg *.egg-info dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 __pycache__ # Installer logs pip-log.txt # Unit test / coverage reports .coverage .tox /coverage.xml /report.html # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject .python-version # VS Code .vscode/* *.code-workspaces .history/ diff_cover-7.4.0/CHANGELOG000066400000000000000000000476141436411411700150600ustar00rootroot0000000000000001/24/2023 v7.4.0 * Significant performance improvement when processing cobertura reports! Thanks @madest92 [PR 134](https://github.com/Bachmann1234/diff_cover/pull/314) 01/18/2023 v7.3.2 * Update dependencies in the lockfile, adds python 3.11 support. 
Triggered by [Issue 310](https://github.com/Bachmann1234/diff_cover/issues/310) reported by kloczek 01/18/2023 v7.3.1 * Fixes case where bundled tests fail in Solaris and other OSs that dont support deleting the directory you are currently residing in. [PR 309](https://github.com/Bachmann1234/diff_cover/pull/309) Reported by mtelka [Issue 308](https://github.com/Bachmann1234/diff_cover/issues/308) 12/09/2022 v7.3.0 * Plugins now Support '--options' and report files (Thanks @barrywhart) [PR 305](https://github.com/Bachmann1234/diff_cover/pull/305) 11/30/2022 v7.2.0 * Add for the ability for diff-quality report generator plugins to process modified files as a batch (Thanks @barrywhart) [PR 300](https://github.com/Bachmann1234/diff_cover/pull/300) 11/29/2022 v7.1.2 * Remove some annoying prints (thanks @peterg79) [PR 302](https://github.com/Bachmann1234/diff_cover/pull/302) 11/28/2022 v7.1.1 * Handle branch coverage in lcov reports (thanks @peterg79) [PR 301](https://github.com/Bachmann1234/diff_cover/pull/301) 11/26/2022 v7.1.0 * Native support for lcov coverage reports (thanks @peterg79) [PR 297](https://github.com/Bachmann1234/diff_cover/pull/297) 11/13/2022 v7.0.2 * Ensures multiple options get passed to cpp check properly. Thanks @ptzz [Issue 293](https://github.com/Bachmann1234/diff_cover/issues/293) 9/19/2022 v7.0.1 * Updates dependencies and updates pylint driver to support 2.5 in a backwards compatible way * Fixes the broken tests reported in https://github.com/Bachmann1234/diff_cover/issues/290 Thanks @kloczek for the report 9/19/2022 v7.0.0 * Fixes issue where report name would not respoect diff notation https://github.com/Bachmann1234/diff_cover/issues/288 Thanks @prescod for the report. * Drops Python 3.6 (This is why I bumped the major) 6/22/2022 v6.5.1 * Updates lockfile and tests to work with new pygment. 
Resolves [#281](https://github.com/Bachmann1234/diff_cover/issues/281) PR by me however, this was first identified and worked on by @AdamWill in [PR-280](https://github.com/Bachmann1234/diff_cover/pull/280). I absentmindly stomped over that PR when I saw [Issue 281](https://github.com/Bachmann1234/diff_cover/issues/281) come in. 4/15/2022 v6.5.0 * Fixes issues when pylint identifies a multi line issue. Issue [#276](https://github.com/Bachmann1234/diff_cover/issues/276) Thanks @kozlovsky 3/24/2022 v6.4.5 * Updates deps to allow newer Tomli versions Thanks @kasium (PR by me cuse I had to debug it some. He pointed out the original issue.) * Fix to the tests to support a new version of pygments * Fixed a test that was not asserting properly (thanks @kalekundert https://github.com/Bachmann1234/diff_cover/pull/264) 12/5/2021 v6.4.4 * Fixes issue where fail over was not being properly set by default. Thanks @kasium https://github.com/Bachmann1234/diff_cover/pull/262 12/2/2021 v6.4.3 * Replaces jinja-pluralize with native jinja's i18n [Issue 259](https://github.com/Bachmann1234/diff_cover/issues/259) * Some dev dep updates 10/6/2021 v6.4.2 * Announces python 3.10 support. No Code changes required. Mostly so pypi has the right metadata 9/26/2021 v6.4.1 * Re add tests to the *source* distribution. Should not pollute namespace this time. Thanks @dvzrv ! * Only require setuptools if you are using python < 3.8 9/16/2021 v6.4.0 * Allows passing a root path in for ESLint with --report_root_path * Bump pluggy to 1.0 Thanks @kasium for both of these! 8/29/2021 v6.3.5 * Including the tests dir pollutes the namespace so for now they are removed again 8/24/2021 v6.3.4 * Ensures tests are in the dist files 8/19/2021 v6.3.3 * Fix issue where files that start with a space will trigger a crash. * Fix issue where source dist had a setup.py that would not build properly 8/19/2021 v6.3.2 * Fix issue where untracked files that start with space would trigger a crash. 
(Thanks @kasium) Issue #242 8/15/2021 v6.3.1 * Fix issue with pylint and windows based file paths (thanks @kasium) Issue #237 8/11/2021 v6.3.0 * Add support for TOML based file config thanks @kasium! 8/08/2021 v6.2.2 * Replace setup.py with a pyproject.toml based packaging project managed by poetry. No user facing changes are expected 7/30/2021 v6.2.1 * Some minor code refactorings and and attempting to set the long description in pypi. Thanks @kasium! 7/13/2021 v6.2.0 * Adds JSON and Markdown reports for diff-quality Thanks @kasium! 7/09/2021 v6.1.1 * Undoes a change making a implemented method abstract. Turned out existing plugins depended on this. 7/08/2021 v6.1.0 * New flag `--include-untracked` to give people the option to run against files that are not being tracked by git currently Thanks @kasium * Improved handling when the user provides a report file that does not exist (the app will fail instead of just saying there were no results) Thanks @kasium * More progress to moving tests to pytest style tests Thanks @kasium * More static analysis clean up. Thanks @kasium again! Another all @kasium release! 7/04/2021 v6.0.0 * As github, gitlab, and git itself have been changing their default branch to main this release changes the default 'compare-branch' to 'main' as well. If your project is using 'master' as a default branch add the following to your build `--compare-branch origin/master` 7/03/2021 v5.5.0 * Migrate more stuff to pytest * Multiple reports can now be generated at once Thanks again to @kasium for both of these 6/24/2021 v5.4.0 * Some tweaks to the readme * Changes to how flake8 is parsed allowing diff_quality to support error codes that dont conform to the suggested 3 letter 3 digit form * Adds option to show snippets in the console report to help provide context for violations * Some tests migrated from unit test's class based approach to pytests functional approach All of these are due to @kasium Thanks for all you have done to this point! 
6/22/2021 v5.3.0 * Adds 'num_changed_lines' to json report. Thanks @kasium https://github.com/Bachmann1234/diff_cover/issues/170 6/18/2021 v5.2.0 * Adds 'quiet mode' which will only print errors and failures. Flag -q thanks @kasium https://github.com/Bachmann1234/diff_cover/issues/160 * Several internal improvement to tools and configs. Thanks @kasium ! 5/29/2021 v5.1.2 * Fixes a bug where clover reports were not counting conditionals (Thanks for ZSmallX providing a detailed report that made fixing this easy) (https://github.com/Bachmann1234/diff_cover/issues/190) * Switch to github actions for CI as I ran out of free credits for travis and this job does not pay * Drop 'official' support for pypy3. I don't really have any reason to think pypy wont work for this project and I wanted to simplify my ci builds. 5/11/2021 v5.1.1 * Bug fix to in include's functionality around absolute vs reletive paths (Thanks @Kasium https://github.com/Bachmann1234/diff_cover/pull/187) * Some pylint fixes (Thanks @Kasium https://github.com/Bachmann1234/diff_cover/pull/186) 5/09/2021 v5.1.0 * Add the --includes option for diff-quality (Thanks @kasium https://github.com/Bachmann1234/diff_cover/pull/184) 3/10/2021 v5.0.1 * Removing universal wheels as we do not support python 2 (thanks @nicoddemus) * relaxing the requirement for chardet to 3.0 3/08/2021 v5.0.0 * Bumping a major version as I am dropping python 3.5 support as it has been EOL for some time * As a consolation I am adding 3.9 to the build. The project ran on 3.9 just fine (I develop on it..) but I had not declared support 3/03/2021 v4.2.3 * I pinned pygments last release and that was a bit of lazieness. This releases unpins it again 3/01/2021 v4.2.2 * We still support python 3.5 (though we should probably drop it soon). A transitive dep dropped support for 3.5. 
this version pins inflect * Pins pygments cuse updates keep breaking my overly fragile html tests" 1/30/2021 v4.2.1 * The fix from 4.2.0 was incorrect 1/20/2021 v4.2.0 * Attempts to sniff the encoding of src files before reading them in. I am resisting adding an argument to have the user specify them until I am absolutely certain its necessary 1/9/2021 v4.1.1 * Adds the cpp test fixtures to the manifest. No functionality changes just ensuring the packaged tests can run 1/7/2021 v4.1.0 * Adds markdown report rendering support (thanks @f18m !) 9/22/2020 v4.0.1 * No changes to logic. I goofed up the setup.py and 2.7 users are downloading diff cover 4.0 when that wont work (thanks jaraco for the fix!) 8/31/2020 v4.0.0 * Drops python 2.7 support * Adds option to ignore whitespace in a diff 6/4/2020 v3.0.1 * Updates a test around the json report that was flaking. This gets a release as some people run the tests from the packaged code 6/2/2020 v3.0.0 * Add Json report Thanks @ThePletch! * Provide different errors for environment issues and tools not being installed Thanks @ziafazal! * Update pyflakes regex to handle new format (I think it should still work for the older version but im bumping a major version to highlight a potentially breaking change) 4/7/2020 v2.6.1 * cElementTree is going to be removed in python3.9. Added some logic to handle this. Thanks @Adamwill 2/8/2020 v2.6.0 * Remove context lines from git diff to avoid false positives. Thans @macho ! 1/11/2020 v2.5.2 * Droping my CD so I can sign packages again 1/11/2020 v2.5.1 * Just a point release to get back into the habit of signing them 12/11/2019 v2.5.0 * Diff quality now has a plugin system! From now own this project will avoid adding new tools and instead direct those to become plugins instead. Perhaps we can look into moving existing tools into separate plugins over time however, currently im leaving it alone. 
Thank you @barrywhart (https://github.com/Bachmann1234/diff_cover/pull/128) 11/20/2019 v2.4.1 * Performance improvement identifying matching source path by only computing the main src_path the one time rather than N times Thanks @gaellalire (https://github.com/Bachmann1234/diff_cover/pull/124) 10/17/2019 v2.4.0 * Drop support for python 3.4 and upgrade syntax with pyupgrade. Thanks @hugovk * Add support for cppcheck. Thanks @noahp 6/30/2019 v2.3.0 * Add PMD xml driver Thanks @bit-ranger (https://github.com/Bachmann1234/diff_cover/pull/117) 6/13/2019 v2.2.0 * Add --version flag to both commands. Shocked we never had this 6/3/2019 v2.1.0 * New option that controls how the patch is obtained: `--diff-range-notation`, defaulting to `...`. Traditionally in git-cover the symmetric difference (three-dot, "A...M") notation has been used: it includes commits reachable from A and M from their merge-base, but not both, taking history in account. This includes cherry-picks between A and M, which are harmless and do not produce changes, but might give inaccurate coverage false-negatives. Two-dot range notation ("A..M") compares the tips of both trees and produces a diff. This more accurately describes the actual patch that will be applied by merging A into M, even if commits have been cherry-picked between branches. This will produce a more accurate diff for coverage comparison when complex merges and cherry-picks are involved. Thanks @nicoddemus! * Fix for windows users! We were not properly normalizing paths causing source lines not to line up properly. Thanks @kingchad1989 5/10/2019 v2.0.1 * Ensure case is normalized on Windows when comparing paths from XML coverage report Thanks @nicoddemus! 4/08/2019 v2.0.0 * Split Diff-cover and diff-quality's entry point scripts to allow them to be executed using pythons "-m" syntax 3/16/2019 v1.0.7 * Fix bug where git's noprefix option would break diffcover. 
Thanks @beaugunderson 12/17/2018 v1.0.6 * Fix bug where no source information would cause a crash https://github.com/Bachmann1234/diff-cover/issues/88 thanks @hjqgloria for the report 10/26/2018 v1.0.5 * Add support for jacoco xml * support multiple xml formats (any combination of cobertura, clover, jacoco) at the same time 7/10/2018 v1.0.4 * Fix issue where whitespace was not trimmed pulling source file from cobertura causing a missed match 6/24/2018 v1.0.3 * Support parsable output allowing emacs to use next-error and previous-error (Thanks @Glyph) * Fix Tests that were breaking due to some bad versions of pycodestyle (Thanks @Glyph) 12/20/2017 v1.0.2 * Fix bug where clover uses absolute path in its xml report * Fix pylint 1.8.0 compatibility: configuration file path printed to stderr 12/08/2017 v1.0.1 * Add Support for clover, findbugs and checkstyle 11/14/2017 v1.0.0 * Drop support for python 3.3 and 2.6 * Add Support for pypy3 and python 3.6 * Replaced pep8 with pycodestyle as the project has deprecated pep8 * added diff-cover level exclude files https://github.com/Bachmann1234/diff-cover/pull/66 Thanks @agroszer 7/23/2017 v0.9.12 * Add pydocstyle support PR#64 thanks @mr-c 3/7/2017 v0.9.11 * Allow ignoring of staged files. Thanks @abmaonline! 2/25/2017 v0.9.10 * Deflate inflated style percentages by including files that were not supported by the linters. Thanks @abmaoline 8/12/2016 v0.9.9 * Fix bug where only one file was being checked in diff-quality (Issue 47) Thanks @fperrin * Fix bug where diff-quality would attempt to read deleted files (Issue 48) Thanks @fperrin 7/27/2016 v0.9.8 * Fixes to ESLINT to work with 2.13 THanks @bjacobel (PR 46) 3/29/2016 v0.9.7 * Move logging config to main method rather than __main__. Thanks @pkaleta (PR 45) 2/21/2016 v0.9.6 * Now check for tool existence by trying to run the tool rather than trying to import it. 
Thanks for the idea @agroszer (issue 44) 2/12/2016 v0.9.5 * Fix git-diff with `mnemonicprefix` option Thanks @blueyed 2/10/2016 v0.9.4 * Move logging configuration to main for better integrations with people importing the tool. Thanks @agroszer for reporting the issue 2/9/2016 v0.9.3 * Make it so you can run help without being in a git repository thanks @dashea! 2/8/2016 v0.9.2 * CSS template was not being installed properly 2/8/2016 v0.9.1 * Misc bugfixes. Thanks @davidkostyszak and @mgedmin 2/7/2016 v0.9.0 * Add support for specifying an external css file (thanks mgedmin) * Add ESLint driver (thanks agroszer) * Significant reworking of diff-quality to simplify the addition of drivers and potentially allow a plugin system down the line. Something I would like to finish at some point but honestly I am just not seeing the time opening up in the short term. If anyone reading this wants to give it a go +1 to that. Special thanks to Ben Patterson for taking some time to add some QA resources to this release 12/17/2015 v0.8.6 * Fix for windows users trying to run app (thanks @evanunderscore) * Fix handing source paths that contain spaces (thanks @backupproject for pointing out the bug) 12/01/2015 v0.8.5 * Drop lazy to fully support python 3.5 (Thanks @dashea for pointing out the issue) 11/24/2015 v0.8.4 * fix diff-cover when color.ui=always is set on git thanks @Dundee 10/22/2015 v0.8.3 * Fix diff-quality when installed under a folder containing 'diff-cover' (Thanks @faulkner) 9/16/2015 v0.8.2 * Fix handling of windows paths in cobertura files. Thanks to @evanunderscore for catching and reviewing this bug catching several flaws in my attempts to deal with this :-D 9/12/2015 v0.8.1 * Fix the main method to still execute in windows. Thanks to @evanunderscore for catching this bug 6/1/2015 v0.8.0 * Add JSHint to diff-quality. Thanks @benpatterson 5/7/2015 v0.7.6.1 * Fix readme syntax so diff-cover looks prettier on pypy. 
Thanks @mblayman 4/1/2015 v0.7.6 * Add support for some additional Flake8 lugins thanks @dreamwalker 3/7/2015 v0.7.5 * Ensure all sources are captured when dealing with non python cobertura files. Thanks @jfairley * Suppress error output on tests when that output is expected * Adds posargs to allow you to configure test runs more easily. Thanks @nedbat 2/4/2015 v0.7.4 * Fixed bug in pylint support around duplciate code issues * Added ability to ignore unstaged changes Thanks DanCardin * License change! We are now Apache 2.0 * PyPy Support now verified in tests * Fix for python 2.6 support 12/11/2014 v0.7.3 * Added pylint support! * Fixed bug around python 1.6 and pyflakes * Update Pygments allowing us to remove code we grabbed from the development branch of the project used to support python3 correctly 11/6/2014 v0.7.2 * bugfix around parsing of options 10/21/2014 v0.7.1 * Generating HTML report still allows STDOUT to print the regular report 10/17/2014 v0.7.0 * Incorporate sources tag in corbetura reports improving diff-cover's behavior in non python codebases 10/9/2014 v0.6.2 * Added log line for when diff-cover fails due to coverage being lower than the fail-under condition 10/1/2014 v0.6.1 * Fixed issue where error message was not being returned after diff-quality errors * Fixed issue where diff-quality would fail due to warnings. 7/22/2014 v0.6.0 * Add --fail-under option 7/16/2014 v0.5.7 * Remove lxml requirement speeding up builds by roughly 50% 7/13/2014 v0.5.6 * Fix bug where logging was being called without being configured 7/12/2014 v0.5.5 * Diff quality no longer requires every potential quality tool to be installed 7/5/2014 v0.5.4 * Fix bug when writing report to stdout. 
Ensure that all file like objects that get passed into the report generator take bytes * Run diff cover over the code in each language 7/4/2014 v0.5.3 * Pluralize report output * Bugfixes around git_path and unicode 6/28/2014 v0.5.2 * Add snippets to quality html reports 6/27/2014 v0.5.1 * Fix snippets relative path 6/24/2014 v0.5.0 * Add support for Pyflakes 6/13/2014 v0.4.3 * Remove dependency on iPython 6/11/2014 v0.4.2 * Write report to stdout even with --html-report 5/21/2014 v0.4.1 * Unicode error fixes * Pass command line options to quality tool * Combine adjacent lines in reports 4/30/2014 v0.4.0 * Support Python 3.4 * Support Pylint >= 1 4/19/2014 v0.3.0 * Fix relative paths * Specify compare branch in a command line arg * If we get an OSError on calling subprocess, display the failing command 12/24/2013 v0.2.9 * Fixed another unicode bug in diff-quality for pre-generated reports. 12/17/2013 v0.2.8 * Fix a unicode bug in diff-quality 11/16/2013 v0.2.7 * Update requirements to use Jinja 2.7.1 * Remove sources list at top of report 10/9/2013 v0.2.6 * Fix a bug in which unicode in source files would cause an exception. 10/2/2013 v0.2.5 * Added option to consume pylint/pep8 reports instead of calling the tool directly. 9/23/2013 v0.2.4 * Handle symbolic names in pylint output. 9/4/2013 v0.2.3 * Fixed bug in parsing unicode filenames from git diff output * Coverage report now includes snippets from the original source file 9/1/13 v0.2.2 * Fixed a bug that would cause multiple coverage reports to always show 100% coverage. * Fixed bug when running diff cover during a merge conflict (Issue #41) * Added --no-ext to git diff command * Added Python 2.6 support 8/9/13 v0.2.1 * Fix a bug in which function names and TODO: were not appearing in `diff-quality` reports. 7/18/13 v0.2.0 * Add support for code quality metrics (pep8 and pylint for now). Invoked via call to `diff-quality` * Add support for multiple coverage XML inputs. 
* Refactored diff report generator to use Jinja2 templates 6/28/13 v0.1.4 * Fix bug in which adding then deleting lines would result in incorrect lines included in the diff report. 6/25/13 v0.1.3 * Fix a bug in which plus signs in the hunk line would cause a parser error 6/17/13 v0.1.2 * Fix bug in which lines around a change were included in the report 6/10/13 v0.1.1 * Compare with origin/master instead of master * Includes staged and unstaged changes in report diff_cover-7.4.0/LICENSE000066400000000000000000000236761436411411700146550ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS diff_cover-7.4.0/MANIFEST.in000066400000000000000000000003461436411411700153730ustar00rootroot00000000000000include *.txt include LICENSE include AUTHORS include README.rst include CHANGELOG include NOTICE include pylintrc include tox.ini include .coveragerc recursive-include diff_cover *.txt *.html *.css *.xml *.py *.md *.cpp pylintrc diff_cover-7.4.0/NOTICE000066400000000000000000000001041436411411700145310ustar00rootroot00000000000000diff-cover Copyright 2013-2014, edX Copyright 2015, Matt Bachmann diff_cover-7.4.0/README.rst000066400000000000000000000364001436411411700153240ustar00rootroot00000000000000diff-cover |pypi-version| |conda-version| |build-status| ======================================================================================== Automatically find diff lines that need test coverage. Also finds diff lines that have violations (according to tools such as pycodestyle, pyflakes, flake8, or pylint). This is used as a code quality metric during code reviews. Overview -------- Diff coverage is the percentage of new or modified lines that are covered by tests. This provides a clear and achievable standard for code review: If you touch a line of code, that line should be covered. Code coverage is *every* developer's responsibility! The ``diff-cover`` command line tool compares an XML coverage report with the output of ``git diff``. It then reports coverage information for lines in the diff. Currently, ``diff-cover`` requires that: - You are using ``git`` for version control. - Your test runner generates coverage reports in Cobertura, Clover or JaCoCo XML format, or LCov format. 
Supported XML or LCov coverage reports can be generated with many coverage tools, including: - Cobertura__ (Java) - Clover__ (Java) - JaCoCo__ (Java) - coverage.py__ (Python) - JSCover__ (JavaScript) - lcov__ (C/C++) __ http://cobertura.sourceforge.net/ __ http://openclover.org/ __ https://www.jacoco.org/ __ http://nedbatchelder.com/code/coverage/ __ http://tntim96.github.io/JSCover/ __ https://ltp.sourceforge.net/coverage/lcov.php ``diff-cover`` is designed to be extended. If you are interested in adding support for other version control systems or coverage report formats, see below for information on how to contribute! Installation ------------ To install the latest release: .. code:: bash pip install diff_cover To install the development version: .. code:: bash git clone https://github.com/Bachmann1234/diff-cover.git cd diff-cover poetry install poetry shell Getting Started --------------- 1. Set the current working directory to a ``git`` repository. 2. Run your test suite under coverage and generate a [Cobertura, Clover or JaCoCo] XML report. For example, using `pytest-cov`__: .. code:: bash pytest --cov --cov-report=xml __ https://pypi.org/project/pytest-cov This will create a ``coverage.xml`` file in the current working directory. **NOTE**: If you are using a different coverage generator, you will need to use different commands to generate the coverage XML report. 3. Run ``diff-cover``: .. code:: bash diff-cover coverage.xml This will compare the current ``git`` branch to ``origin/main`` and print the diff coverage report to the console. You can also generate an HTML, JSON or Markdown version of the report: .. 
code:: bash diff-cover coverage.xml --html-report report.html diff-cover coverage.xml --json-report report.json diff-cover coverage.xml --markdown-report report.md Multiple XML Coverage Reports ------------------------------- In the case that one has multiple xml reports form multiple test suites, you can get a combined coverage report (a line is counted as covered if it is covered in ANY of the xml reports) by running ``diff-cover`` with multiple coverage reports as arguments. You may specify any arbitrary number of coverage reports: .. code:: bash diff-cover coverage1.xml coverage2.xml Quality Coverage ----------------- You can use diff-cover to see quality reports on the diff as well by running ``diff-quality``. .. code :: bash diff-quality --violations= Where ``tool`` is the quality checker to use. Currently ``pycodestyle``, ``pyflakes``, ``flake8``, ``pylint``, ``checkstyle``, ``checkstylexml`` are supported, but more checkers can (and should!) be supported. See the section "Adding `diff-quality`` Support for a New Quality Checker". NOTE: There's no way to run ``findbugs`` from ``diff-quality`` as it operating over the generated java bytecode and should be integrated into the build framework. Like ``diff-cover``, HTML, JSON or Markdown reports can be generated with .. code:: bash diff-quality --violations= --html-report report.html diff-quality --violations= --json-report report.json diff-quality --violations= --markdown-report report.md If you have already generated a report using ``pycodestyle``, ``pyflakes``, ``flake8``, ``pylint``, ``checkstyle``, ``checkstylexml``, or ``findbugs`` you can pass the report to ``diff-quality``. This is more efficient than letting ``diff-quality`` re-run ``pycodestyle``, ``pyflakes``, ``flake8``, ``pylint``, ``checkstyle``, or ``checkstylexml``. .. 
code:: bash # For pylint < 1.0 pylint -f parseable > pylint_report.txt # For pylint >= 1.0 pylint --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" > pylint_report.txt # Use the generated pylint report when running diff-quality diff-quality --violations=pylint pylint_report.txt # Use a generated pycodestyle report when running diff-quality. pycodestyle > pycodestyle_report.txt diff-quality --violations=pycodestyle pycodestyle_report.txt Note that you must use the ``-f parseable`` option to generate the ``pylint`` report for pylint versions less than 1.0 and the ``--msg-template`` option for versions >= 1.0. ``diff-quality`` will also accept multiple ``pycodestyle``, ``pyflakes``, ``flake8``, or ``pylint`` reports: .. code:: bash diff-quality --violations=pylint report_1.txt report_2.txt If you need to pass in additional options you can with the ``options`` flag .. code:: bash diff-quality --violations=pycodestyle --options="--exclude='*/migrations*' --statistics" pycodestyle_report.txt Compare Branch -------------- By default, ``diff-cover`` compares the current branch to ``origin/main``. To specify a different compare branch: .. code:: bash diff-cover coverage.xml --compare-branch=origin/release Fail Under ---------- To have ``diff-cover`` and ``diff-quality`` return a non zero status code if the report quality/coverage percentage is below a certain threshold specify the fail-under parameter .. code:: bash diff-cover coverage.xml --fail-under=80 diff-quality --violations=pycodestyle --fail-under=80 The above will return a non zero status if the coverage or quality score was below 80%. Exclude/Include paths --------------------- Explicit exclusion of paths is possible for both ``diff-cover`` and ``diff-quality``, while inclusion is only supported for ``diff-quality`` (since 5.1.0). The exclude option works with ``fnmatch``, include with ``glob``. Both options can consume multiple values. 
Include options should be wrapped in double quotes to prevent shell globbing. Also they should be relative to the current git directory. .. code:: bash diff-cover coverage.xml --exclude setup.py diff-quality --violations=pycodestyle --exclude setup.py diff-quality --violations=pycodestyle --include project/foo/** The following is executed for every changed file: #. check if any include pattern was specified #. if yes, check if the changed file is part of at least one include pattern #. check if the file is part of any exclude pattern Ignore/Include based on file status in git ------------------------------------------ Both ``diff-cover`` and ``diff-quality`` allow users to ignore and include files based on the git status: staged, unstaged, untracked: * ``--ignore-staged``: ignore all staged files (by default include them) * ``--ignore-unstaged``: ignore all unstaged files (by default include them) * ``--include-untracked``: include all untracked files (by default ignore them) Quiet mode ---------- Both ``diff-cover`` and ``diff-quality`` support a quiet mode which is disable by default. It can be enabled by using the ``-q``/``--quiet`` flag: .. code:: bash diff-cover coverage.xml -q diff-quality --violations=pycodestyle -q If enabled, the tool will only print errors and failures but no information or warning messages. Configuration files ------------------- Both tools allow users to specify the options in a configuration file with `--config-file`/`-c`: .. code:: bash diff-cover coverage.xml --config-file myconfig.toml diff-quality --violations=pycodestyle --config-file myconfig.toml Currently, only TOML files are supported. Please note, that only non-mandatory options are supported. If an option is specified in the configuration file and over the command line, the value of the command line is used. TOML configuration ~~~~~~~~~~~~~~~~~~ The parser will only react to configuration files ending with `.toml`. 
To use it, install `diff-cover` with the extra requirement `toml`. The option names are the same as on the command line, but all dashes should be underscores. If an option can be specified multiple times, the configuration value should be specified as a list. .. code:: toml [tool.diff_cover] compare_branch = "origin/feature" quiet = true [tool.diff_quality] compare_branch = "origin/feature" ignore_staged = true Troubleshooting ---------------------- **Issue**: ``diff-cover`` always reports: "No lines with coverage information in this diff." **Solution**: ``diff-cover`` matches source files in the coverage XML report with source files in the ``git diff``. For this reason, it's important that the relative paths to the files match. If you are using `coverage.py`__ to generate the coverage XML report, then make sure you run ``diff-cover`` from the same working directory. __ http://nedbatchelder.com/code/coverage/ **Issue**: ``GitDiffTool._execute()`` raises the error: .. code:: bash fatal: ambiguous argument 'origin/main...HEAD': unknown revision or path not in the working tree. This is known to occur when running ``diff-cover`` in `Travis CI`__ __ http://travis-ci.org **Solution**: Fetch the remote main branch before running ``diff-cover``: .. code:: bash git fetch origin master:refs/remotes/origin/main **Issue**: ``diff-quality`` reports "diff_cover.violations_reporter.QualityReporterError: No config file found, using default configuration" **Solution**: Your project needs a `pylintrc` file. Provide this file (it can be empty) and ``diff-quality`` should run without issue. **Issue**: ``diff-quality`` reports "Quality tool not installed" **Solution**: ``diff-quality`` assumes you have the tool you wish to run against your diff installed. If you do not have it then install it with your favorite package manager. **Issue**: ``diff-quality`` reports no quality issues **Solution**: You might use a pattern like ``diff-quality --violations foo *.py``. 
The last argument is not used to specify the files but for the quality tool report. Remove it to resolve the issue License ------- The code in this repository is licensed under the Apache 2.0 license. Please see ``LICENSE.txt`` for details. How to Contribute ----------------- Contributions are very welcome. The easiest way is to fork this repo, and then make a pull request from your fork. NOTE: ``diff-quality`` supports a plugin model, so new tools can be integrated without requiring changes to this repo. See the section "Adding `diff-quality`` Support for a New Quality Checker". Setting Up For Development ~~~~~~~~~~~~~~~~~~~~~~~~~~ This project is managed with `poetry` this can be installed with `pip` poetry manages a python virtual environment and organizes dependencies. It also packages this project. .. code:: bash pip install poetry .. code:: bash poetry install I would also suggest running this command after. This will make it so git blame ignores the commit that formatted the entire codebase. .. code:: bash git config blame.ignoreRevsFile .git-blame-ignore-revs Adding `diff-quality`` Support for a New Quality Checker ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Adding support for a new quality checker is simple. ``diff-quality`` supports plugins using the popular Python `pluggy package `_. If the quality checker is already implemented as a Python package, great! If not, `create a Python package `_ to host the plugin implementation. In the Python package's ``setup.py`` file, define an entry point for the plugin, e.g. .. code:: python setup( ... entry_points={ 'diff_cover': [ 'sqlfluff = sqlfluff.diff_quality_plugin' ], }, ... 
) Notes: * The dictionary key for the entry point must be named ``diff_cover`` * The value must be in the format ``TOOL_NAME = YOUR_PACKAGE.PLUGIN_MODULE`` When your package is installed, ``diff-quality`` uses this information to look up the tool package and module based on the tool name provided to the ``--violations`` option of the ``diff-quality`` command, e.g.: .. code:: bash $ diff-quality --violations sqlfluff The plugin implementation will look something like the example below. This is a simplified example based on a working plugin implementation. .. code:: python from diff_cover.hook import hookimpl as diff_cover_hookimpl from diff_cover.violationsreporters.base import BaseViolationReporter, Violation class SQLFluffViolationReporter(BaseViolationReporter): supported_extensions = ['sql'] def __init__(self): super(SQLFluffViolationReporter, self).__init__('sqlfluff') def violations(self, src_path): return [ Violation(violation.line_number, violation.description) for violation in get_linter().get_violations(src_path) ] def measured_lines(self, src_path): return None @staticmethod def installed(): return True @diff_cover_hookimpl def diff_cover_report_quality(): return SQLFluffViolationReporter() Important notes: * ``diff-quality`` is looking for a plugin function: * Located in your package's module that was listed in the ``setup.py`` entry point. * Marked with the ``@diff_cover_hookimpl`` decorator * Named ``diff_cover_report_quality``. (This distinguishes it from any other plugin types ``diff_cover`` may support.) * The function should return an object with the following properties and methods: * ``supported_extensions`` property with a list of supported file extensions * ``violations()`` function that returns a list of ``Violation`` objects for the specified ``src_path``. For more details on this function and other possible reporting-related methods, see the ``BaseViolationReporter`` class `here `_. 
Special Thanks ------------------------- Shout out to the original author of diff-cover `Will Daly `_ and the original author of diff-quality `Sarina Canelake `_. Originally created with the support of `edX `_. .. |pypi-version| image:: https://img.shields.io/pypi/v/diff-cover.svg :target: https://pypi.org/project/diff-cover :alt: PyPI version .. |conda-version| image:: https://img.shields.io/conda/vn/conda-forge/diff-cover.svg :target: https://anaconda.org/conda-forge/diff-cover :alt: Conda version .. |build-status| image:: https://github.com/bachmann1234/diff_cover/actions/workflows/verify.yaml/badge.svg?branch=main :target: https://github.com/Bachmann1234/diff_cover/actions/workflows/verify.yaml :alt: Build Status diff_cover-7.4.0/diff_cover/000077500000000000000000000000001436411411700157405ustar00rootroot00000000000000diff_cover-7.4.0/diff_cover/__init__.py000066400000000000000000000006411436411411700200520ustar00rootroot00000000000000try: from importlib.metadata import version except ImportError: # Importlib.metadata introduced in python 3.8 import pkg_resources def version(package): return pkg_resources.get_distribution(package).version VERSION = version("diff_cover") DESCRIPTION = "Automatically find diff lines that need test coverage." QUALITY_DESCRIPTION = "Automatically find diff lines with quality violations." diff_cover-7.4.0/diff_cover/command_runner.py000066400000000000000000000036261436411411700213300ustar00rootroot00000000000000import subprocess import sys class CommandError(Exception): """ Error raised when a command being executed returns an error """ def execute(command, exit_codes=None): """Execute provided command returning the stdout Args: command (list[str]): list of tokens to execute as your command. exit_codes (list[int]): exit codes which do not indicate error. subprocess_mod (module): Defaults to pythons subprocess module but you can optionally pass in another. 
This is mostly for testing purposes Returns: str - Stdout of the command passed in. This will be Unicode for python < 3. Str for python 3 Raises: ValueError if there is a error running the command """ if exit_codes is None: exit_codes = [0] stdout_pipe = subprocess.PIPE process = subprocess.Popen(command, stdout=stdout_pipe, stderr=stdout_pipe) try: stdout, stderr = process.communicate() except OSError: sys.stderr.write( " ".join( [ cmd.decode(sys.getfilesystemencoding()) if isinstance(cmd, bytes) else cmd for cmd in command ] ) ) raise stderr = _ensure_unicode(stderr) if process.returncode not in exit_codes: raise CommandError(stderr) return _ensure_unicode(stdout), stderr def run_command_for_code(command): """ Returns command's exit code. """ process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) process.communicate() return process.returncode def _ensure_unicode(text): """ Ensures the text passed in becomes unicode Args: text (str|unicode) Returns: unicode """ if isinstance(text, bytes): return text.decode(sys.getfilesystemencoding(), "replace") return text diff_cover-7.4.0/diff_cover/config_parser.py000066400000000000000000000043671436411411700211450ustar00rootroot00000000000000import abc import enum try: import tomli as toml _HAS_TOML = True except ImportError: # pragma: no cover _HAS_TOML = False if not _HAS_TOML: try: import tomllib as toml _HAS_TOML = True except ImportError: # pragma: no cover pass class Tool(enum.Enum): DIFF_COVER = enum.auto() DIFF_QUALITY = enum.auto() class ParserError(Exception): pass class ConfigParser(abc.ABC): def __init__(self, file_name, tool): self._file_name = file_name self._tool = tool @abc.abstractmethod def parse(self): """Returns a dict of the parsed data or None if the file cannot be handled.""" class TOMLParser(ConfigParser): def __init__(self, file_name, tool): super().__init__(file_name, tool) self._section = "diff_cover" if tool == Tool.DIFF_COVER else "diff_quality" def parse(self): if 
not self._file_name.endswith(".toml"): return None if not _HAS_TOML: raise ParserError("No Toml lib installed") with open(self._file_name, "rb") as file_handle: config = toml.load(file_handle) config = config.get("tool", {}).get(self._section, {}) if not config: raise ParserError(f"No 'tool.{self._section}' configuration available") return config _PARSERS = [TOMLParser] def _parse_config_file(file_name, tool): for parser_class in _PARSERS: parser = parser_class(file_name, tool) config = parser.parse() if config: return config raise ParserError(f"No config parser could handle {file_name}") def get_config(parser, argv, defaults, tool): cli_config = vars(parser.parse_args(argv)) if cli_config["config_file"]: file_config = _parse_config_file(cli_config["config_file"], tool) else: file_config = {} config = defaults for config_dict in [file_config, cli_config]: for key, value in config_dict.items(): if value is None: # if the value is None, it's a default one; only override if not present config.setdefault(key, value) else: # else just override the existing value config[key] = value return config diff_cover-7.4.0/diff_cover/diff_cover_tool.py000066400000000000000000000214441436411411700214620ustar00rootroot00000000000000import argparse import io import logging import os import sys import xml.etree.ElementTree as etree from diff_cover import DESCRIPTION, VERSION from diff_cover.config_parser import Tool, get_config from diff_cover.diff_reporter import GitDiffReporter from diff_cover.git_diff import GitDiffTool from diff_cover.git_path import GitPathTool from diff_cover.report_generator import ( HtmlReportGenerator, JsonReportGenerator, MarkdownReportGenerator, StringReportGenerator, ) from diff_cover.violationsreporters.violations_reporter import ( LcovCoverageReporter, XmlCoverageReporter, ) HTML_REPORT_HELP = "Diff coverage HTML output" JSON_REPORT_HELP = "Diff coverage JSON output" MARKDOWN_REPORT_HELP = "Diff coverage Markdown output" COMPARE_BRANCH_HELP = "Branch to 
compare" CSS_FILE_HELP = "Write CSS into an external file" FAIL_UNDER_HELP = ( "Returns an error code if coverage or quality score is below this value" ) IGNORE_STAGED_HELP = "Ignores staged changes" IGNORE_UNSTAGED_HELP = "Ignores unstaged changes" IGNORE_WHITESPACE = "When getting a diff ignore any and all whitespace" EXCLUDE_HELP = "Exclude files, more patterns supported" SRC_ROOTS_HELP = "List of source directories (only for jacoco coverage reports)" COVERAGE_FILE_HELP = "coverage report (XML or lcov.info)" DIFF_RANGE_NOTATION_HELP = ( "Git diff range notation to use when comparing branches, defaults to '...'" ) QUIET_HELP = "Only print errors and failures" SHOW_UNCOVERED = "Show uncovered lines on the console" INCLUDE_UNTRACKED_HELP = "Include untracked files" CONFIG_FILE_HELP = "The configuration file to use" LOGGER = logging.getLogger(__name__) def parse_coverage_args(argv): """ Parse command line arguments, returning a dict of valid options: { 'coverage_file': COVERAGE_FILE, 'html_report': None | HTML_REPORT, 'json_report': None | JSON_REPORT, 'external_css_file': None | CSS_FILE, } where `COVERAGE_FILE`, `HTML_REPORT`, `JSON_REPORT`, and `CSS_FILE` are paths. The path strings may or may not exist. 
""" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument("coverage_file", type=str, help=COVERAGE_FILE_HELP, nargs="+") parser.add_argument( "--html-report", metavar="FILENAME", type=str, help=HTML_REPORT_HELP, ) parser.add_argument( "--json-report", metavar="FILENAME", type=str, help=JSON_REPORT_HELP, ) parser.add_argument( "--markdown-report", metavar="FILENAME", type=str, help=MARKDOWN_REPORT_HELP, ) parser.add_argument( "--show-uncovered", action="store_true", default=None, help=SHOW_UNCOVERED ) parser.add_argument( "--external-css-file", metavar="FILENAME", type=str, help=CSS_FILE_HELP, ) parser.add_argument( "--compare-branch", metavar="BRANCH", type=str, help=COMPARE_BRANCH_HELP, ) parser.add_argument( "--fail-under", metavar="SCORE", type=float, default=None, help=FAIL_UNDER_HELP ) parser.add_argument( "--ignore-staged", action="store_true", default=None, help=IGNORE_STAGED_HELP ) parser.add_argument( "--ignore-unstaged", action="store_true", default=None, help=IGNORE_UNSTAGED_HELP, ) parser.add_argument( "--include-untracked", action="store_true", default=None, help=INCLUDE_UNTRACKED_HELP, ) parser.add_argument( "--exclude", metavar="EXCLUDE", type=str, nargs="+", help=EXCLUDE_HELP ) parser.add_argument( "--src-roots", metavar="DIRECTORY", type=str, nargs="+", help=SRC_ROOTS_HELP, ) parser.add_argument( "--diff-range-notation", metavar="RANGE_NOTATION", type=str, choices=["...", ".."], help=DIFF_RANGE_NOTATION_HELP, ) parser.add_argument("--version", action="version", version=f"diff-cover {VERSION}") parser.add_argument( "--ignore-whitespace", action="store_true", default=None, help=IGNORE_WHITESPACE, ) parser.add_argument( "-q", "--quiet", action="store_true", default=None, help=QUIET_HELP ) parser.add_argument( "-c", "--config-file", help=CONFIG_FILE_HELP, metavar="CONFIG_FILE" ) defaults = { "show_uncovered": False, "compare_branch": "origin/main", "fail_under": 0, "ignore_staged": False, "ignore_unstaged": False, 
"ignore_untracked": False, "src_roots": ["src/main/java", "src/test/java"], "ignore_whitespace": False, "diff_range_notation": "...", "quiet": False, } return get_config(parser=parser, argv=argv, defaults=defaults, tool=Tool.DIFF_COVER) def generate_coverage_report( coverage_files, compare_branch, html_report=None, css_file=None, json_report=None, markdown_report=None, ignore_staged=False, ignore_unstaged=False, include_untracked=False, exclude=None, src_roots=None, diff_range_notation=None, ignore_whitespace=False, quiet=False, show_uncovered=False, ): """ Generate the diff coverage report, using kwargs from `parse_args()`. """ diff = GitDiffReporter( compare_branch, git_diff=GitDiffTool(diff_range_notation, ignore_whitespace), ignore_staged=ignore_staged, ignore_unstaged=ignore_unstaged, include_untracked=include_untracked, exclude=exclude, ) xml_roots = [ etree.parse(coverage_file) for coverage_file in coverage_files if coverage_file.endswith(".xml") ] lcov_roots = [ LcovCoverageReporter.parse(coverage_file) for coverage_file in coverage_files if not coverage_file.endswith(".xml") ] if len(xml_roots) > 0 and len(lcov_roots) > 0: raise ValueError(f"Mixing LCov and XML reports is not supported yet") elif len(xml_roots) > 0: coverage = XmlCoverageReporter(xml_roots, src_roots) else: coverage = LcovCoverageReporter(lcov_roots, src_roots) # Build a report generator if html_report is not None: css_url = css_file if css_url is not None: css_url = os.path.relpath(css_file, os.path.dirname(html_report)) reporter = HtmlReportGenerator(coverage, diff, css_url=css_url) with open(html_report, "wb") as output_file: reporter.generate_report(output_file) if css_file is not None: with open(css_file, "wb") as output_file: reporter.generate_css(output_file) if json_report is not None: reporter = JsonReportGenerator(coverage, diff) with open(json_report, "wb") as output_file: reporter.generate_report(output_file) if markdown_report is not None: reporter = 
MarkdownReportGenerator(coverage, diff) with open(markdown_report, "wb") as output_file: reporter.generate_report(output_file) # Generate the report for stdout reporter = StringReportGenerator(coverage, diff, show_uncovered) output_file = io.BytesIO() if quiet else sys.stdout.buffer # Generate the report reporter.generate_report(output_file) return reporter.total_percent_covered() def main(argv=None, directory=None): """ Main entry point for the tool, script installed via pyproject.toml Returns a value that can be passed into exit() specifying the exit code. 1 is an error 0 is successful run """ argv = argv or sys.argv arg_dict = parse_coverage_args(argv[1:]) quiet = arg_dict["quiet"] level = logging.ERROR if quiet else logging.WARNING logging.basicConfig(format="%(message)s", level=level) GitPathTool.set_cwd(directory) fail_under = arg_dict.get("fail_under") percent_covered = generate_coverage_report( arg_dict["coverage_file"], arg_dict["compare_branch"], html_report=arg_dict["html_report"], json_report=arg_dict["json_report"], markdown_report=arg_dict["markdown_report"], css_file=arg_dict["external_css_file"], ignore_staged=arg_dict["ignore_staged"], ignore_unstaged=arg_dict["ignore_unstaged"], include_untracked=arg_dict["include_untracked"], exclude=arg_dict["exclude"], src_roots=arg_dict["src_roots"], diff_range_notation=arg_dict["diff_range_notation"], ignore_whitespace=arg_dict["ignore_whitespace"], quiet=quiet, show_uncovered=arg_dict["show_uncovered"], ) if percent_covered >= fail_under: return 0 LOGGER.error("Failure. Coverage is below %i%%.", fail_under) return 1 if __name__ == "__main__": sys.exit(main()) diff_cover-7.4.0/diff_cover/diff_quality_tool.py000066400000000000000000000263401436411411700220340ustar00rootroot00000000000000""" Implement the command-line tool interface for diff_quality. 
""" import argparse import io import logging import os import sys import pluggy import diff_cover from diff_cover import hookspecs from diff_cover.config_parser import Tool, get_config from diff_cover.diff_cover_tool import ( COMPARE_BRANCH_HELP, CONFIG_FILE_HELP, CSS_FILE_HELP, DIFF_RANGE_NOTATION_HELP, EXCLUDE_HELP, FAIL_UNDER_HELP, HTML_REPORT_HELP, IGNORE_STAGED_HELP, IGNORE_UNSTAGED_HELP, IGNORE_WHITESPACE, INCLUDE_UNTRACKED_HELP, JSON_REPORT_HELP, MARKDOWN_REPORT_HELP, QUIET_HELP, ) from diff_cover.diff_reporter import GitDiffReporter from diff_cover.git_diff import GitDiffTool from diff_cover.git_path import GitPathTool from diff_cover.report_generator import ( HtmlQualityReportGenerator, JsonReportGenerator, MarkdownQualityReportGenerator, StringQualityReportGenerator, ) from diff_cover.violationsreporters.base import QualityReporter from diff_cover.violationsreporters.java_violations_reporter import ( CheckstyleXmlDriver, FindbugsXmlDriver, PmdXmlDriver, checkstyle_driver, ) from diff_cover.violationsreporters.violations_reporter import ( CppcheckDriver, EslintDriver, PylintDriver, flake8_driver, jshint_driver, pycodestyle_driver, pydocstyle_driver, pyflakes_driver, ) QUALITY_DRIVERS = { "cppcheck": CppcheckDriver(), "pycodestyle": pycodestyle_driver, "pyflakes": pyflakes_driver, "pylint": PylintDriver(), "flake8": flake8_driver, "jshint": jshint_driver, "eslint": EslintDriver(), "pydocstyle": pydocstyle_driver, "checkstyle": checkstyle_driver, "checkstylexml": CheckstyleXmlDriver(), "findbugs": FindbugsXmlDriver(), "pmd": PmdXmlDriver(), } VIOLATION_CMD_HELP = "Which code quality tool to use (%s)" % "/".join( sorted(QUALITY_DRIVERS) ) INPUT_REPORTS_HELP = "Which violations reports to use" OPTIONS_HELP = "Options to be passed to the violations tool" INCLUDE_HELP = "Files to include (glob pattern)" REPORT_ROOT_PATH_HELP = "The root path used to generate a report" LOGGER = logging.getLogger(__name__) def parse_quality_args(argv): """ Parse command line 
arguments, returning a dict of valid options: { 'violations': pycodestyle| pyflakes | flake8 | pylint | ..., 'html_report': None | HTML_REPORT, 'external_css_file': None | CSS_FILE, } where `HTML_REPORT` and `CSS_FILE` are paths. """ parser = argparse.ArgumentParser(description=diff_cover.QUALITY_DESCRIPTION) parser.add_argument( "--violations", metavar="TOOL", type=str, help=VIOLATION_CMD_HELP, required=True ) parser.add_argument( "--html-report", metavar="FILENAME", type=str, help=HTML_REPORT_HELP, ) parser.add_argument( "--json-report", metavar="FILENAME", type=str, help=JSON_REPORT_HELP, ) parser.add_argument( "--markdown-report", metavar="FILENAME", type=str, help=MARKDOWN_REPORT_HELP, ) parser.add_argument( "--external-css-file", metavar="FILENAME", type=str, help=CSS_FILE_HELP, ) parser.add_argument( "--compare-branch", metavar="BRANCH", type=str, help=COMPARE_BRANCH_HELP, ) parser.add_argument("input_reports", type=str, nargs="*", help=INPUT_REPORTS_HELP) parser.add_argument("--options", type=str, nargs="?", help=OPTIONS_HELP) parser.add_argument( "--fail-under", metavar="SCORE", type=float, help=FAIL_UNDER_HELP ) parser.add_argument( "--ignore-staged", action="store_true", default=None, help=IGNORE_STAGED_HELP ) parser.add_argument( "--ignore-unstaged", action="store_true", default=None, help=IGNORE_UNSTAGED_HELP, ) parser.add_argument( "--include-untracked", action="store_true", default=None, help=INCLUDE_UNTRACKED_HELP, ) parser.add_argument( "--exclude", metavar="EXCLUDE", type=str, nargs="+", help=EXCLUDE_HELP ) parser.add_argument( "--include", metavar="INCLUDE", nargs="+", type=str, help=INCLUDE_HELP ) parser.add_argument( "--diff-range-notation", metavar="RANGE_NOTATION", type=str, help=DIFF_RANGE_NOTATION_HELP, ) parser.add_argument( "--version", action="version", version=f"diff-quality {diff_cover.VERSION}", ) parser.add_argument( "--ignore-whitespace", action="store_true", default=None, help=IGNORE_WHITESPACE, ) parser.add_argument( "-q", 
"--quiet", action="store_true", default=None, help=QUIET_HELP ) parser.add_argument( "-c", "--config-file", help=CONFIG_FILE_HELP, metavar="CONFIG_FILE" ) parser.add_argument( "--report-root-path", help=REPORT_ROOT_PATH_HELP, metavar="ROOT_PATH" ) defaults = { "ignore_whitespace": False, "compare_branch": "origin/main", "diff_range_notation": "...", "input_reports": [], "fail_under": 0, "ignore_staged": False, "ignore_unstaged": False, "ignore_untracked": False, "quiet": False, } return get_config( parser=parser, argv=argv, defaults=defaults, tool=Tool.DIFF_QUALITY ) def generate_quality_report( tool, compare_branch, html_report=None, json_report=None, markdown_report=None, css_file=None, ignore_staged=False, ignore_unstaged=False, include_untracked=False, exclude=None, include=None, diff_range_notation=None, ignore_whitespace=False, quiet=False, ): """ Generate the quality report, using kwargs from `parse_args()`. """ supported_extensions = ( getattr(tool, "supported_extensions", None) or tool.driver.supported_extensions ) diff = GitDiffReporter( compare_branch, git_diff=GitDiffTool(diff_range_notation, ignore_whitespace), ignore_staged=ignore_staged, ignore_unstaged=ignore_unstaged, include_untracked=include_untracked, supported_extensions=supported_extensions, exclude=exclude, include=include, ) if html_report is not None: css_url = css_file if css_url is not None: css_url = os.path.relpath(css_file, os.path.dirname(html_report)) reporter = HtmlQualityReportGenerator(tool, diff, css_url=css_url) with open(html_report, "wb") as output_file: reporter.generate_report(output_file) if css_file is not None: with open(css_file, "wb") as output_file: reporter.generate_css(output_file) if json_report is not None: reporter = JsonReportGenerator(tool, diff) with open(json_report, "wb") as output_file: reporter.generate_report(output_file) if markdown_report is not None: reporter = MarkdownQualityReportGenerator(tool, diff) with open(markdown_report, "wb") as output_file: 
reporter.generate_report(output_file) # Generate the report for stdout reporter = StringQualityReportGenerator(tool, diff) output_file = io.BytesIO() if quiet else sys.stdout.buffer reporter.generate_report(output_file) return reporter.total_percent_covered() def main(argv=None, directory=None): """ Main entry point for the tool, script installed via pyproject.toml Returns a value that can be passed into exit() specifying the exit code. 1 is an error 0 is successful run """ argv = argv or sys.argv arg_dict = parse_quality_args(argv[1:]) quiet = arg_dict["quiet"] level = logging.ERROR if quiet else logging.WARNING logging.basicConfig(format="%(message)s", level=level) GitPathTool.set_cwd(directory) fail_under = arg_dict.get("fail_under") tool = arg_dict["violations"] user_options = arg_dict.get("options") if user_options: # strip quotes if present first_char = user_options[0] last_char = user_options[-1] if first_char == last_char and first_char in ('"', "'"): user_options = user_options[1:-1] reporter = None reporter_factory_fn = None driver = QUALITY_DRIVERS.get(tool) if driver is None: # The requested tool is not built into diff_cover. See if another Python # package provides it. 
plugin_manager = pluggy.PluginManager("diff_cover") plugin_manager.add_hookspecs(hookspecs) plugin_manager.load_setuptools_entrypoints("diff_cover") hooks = ( plugin_manager.hook.diff_cover_report_quality # pylint: disable=no-member ) for hookimpl in hooks.get_hookimpls(): if hookimpl.plugin_name == tool: reporter_factory_fn = hookimpl.function break if reporter or driver or reporter_factory_fn: input_reports = [] try: for path in arg_dict["input_reports"]: try: input_reports.append(open(path, "rb")) except OSError: LOGGER.error("Could not load report '%s'", path) return 1 if driver is not None: # If we've been given pre-generated reports, # try to open the files if arg_dict["report_root_path"]: driver.add_driver_args( report_root_path=arg_dict["report_root_path"] ) reporter = QualityReporter(driver, input_reports, user_options) elif reporter_factory_fn: reporter = reporter_factory_fn( reports=input_reports, options=user_options ) percent_passing = generate_quality_report( reporter, arg_dict["compare_branch"], html_report=arg_dict["html_report"], json_report=arg_dict["json_report"], markdown_report=arg_dict["markdown_report"], css_file=arg_dict["external_css_file"], ignore_staged=arg_dict["ignore_staged"], ignore_unstaged=arg_dict["ignore_unstaged"], include_untracked=arg_dict["include_untracked"], exclude=arg_dict["exclude"], include=arg_dict["include"], diff_range_notation=arg_dict["diff_range_notation"], ignore_whitespace=arg_dict["ignore_whitespace"], quiet=quiet, ) if percent_passing >= fail_under: return 0 LOGGER.error("Failure. 
Quality is below %i.", fail_under) return 1 except ImportError: LOGGER.error("Quality tool not installed: '%s'", tool) return 1 except OSError as exc: LOGGER.error("Failure: '%s'", str(exc)) return 1 # Close any reports we opened finally: for file_handle in input_reports: file_handle.close() else: LOGGER.error("Quality tool not recognized: '%s'", tool) return 1 if __name__ == "__main__": sys.exit(main()) diff_cover-7.4.0/diff_cover/diff_reporter.py000066400000000000000000000405471436411411700211560ustar00rootroot00000000000000""" Classes for querying which lines have changed based on a diff. """ import fnmatch import glob import os import re from abc import ABC, abstractmethod from diff_cover.git_diff import GitDiffError class BaseDiffReporter(ABC): """ Query information about lines changed in a diff. """ _exclude = None _include = None def __init__(self, name, exclude=None, include=None): """ Provide a `name` for the diff report, which will be included in the diff coverage report. """ self._name = name self._exclude = exclude self._include = include @abstractmethod def src_paths_changed(self): """ Returns a list of source paths changed in this diff. Source paths are guaranteed to be unique. """ @abstractmethod def lines_changed(self, src_path): """ Returns a list of line numbers changed in the source file at `src_path`. Each line is guaranteed to be included only once in the list and in ascending order. """ def name(self): """ Return the name of the diff, which will be included in the diff coverage report. """ return self._name def _fnmatch(self, filename, patterns, default=True): """Wrap :func:`fnmatch.fnmatch` to add some functionality. :param str filename: Name of the file we're trying to match. :param list patterns: Patterns we're using to try to match the filename. :param bool default: The default value if patterns is empty :returns: True if a pattern matches the filename, False if it doesn't. ``default`` if patterns is empty. 
""" if not patterns: return default return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns) def _is_path_excluded(self, path): """ Check if a path is excluded. First it is checked if the path matches one of the include patterns (if provided). Second, the path is matched against the exclude patterns. :param str path: Path to check against the exclude and include patterns. :returns: True if the patch should be excluded, otherwise False. """ include = self._include if include: for pattern in include: if path in glob.glob(pattern, recursive=True): break # file is included else: return True exclude = self._exclude if not exclude: return False basename = os.path.basename(path) if self._fnmatch(basename, exclude): return True absolute_path = os.path.abspath(path) return self._fnmatch(absolute_path, exclude) class GitDiffReporter(BaseDiffReporter): """ Query information from a Git diff between branches. """ def __init__( self, compare_branch="origin/main", git_diff=None, ignore_staged=None, ignore_unstaged=None, include_untracked=False, supported_extensions=None, exclude=None, include=None, ): """ Configure the reporter to use `git_diff` as the wrapper for the `git diff` tool. 
(Should have same interface as `git_diff.GitDiffTool`) """ options = [] if not ignore_staged: options.append("staged") if not ignore_unstaged: options.append("unstaged") if include_untracked: options.append("untracked") # Branch is always present, so use as basis for name name = f"{compare_branch}{git_diff.range_notation if git_diff else '...'}HEAD" if len(options) > 0: # If more options are present separate them by comma's, except the last one for item in options[:-1]: name += ", " + item # Apply and + changes to the last option name += " and " + options[-1] + " changes" super().__init__(name, exclude, include) self._compare_branch = compare_branch self._git_diff_tool = git_diff self._ignore_staged = ignore_staged self._ignore_unstaged = ignore_unstaged self._include_untracked = include_untracked self._supported_extensions = supported_extensions # Cache diff information as a dictionary # with file path keys and line number list values self._diff_dict = None def clear_cache(self): """ Reset the git diff result cache. """ self._diff_dict = None def src_paths_changed(self): """ See base class docstring. """ # Get the diff dictionary diff_dict = self._git_diff() # include untracked files if self._include_untracked: for path in self._git_diff_tool.untracked(): with open(path) as file_handle: num_lines = len(file_handle.readlines()) diff_dict[path] = list(range(1, num_lines + 1)) # Return the changed file paths (dict keys) # in alphabetical order return sorted(diff_dict.keys(), key=lambda x: x.lower()) def lines_changed(self, src_path): """ See base class docstring. """ # Get the diff dictionary (cached) diff_dict = self._git_diff() # Look up the modified lines for the source file # If no lines modified, return an empty list return diff_dict.get(src_path, []) def _get_included_diff_results(self): """ Return a list of stages to be included in the diff results. 
""" included = [self._git_diff_tool.diff_committed(self._compare_branch)] if not self._ignore_staged: included.append(self._git_diff_tool.diff_staged()) if not self._ignore_unstaged: included.append(self._git_diff_tool.diff_unstaged()) return included def _git_diff(self): """ Run `git diff` and returns a dict in which the keys are changed file paths and the values are lists of line numbers. Guarantees that each line number within a file is unique (no repeats) and in ascending order. Returns a cached result if called multiple times. Raises a GitDiffError if `git diff` has an error. """ # If we do not have a cached result, execute `git diff` if self._diff_dict is None: result_dict = {} for diff_str in self._get_included_diff_results(): # Parse the output of the diff string diff_dict = self._parse_diff_str(diff_str) for src_path, (added_lines, deleted_lines) in diff_dict.items(): if self._is_path_excluded(src_path): continue # If no _supported_extensions provided, or extension present: process _, extension = os.path.splitext(src_path) extension = extension[1:].lower() # 'not self._supported_extensions' tests for both None and empty list [] if ( not self._supported_extensions or extension in self._supported_extensions ): # Remove any lines from the dict that have been deleted # Include any lines that have been added result_dict[src_path] = [ line for line in result_dict.get(src_path, []) if line not in deleted_lines ] + added_lines # Eliminate repeats and order line numbers for (src_path, lines) in result_dict.items(): result_dict[src_path] = self._unique_ordered_lines(lines) # Store the resulting dict self._diff_dict = result_dict # Return the diff cache return self._diff_dict # Regular expressions used to parse the diff output SRC_FILE_RE = re.compile(r'^diff --git "?a/.*"? 
"?b/([^\n"]*)"?') MERGE_CONFLICT_RE = re.compile(r"^diff --cc ([^\n]*)") HUNK_LINE_RE = re.compile(r"\+([0-9]*)") def _parse_diff_str(self, diff_str): """ Parse the output of `git diff` into a dictionary of the form: { SRC_PATH: (ADDED_LINES, DELETED_LINES) } where `ADDED_LINES` and `DELETED_LINES` are lists of line numbers added/deleted respectively. If the output could not be parsed, raises a GitDiffError. """ # Create a dict to hold results diff_dict = {} # Parse the diff string into sections by source file sections_dict = self._parse_source_sections(diff_str) for (src_path, diff_lines) in sections_dict.items(): # Parse the hunk information for the source file # to determine lines changed for the source file diff_dict[src_path] = self._parse_lines(diff_lines) return diff_dict def _parse_source_sections(self, diff_str): """ Given the output of `git diff`, return a dictionary with keys that are source file paths. Each value is a list of lines from the `git diff` output related to the source file. Raises a `GitDiffError` if `diff_str` is in an invalid format. """ # Create a dict to map source files to lines in the diff output source_dict = {} # Keep track of the current source file src_path = None # Signal that we've found a hunk (after starting a source file) found_hunk = False # Parse the diff string into sections by source file for line in diff_str.split("\n"): # If the line starts with "diff --git" # or "diff --cc" (in the case of a merge conflict) # then it is the start of a new source file if line.startswith("diff --git") or line.startswith("diff --cc"): # Retrieve the name of the source file src_path = self._parse_source_line(line) # Create an entry for the source file, if we don't # already have one. 
if src_path not in source_dict: source_dict[src_path] = [] # Signal that we're waiting for a hunk for this source file found_hunk = False # Every other line is stored in the dictionary for this source file # once we find a hunk section else: # Only add lines if we're in a hunk section # (ignore index and files changed lines) if found_hunk or line.startswith("@@"): # Remember that we found a hunk found_hunk = True if src_path is not None: source_dict[src_path].append(line) else: # We tolerate other information before we have # a source file defined, unless it's a hunk line if line.startswith("@@"): msg = f"Hunk has no source file: '{line}'" raise GitDiffError(msg) return source_dict def _parse_lines(self, diff_lines): """ Given the diff lines output from `git diff` for a particular source file, return a tuple of `(ADDED_LINES, DELETED_LINES)` where `ADDED_LINES` and `DELETED_LINES` are lists of line numbers added/deleted respectively. Raises a `GitDiffError` if the diff lines are in an invalid format. """ added_lines = [] deleted_lines = [] current_line_new = None current_line_old = None for line in diff_lines: # If this is the start of the hunk definition, retrieve # the starting line number if line.startswith("@@"): line_num = self._parse_hunk_line(line) current_line_new, current_line_old = line_num, line_num # This is an added/modified line, so store the line number elif line.startswith("+"): # Since we parse for source file sections before # calling this method, we're guaranteed to have a source # file specified. We check anyway just to be safe. if current_line_new is not None: # Store the added line added_lines.append(current_line_new) # Increment the line number in the file current_line_new += 1 # This is a deleted line that does not exist in the final # version, so skip it elif line.startswith("-"): # Since we parse for source file sections before # calling this method, we're guaranteed to have a source # file specified. We check anyway just to be safe. 
if current_line_old is not None: # Store the deleted line deleted_lines.append(current_line_old) # Increment the line number in the file current_line_old += 1 # This is a line in the final version that was not modified. # Increment the line number, but do not store this as a changed # line. else: if current_line_old is not None: current_line_old += 1 if current_line_new is not None: current_line_new += 1 # If we are not in a hunk, then ignore the line return added_lines, deleted_lines def _parse_source_line(self, line): """ Given a source line in `git diff` output, return the path to the source file. """ if "--git" in line: regex = self.SRC_FILE_RE elif "--cc" in line: regex = self.MERGE_CONFLICT_RE else: msg = f"Do not recognize format of source in line '{line}'" raise GitDiffError(msg) # Parse for the source file path groups = regex.findall(line) if len(groups) == 1: return groups[0] msg = f"Could not parse source path in line '{line}'" raise GitDiffError(msg) def _parse_hunk_line(self, line): """ Given a hunk line in `git diff` output, return the line number at the start of the hunk. A hunk is a segment of code that contains changes. The format of the hunk line is: @@ -k,l +n,m @@ TEXT where `k,l` represent the start line and length before the changes and `n,m` represent the start line and length after the changes. `git diff` will sometimes put a code excerpt from within the hunk in the `TEXT` section of the line. """ # Split the line at the @@ terminators (start and end of the line) components = line.split("@@") # The first component should be an empty string, because # the line starts with '@@'. The second component should # be the hunk information, and any additional components # are excerpts from the code. 
if len(components) >= 2: hunk_info = components[1] groups = self.HUNK_LINE_RE.findall(hunk_info) if len(groups) == 1: try: return int(groups[0]) except ValueError: msg = "Could not parse '{}' as a line number".format(groups[0]) raise GitDiffError(msg) else: msg = f"Could not find start of hunk in line '{line}'" raise GitDiffError(msg) else: msg = f"Could not parse hunk in line '{line}'" raise GitDiffError(msg) @staticmethod def _unique_ordered_lines(line_numbers): """ Given a list of line numbers, return a list in which each line number is included once and the lines are ordered sequentially. """ if len(line_numbers) == 0: return [] # Ensure lines are unique by putting them in a set line_set = set(line_numbers) # Retrieve the list from the set, sort it, and return return sorted(line for line in line_set) diff_cover-7.4.0/diff_cover/git_diff.py000066400000000000000000000073771436411411700201030ustar00rootroot00000000000000""" Wrapper for `git diff` command. """ from textwrap import dedent from diff_cover.command_runner import CommandError, execute class GitDiffError(Exception): """ `git diff` command produced an error. """ class GitDiffTool: """ Thin wrapper for a subset of the `git diff` command. """ def __init__(self, range_notation, ignore_whitespace): """ :param str range_notation: which range notation to use when producing the diff for committed files against another branch. Traditionally in git-cover the symmetric difference (three-dot, "A...M") notation has been used: it includes commits reachable from A and M from their merge-base, but not both, taking history in account. This includes cherry-picks between A and M, which are harmless and do not produce changes, but might give inaccurate coverage false-negatives. Two-dot range notation ("A..M") compares the tips of both trees and produces a diff. This more accurately describes the actual patch that will be applied by merging A into M, even if commits have been cherry-picked between branches. 
This will produce a more accurate diff for coverage comparison when complex merges and cherry-picks are involved. :param bool ignore_whitespace: Perform a diff but ignore any and all whitespace. """ self.range_notation = range_notation self._default_git_args = [ "git", "-c", "diff.mnemonicprefix=no", "-c", "diff.noprefix=no", ] self._default_diff_args = ["diff", "--no-color", "--no-ext-diff", "-U0"] if ignore_whitespace: self._default_diff_args.append("--ignore-all-space") self._default_diff_args.append("--ignore-blank-lines") def diff_committed(self, compare_branch="origin/main"): """ Returns the output of `git diff` for committed changes not yet in origin/main. Raises a `GitDiffError` if `git diff` outputs anything to stderr. """ diff_range = "{branch}{notation}HEAD".format( branch=compare_branch, notation=self.range_notation ) try: return execute( self._default_git_args + self._default_diff_args + [diff_range] )[0] except CommandError as e: if "unknown revision" in str(e): raise ValueError( dedent( f""" Could not find the branch to compare to. Does '{compare_branch}' exist? the `--compare-branch` argument allows you to set a different branch. """ ) ) raise def diff_unstaged(self): """ Returns the output of `git diff` with no arguments, which is the diff for unstaged changes. Raises a `GitDiffError` if `git diff` outputs anything to stderr. """ return execute(self._default_git_args + self._default_diff_args)[0] def diff_staged(self): """ Returns the output of `git diff --cached`, which is the diff for staged changes. Raises a `GitDiffError` if `git diff` outputs anything to stderr. 
""" return execute(self._default_git_args + self._default_diff_args + ["--cached"])[ 0 ] def untracked(self): """Return the untracked files.""" output = execute(["git", "ls-files", "--exclude-standard", "--others"])[0] if not output: return [] return [line for line in output.splitlines() if line] diff_cover-7.4.0/diff_cover/git_path.py000066400000000000000000000037641436411411700201230ustar00rootroot00000000000000""" Converter for `git diff` paths """ import os import sys from diff_cover.command_runner import execute class GitPathTool: """ Converts `git diff` paths to absolute paths or relative paths to cwd. This class should be used throughout the project to change paths from the paths yielded by `git diff` to correct project paths """ _cwd = None _root = None @classmethod def set_cwd(cls, cwd): """ Set the cwd that is used to manipulate paths. """ if not cwd: try: cwd = os.getcwdu() except AttributeError: cwd = os.getcwd() if isinstance(cwd, bytes): cwd = cwd.decode(sys.getdefaultencoding()) cls._cwd = cwd cls._root = cls._git_root() @classmethod def relative_path(cls, git_diff_path): """ Returns git_diff_path relative to cwd. """ # Remove git_root from src_path for searching the correct filename # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `diff_cover/violations_reporter.py` # search for `violations_reporter.py` root_rel_path = os.path.relpath(cls._cwd, cls._root) return os.path.relpath(git_diff_path, root_rel_path) @classmethod def absolute_path(cls, src_path): """ Returns absolute git_diff_path """ # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `other_package/some_file.py` # search for `/home/user/work/diff-cover/other_package/some_file.py` return os.path.join(cls._root, src_path) @classmethod def _git_root(cls): """ Returns the output of `git rev-parse --show-toplevel`, which is the absolute path for the git project root. 
""" command = ["git", "rev-parse", "--show-toplevel", "--encoding=utf-8"] git_root = execute(command)[0] return git_root.split("\n", maxsplit=1)[0] if git_root else "" diff_cover-7.4.0/diff_cover/hook.py000066400000000000000000000001731436411411700172530ustar00rootroot00000000000000import pluggy # Other packages that implement diff_cover plugins use this. hookimpl = pluggy.HookimplMarker("diff_cover") diff_cover-7.4.0/diff_cover/hookspecs.py000066400000000000000000000003601436411411700203070ustar00rootroot00000000000000import pluggy hookspec = pluggy.HookspecMarker("diff_cover") @hookspec def diff_cover_report_quality(): """ Return a 2-part tuple: - Quality plugin name - Object that implements the BaseViolationReporter protocol """ diff_cover-7.4.0/diff_cover/report_generator.py000066400000000000000000000325441436411411700217030ustar00rootroot00000000000000""" Classes for generating diff coverage reports. """ import contextlib import json from abc import ABC, abstractmethod from gettext import gettext, ngettext from jinja2 import Environment, PackageLoader, select_autoescape from diff_cover.snippets import Snippet class DiffViolations: """ Class to capture violations generated by a particular diff """ def __init__(self, violations, measured_lines, diff_lines): self.lines = {violation.line for violation in violations}.intersection( diff_lines ) self.violations = { violation for violation in violations if violation.line in self.lines } # By convention, a violation reporter # can return `None` to indicate that all lines are "measured" # by default. This is an optimization to avoid counting # lines in all the source files. if measured_lines is None: self.measured_lines = set(diff_lines) else: self.measured_lines = set(measured_lines).intersection(diff_lines) class BaseReportGenerator(ABC): """ Generate a diff coverage report. 
""" def __init__(self, violations_reporter, diff_reporter): """ Configure the report generator to build a report from `violations_reporter` (of type BaseViolationReporter) and `diff_reporter` (of type BaseDiffReporter) """ self._violations = violations_reporter self._diff = diff_reporter self._diff_violations_dict = None self._cache_violations = None @abstractmethod def generate_report(self, output_file): """ Write the report to `output_file`, which is a file-like object implementing the `write()` method. Concrete subclasses should access diff coverage info using the base class methods. """ def coverage_report_name(self): """ Return the name of the coverage report. """ return self._violations.name() def diff_report_name(self): """ Return the name of the diff. """ return self._diff.name() def src_paths(self): """ Return a list of source files in the diff for which we have coverage information. """ return { src for src, summary in self._diff_violations().items() if len(summary.measured_lines) > 0 } def percent_covered(self, src_path): """ Return a float percent of lines covered for the source in `src_path`. If we have no coverage information for `src_path`, returns None """ diff_violations = self._diff_violations().get(src_path) if diff_violations is None: return None # Protect against a divide by zero num_measured = len(diff_violations.measured_lines) if num_measured > 0: num_uncovered = len(diff_violations.lines) return 100 - float(num_uncovered) / num_measured * 100 return None def violation_lines(self, src_path): """ Return a list of lines in violation (integers) in `src_path` that were changed. If we have no coverage information for `src_path`, returns an empty list. """ diff_violations = self._diff_violations().get(src_path) if diff_violations is None: return [] return sorted(diff_violations.lines) def total_num_lines(self): """ Return the total number of lines in the diff for which we have coverage info. 
""" return sum( [ len(summary.measured_lines) for summary in self._diff_violations().values() ] ) def total_num_violations(self): """ Returns the total number of lines in the diff that are in violation. """ return sum(len(summary.lines) for summary in self._diff_violations().values()) def total_percent_covered(self): """ Returns the float percent of lines in the diff that are covered. (only counting lines for which we have coverage info). """ total_lines = self.total_num_lines() if total_lines > 0: num_covered = total_lines - self.total_num_violations() return int(float(num_covered) / total_lines * 100) return 100 def num_changed_lines(self): """Returns the number of changed lines.""" return sum( len(self._diff.lines_changed(src_path)) for src_path in self._diff.src_paths_changed() ) def _diff_violations(self): """ Returns a dictionary of the form: { SRC_PATH: DiffViolations(SRC_PATH) } where `SRC_PATH` is the path to the source file. To make this efficient, we cache and reuse the result. 
""" src_paths_changed = self._diff.src_paths_changed() if not self._diff_violations_dict: try: violations = self._violations.violations_batch(src_paths_changed) self._diff_violations_dict = { src_path: DiffViolations( violations.get(src_path, []), self._violations.measured_lines(src_path), self._diff.lines_changed(src_path), ) for src_path in src_paths_changed } except NotImplementedError: self._diff_violations_dict = { src_path: DiffViolations( self._violations.violations(src_path), self._violations.measured_lines(src_path), self._diff.lines_changed(src_path), ) for src_path in src_paths_changed } return self._diff_violations_dict def report_dict(self): src_stats = {src: self._src_path_stats(src) for src in self.src_paths()} return { "report_name": self.coverage_report_name(), "diff_name": self.diff_report_name(), "src_stats": src_stats, "total_num_lines": self.total_num_lines(), "total_num_violations": self.total_num_violations(), "total_percent_covered": self.total_percent_covered(), "num_changed_lines": self.num_changed_lines(), } def _src_path_stats(self, src_path): """ Return a dict of statistics for the source file at `src_path`. 
""" # Find violation lines violation_lines = self.violation_lines(src_path) violations = sorted(self._diff_violations()[src_path].violations) return { "percent_covered": self.percent_covered(src_path), "violation_lines": violation_lines, "violations": violations, } # Set up the template environment TEMPLATE_LOADER = PackageLoader(__package__) TEMPLATE_ENV = Environment( extensions=["jinja2.ext.i18n"], loader=TEMPLATE_LOADER, trim_blocks=True, lstrip_blocks=True, autoescape=select_autoescape(), ) # pylint thinks this callable does not exist, I assure you it does TEMPLATE_ENV.install_gettext_callables( # pylint: disable=no-member gettext=gettext, ngettext=ngettext, newstyle=True ) class JsonReportGenerator(BaseReportGenerator): def generate_report(self, output_file): json_report_str = json.dumps(self.report_dict()) # all report generators are expected to write raw bytes, so we encode # the json output_file.write(json_report_str.encode("utf-8")) class TemplateReportGenerator(BaseReportGenerator): """ Reporter that uses a template to generate the report. """ # Subclasses override this to specify the name of the templates # If not overridden, the template reporter will raise an exception template_path = None css_template_path = None # Subclasses should set this to True to indicate # that they want to include source file snippets. include_snippets = False def __init__(self, violations_reporter, diff_reporter, css_url=None): super().__init__(violations_reporter, diff_reporter) self.css_url = css_url def generate_report(self, output_file): """ See base class. output_file must be a file handler that takes in bytes! """ if self.template_path is not None: template = TEMPLATE_ENV.get_template(self.template_path) report = template.render(self._context()) if isinstance(report, str): report = report.encode("utf-8") output_file.write(report) def generate_css(self, output_file): """ Generate an external style sheet file. output_file must be a file handler that takes in bytes! 
""" if self.css_template_path is not None: template = TEMPLATE_ENV.get_template(self.css_template_path) style = template.render(self._context()) if isinstance(style, str): style = style.encode("utf-8") output_file.write(style) def _context(self): """ Return the context to pass to the template. The context is a dict of the form: { 'css_url': CSS_URL, 'report_name': REPORT_NAME, 'diff_name': DIFF_NAME, 'src_stats': {SRC_PATH: { 'percent_covered': PERCENT_COVERED, 'violation_lines': [LINE_NUM, ...] }, ... } 'total_num_lines': TOTAL_NUM_LINES, 'total_num_violations': TOTAL_NUM_VIOLATIONS, 'total_percent_covered': TOTAL_PERCENT_COVERED } """ # Include snippet style info if we're displaying # source code snippets if self.include_snippets: snippet_style = Snippet.style_defs() else: snippet_style = None context = super().report_dict() context.update({"css_url": self.css_url, "snippet_style": snippet_style}) return context @staticmethod def combine_adjacent_lines(line_numbers): """ Given a sorted collection of line numbers this will turn them to strings and combine adjacent values [1, 2, 5, 6, 100] -> ["1-2", "5-6", "100"] """ combine_template = "{0}-{1}" combined_list = [] # Add a terminating value of `None` to list line_numbers.append(None) start = line_numbers[0] end = None for line_number in line_numbers[1:]: # If the current number is adjacent to the previous number if (end if end else start) + 1 == line_number: end = line_number else: if end: combined_list.append(combine_template.format(start, end)) else: combined_list.append(str(start)) start = line_number end = None return combined_list def _src_path_stats(self, src_path): stats = super()._src_path_stats(src_path) # Load source snippets (if the report will display them) # If we cannot load the file, then fail gracefully formatted_snippets = {"html": [], "markdown": [], "terminal": []} if self.include_snippets: with contextlib.suppress(OSError): formatted_snippets = Snippet.load_formatted_snippets( src_path, 
stats["violation_lines"] ) stats.update( { "snippets_html": formatted_snippets["html"], "snippets_markdown": formatted_snippets["markdown"], "snippets_terminal": formatted_snippets["terminal"], "violation_lines": TemplateReportGenerator.combine_adjacent_lines( stats["violation_lines"] ), } ) return stats class StringReportGenerator(TemplateReportGenerator): """ Generate a string diff coverage report. """ template_path = "console_coverage_report.txt" def __init__(self, violations_reporter, diff_reporter, show_uncovered=False): super().__init__(violations_reporter, diff_reporter) self.include_snippets = show_uncovered class HtmlReportGenerator(TemplateReportGenerator): """ Generate an HTML formatted diff coverage report. """ template_path = "html_coverage_report.html" css_template_path = "external_style.css" include_snippets = True class StringQualityReportGenerator(TemplateReportGenerator): """ Generate a string diff quality report. """ template_path = "console_quality_report.txt" class HtmlQualityReportGenerator(TemplateReportGenerator): """ Generate an HTML formatted diff quality report. """ template_path = "html_quality_report.html" css_template_path = "external_style.css" include_snippets = True class MarkdownReportGenerator(TemplateReportGenerator): """ Generate a Markdown formatted diff quality report. """ template_path = "markdown_coverage_report.md" include_snippets = True class MarkdownQualityReportGenerator(TemplateReportGenerator): """ Generate a Markdown formatted diff quality report. """ template_path = "markdown_quality_report.md" include_snippets = True diff_cover-7.4.0/diff_cover/snippets.py000066400000000000000000000334541436411411700201700ustar00rootroot00000000000000""" Load snippets from source files to show violation lines in HTML reports. 
""" import contextlib from tokenize import open as openpy import chardet import pygments from pygments.formatters.html import HtmlFormatter from pygments.formatters.terminal import TerminalFormatter from pygments.lexers import guess_lexer_for_filename from pygments.lexers.special import TextLexer from pygments.util import ClassNotFound from diff_cover.git_path import GitPathTool class Snippet: """ A source code snippet. """ VIOLATION_COLOR = "#ffcccc" DIV_CSS_CLASS = "snippet" # Number of extra lines to include before and after # each snippet to provide context. NUM_CONTEXT_LINES = 4 # Maximum distance between two violations within # a snippet. If violations are further apart, # should split into two snippets. MAX_GAP_IN_SNIPPET = 4 # See https://github.com/github/linguist/blob/master/lib/linguist/languages.yml # for typical values of accepted programming language hints in Markdown code fenced blocks LEXER_TO_MARKDOWN_CODE_HINT = { "Python": "python", "C++": "cpp", # TODO: expand this list... } def __init__( self, src_tokens, src_filename, start_line, last_line, violation_lines, lexer_name, ): """ Create a source code snippet. `src_tokens` is a list of `(token_type, value)` tuples, parsed from the source file. NOTE: `value` must be `unicode`, not a `str` `src_filename` is the name of the source file, used to determine the source file language. `start_line` is the line number of first line in `src_str`. The first line in the file is line number 1. `last_line` is the line number of last line in `src_str`. `violation_lines` is a list of line numbers to highlight as violations. `lexer_name` provides an hint on the programming language for this snippet. 
See https://pygments.org/docs/lexers/ Raises a `ValueError` if `start_line` is less than 1 """ if start_line < 1: raise ValueError("Start line must be >= 1") self._src_tokens = src_tokens self._src_filename = src_filename self._start_line = start_line self._last_line = last_line self._violation_lines = violation_lines self._lexer_name = lexer_name @classmethod def style_defs(cls): """ Return the CSS style definitions required by the formatted snippet. """ formatter = HtmlFormatter() formatter.style.highlight_color = cls.VIOLATION_COLOR return formatter.get_style_defs() def html(self): """ Return an HTML representation of the snippet. """ formatter = HtmlFormatter( cssclass=self.DIV_CSS_CLASS, linenos=True, linenostart=self._start_line, hl_lines=self._shift_lines(self._violation_lines, self._start_line), lineanchors=self._src_filename, ) return pygments.format(self.src_tokens(), formatter) def markdown(self): """ Return a Markdown representation of the snippet using Markdown fenced code blocks. See https://github.github.com/gfm/#fenced-code-blocks. """ header = "Lines %d-%d\n\n" % (self._start_line, self._last_line) if self._lexer_name in self.LEXER_TO_MARKDOWN_CODE_HINT: return header + ( "```" + self.LEXER_TO_MARKDOWN_CODE_HINT[self._lexer_name] + "\n" + self.text() + "\n```\n" ) # unknown programming language, return a non-decorated fenced code block: return "```\n" + self.text() + "\n```\n" def terminal(self): """ Return a Terminal-friendly (with ANSI color sequences) representation of the snippet. """ formatter = TerminalFormatter( linenos=True, colorscheme=None, linenostart=self._start_line, ) return pygments.format(self.src_tokens(), formatter) def src_tokens(self): """ Return a list of `(token_type, value)` tokens parsed from the source file. """ return self._src_tokens def line_range(self): """ Return a tuple of the form `(start_line, end_line)` indicating the start and end line number of the snippet. 
""" num_lines = len(self.text().split("\n")) end_line = self._start_line + num_lines - 1 return (self._start_line, end_line) def text(self): """ Return the source text for the snippet. """ return "".join([val for _, val in self._src_tokens]) @classmethod def load_formatted_snippets(cls, src_path, violation_lines): """ Load snippets from the file at `src_path` and format them as HTML and as plain text. Returns a dictionary containing the two types of formatting results for code snippets. See `load_snippets()` for details. """ # load once... snippet_list = cls.load_snippets(src_path, violation_lines) # ...render twice in different formats return { "html": [snippet.html() for snippet in snippet_list], "markdown": [snippet.markdown() for snippet in snippet_list], "terminal": [snippet.terminal() for snippet in snippet_list], } @classmethod def load_contents(cls, src_path): try: with openpy(GitPathTool.relative_path(src_path)) as src_file: contents = src_file.read() except (SyntaxError, UnicodeDecodeError): # this tool was originally written with python in mind. # for processing non python files encoded in anything other than ascii or utf-8 that # code wont work with open(GitPathTool.relative_path(src_path), "rb") as src_file: contents = src_file.read() if isinstance(contents, bytes): encoding = chardet.detect(contents).get("encoding", "utf-8") with contextlib.suppress(UnicodeDecodeError): contents = contents.decode(encoding) if isinstance(contents, bytes): # We failed to decode the file. # if this is happening a lot I should just bite the bullet # and write a parameter to let people list their file encodings print( "Warning: I was not able to decode your src file. 
" "I can continue but code snippets in the final report may look wrong" ) contents = contents.decode("utf-8", "replace") return contents @classmethod def load_snippets(cls, src_path, violation_lines): """ Load snippets from the file at `src_path` to show violations on lines in the list `violation_lines` (list of line numbers, starting at index 0). The file at `src_path` should be a text file (not binary). Returns a list of `Snippet` instances. Raises an `IOError` if the file could not be loaded. """ contents = cls.load_contents(src_path) # Construct a list of snippet ranges src_lines = contents.split("\n") snippet_ranges = cls._snippet_ranges(len(src_lines), violation_lines) # Parse the source into tokens token_stream, lexer = cls._parse_src(contents, src_path) # Group the tokens by snippet token_groups = cls._group_tokens(token_stream, snippet_ranges) return [ Snippet(tokens, src_path, start, end, violation_lines, lexer.name) for (start, end), tokens in sorted(token_groups.items()) ] @classmethod def _parse_src(cls, src_contents, src_filename): """ Return a stream of `(token_type, value)` tuples parsed from `src_contents` (str) Uses `src_filename` to guess the type of file so it can highlight syntax correctly. """ # Parse the source into tokens try: lexer = guess_lexer_for_filename(src_filename, src_contents) except ClassNotFound: lexer = TextLexer() # Ensure that we don't strip newlines from # the source file when lexing. lexer.stripnl = False return pygments.lex(src_contents, lexer), lexer @classmethod def _group_tokens(cls, token_stream, range_list): """ Group tokens into snippet ranges. `token_stream` is a generator that produces `(token_type, value)` tuples, `range_list` is a list of `(start, end)` tuples representing the (inclusive) range of line numbers for each snippet. Assumes that `range_list` is an ascending order by start value. 
Returns a dict mapping ranges to lists of tokens: { (4, 10): [(ttype_1, val_1), (ttype_2, val_2), ...], (29, 39): [(ttype_3, val_3), ...], ... } The algorithm is slightly complicated because a single token can contain multiple line breaks. """ # Create a map from ranges (start/end tuples) to tokens token_map = {rng: [] for rng in range_list} # Keep track of the current line number; we will # increment this as we encounter newlines in token values line_num = 1 for ttype, val in token_stream: # If there are newlines in this token, # we need to split it up and check whether # each line within the token is within one # of our ranges. if "\n" in val: val_lines = val.split("\n") # Check if the tokens match each range for (start, end), filtered_tokens in token_map.items(): # Filter out lines that are not in this range include_vals = [ val_lines[i] for i in range(len(val_lines)) if i + line_num in range(start, end + 1) ] # If we found any lines, store the tokens if len(include_vals) > 0: token = (ttype, "\n".join(include_vals)) filtered_tokens.append(token) # Increment the line number # by the number of lines we found line_num += len(val_lines) - 1 # No newline in this token # If we're in the line range, add it else: # Check if the tokens match each range for (start, end), filtered_tokens in token_map.items(): # If we got a match, store the token if line_num in range(start, end + 1): filtered_tokens.append((ttype, val)) # Otherwise, ignore the token return token_map @classmethod def _snippet_ranges(cls, num_src_lines, violation_lines): """ Given the number of source file lines and list of violation line numbers, return a list of snippet ranges of the form `(start_line, end_line)`. Each snippet contains a few extra lines of context before/after the first/last violation. Nearby violations are grouped within the same snippet. 
""" current_range = (None, None) lines_since_last_violation = 0 snippet_ranges = [] for line_num in range(1, num_src_lines + 1): # If we have not yet started a snippet, # check if we can (is this line a violation?) if current_range[0] is None: if line_num in violation_lines: # Expand to include extra context, but not before line 1 snippet_start = max(1, line_num - cls.NUM_CONTEXT_LINES) current_range = (snippet_start, None) lines_since_last_violation = 0 # If we are within a snippet, check if we # can end the snippet (have we gone enough # lines without hitting a violation?) elif current_range[1] is None: if line_num in violation_lines: lines_since_last_violation = 0 elif lines_since_last_violation > cls.MAX_GAP_IN_SNIPPET: # Expand to include extra context, but not after last line snippet_end = line_num - lines_since_last_violation snippet_end = min( num_src_lines, snippet_end + cls.NUM_CONTEXT_LINES ) current_range = (current_range[0], snippet_end) # Store the snippet and start looking for the next one snippet_ranges.append(current_range) current_range = (None, None) # Another line since the last violation lines_since_last_violation += 1 # If we started a snippet but didn't finish it, do so now if current_range[0] is not None and current_range[1] is None: snippet_ranges.append((current_range[0], num_src_lines)) return snippet_ranges @staticmethod def _shift_lines(line_num_list, start_line): """ Shift all line numbers in `line_num_list` so that `start_line` is treated as line 1. For example, `[5, 8, 9]` with `start_line=3` would become `[3, 6, 7]`. Assumes that all entries in `line_num_list` are greater than or equal to `start_line`; otherwise, they will be excluded from the list. 
""" return [ line_num - start_line + 1 for line_num in line_num_list if line_num >= start_line ] diff_cover-7.4.0/diff_cover/templates/000077500000000000000000000000001436411411700177365ustar00rootroot00000000000000diff_cover-7.4.0/diff_cover/templates/console_coverage_report.txt000066400000000000000000000013461436411411700254130ustar00rootroot00000000000000------------- Diff Coverage Diff: {{ diff_name }} ------------- {% if src_stats %} {% for src_path, stats in src_stats|dictsort %} {% if stats.percent_covered < 100 %} {{ src_path }} ({{ stats.percent_covered|round(1) }}%): Missing lines {{ stats.violation_lines|join(',') }} {% else %} {{ src_path }} (100%) {% endif %} {% endfor %} ------------- Total: {{ total_num_lines }} {% trans count=total_num_lines %}line{% pluralize %}lines{% endtrans %} Missing: {{ total_num_violations }} {% trans count=total_num_violations %}line{% pluralize %}lines{% endtrans %} Coverage: {{ total_percent_covered }}% ------------- {% else %} No lines with coverage information in this diff. ------------- {% endif %} {% include 'snippet_content.txt' %} diff_cover-7.4.0/diff_cover/templates/console_quality_report.txt000066400000000000000000000014231436411411700253040ustar00rootroot00000000000000------------- Diff Quality Quality Report: {{ report_name }} Diff: {{ diff_name }} ------------- {% if src_stats %} {% for src_path, stats in src_stats|dictsort %} {% if stats.percent_covered < 100 %} {{ src_path }} ({{ stats.percent_covered|round(1) }}%): {% for line, message in stats.violations %} {{ src_path }}:{{ line }}: {{ message }} {% endfor %} {% else %} {{ src_path }} (100%) {% endif %} {% endfor %} ------------- Total: {{ total_num_lines }} {% trans count=total_num_lines %}line{% pluralize %}lines{% endtrans %} Violations: {{ total_num_violations }} {% trans count=total_num_violations %}line{% pluralize %}lines{% endtrans %} % Quality: {{ total_percent_covered }}% ------------- {% else %} No lines with quality information in this diff. 
------------- {% endif %} diff_cover-7.4.0/diff_cover/templates/external_style.css000066400000000000000000000002561436411411700235150ustar00rootroot00000000000000.src-snippet { margin-top: 2em; } .src-name { font-weight: bold; } .snippets { border-top: 1px solid #bdbdbd; border-bottom: 1px solid #bdbdbd; } {{ snippet_style }} diff_cover-7.4.0/diff_cover/templates/html_coverage_report.html000066400000000000000000000031401436411411700250340ustar00rootroot00000000000000 Diff Coverage {% include 'snippet_style.html' %}

Diff Coverage

Diff: {{ diff_name }}

{% if src_stats %}
  • Total: {{ total_num_lines }} {% trans count=total_num_lines %}line{% pluralize %}lines{% endtrans %}
  • Missing: {{ total_num_violations }} {% trans count=total_num_violations %}line{% pluralize %}lines{% endtrans %}
  • Coverage: {{ total_percent_covered }}%
{% for src_path, stats in src_stats|dictsort %} {% if stats.percent_covered < 100 %} {% else %} {% endif %} {% endfor %}
Source File Diff Coverage (%) Missing Lines
{{ src_path }} {{ stats.percent_covered|round(1) }}% {{ stats.violation_lines|join(',') }}
{{ src_path }} 100%  
{% else %}

No lines with coverage information in this diff.

{% endif %} {% include 'snippet_content.html' %} diff_cover-7.4.0/diff_cover/templates/html_quality_report.html000066400000000000000000000035301436411411700247340ustar00rootroot00000000000000 Diff Quality {% include 'snippet_style.html' %}

Diff Quality

Quality Report: {{ report_name }}

Diff: {{ diff_name }}

{% if src_stats %} {% for src_path, stats in src_stats|dictsort %} {% if stats.percent_covered < 100 %} {% else %} {% endif %} {% endfor %}
Source File Diff Quality (%) Lines in violation
{{ src_path }} {{ stats.percent_covered|round(1) }}%
    {% for line, message in stats.violations %}
  • {{ line }}: {{ message }}
  • {% endfor %}
{{ src_path }} 100%  
  • Total: {{ total_num_lines }} {% trans count=total_num_lines %}line{% pluralize %}lines{% endtrans %}
  • Violation: {{ total_num_violations }} {% trans count=total_num_violations %}line{% pluralize %}lines{% endtrans %}
  • % Quality: {{ total_percent_covered }}%
{% else %}

No lines with quality information in this diff.

{% endif %} {% include 'snippet_content.html' %} diff_cover-7.4.0/diff_cover/templates/markdown_coverage_report.md000066400000000000000000000013671436411411700253570ustar00rootroot00000000000000# Diff Coverage ## Diff: {{ diff_name }} {% if src_stats %} {% for src_path, stats in src_stats|dictsort %} {% if stats.percent_covered < 100 %} - {{ src_path | replace(".", ".") }} ({{ stats.percent_covered|round(1) }}%): Missing lines {{ stats.violation_lines|join(',') }} {% else %} - {{ src_path | replace(".", ".") }} (100%) {% endif %} {% endfor %} ## Summary - **Total**: {{ total_num_lines }} {% trans count=total_num_lines %}line{% pluralize %}lines{% endtrans %} - **Missing**: {{ total_num_violations }} {% trans count=total_num_violations %}line{% pluralize %}lines{% endtrans %} - **Coverage**: {{ total_percent_covered }}% {% else %} No lines with coverage information in this diff. {% endif %} {% include 'snippet_content.md' %} diff_cover-7.4.0/diff_cover/templates/markdown_quality_report.md000066400000000000000000000013621436411411700252470ustar00rootroot00000000000000# Diff Quality ## Quality Report: {{ report_name }} ## Diff: {{ diff_name }} {% if src_stats %} {% for src_path, stats in src_stats|dictsort %} {% if stats.percent_covered < 100 %} - {{ src_path }} ({{ stats.percent_covered|round(1) }}%): {% for line, message in stats.violations %} - {{ src_path }}:{{ line }}: {{ message }} {% endfor %} {% else %} - {{ src_path }} (100%) {% endif %} {% endfor %} - **Total**: {{ total_num_lines }} {% trans count=total_num_lines %}line{% pluralize %}lines{% endtrans %} - **Violations**: {{ total_num_violations }} {% trans count=total_num_violations %}line{% pluralize %}lines{% endtrans %} - **% Quality**: {{ total_percent_covered }}% {% else %} No lines with quality information in this diff. 
{% endif %} diff_cover-7.4.0/diff_cover/templates/snippet_content.html000066400000000000000000000005631436411411700240440ustar00rootroot00000000000000{% for src_path, stats in src_stats|dictsort %} {% if stats.snippets_html %}
{{ src_path }}
{% for snippet in stats.snippets_html %} {{ snippet | safe }} {% endfor %}
{% endif %} {% endfor %} diff_cover-7.4.0/diff_cover/templates/snippet_content.md000066400000000000000000000003451436411411700234760ustar00rootroot00000000000000{% for src_path, stats in src_stats|dictsort %} {% if stats.snippets_markdown %} ## {{ src_path | replace(".", ".") }} {% for snippet in stats.snippets_markdown %} {{ snippet }} --- {% endfor %} {% endif %} {% endfor %} diff_cover-7.4.0/diff_cover/templates/snippet_content.txt000066400000000000000000000003141436411411700237110ustar00rootroot00000000000000{% for src_path, stats in src_stats|dictsort %} {% if stats.snippets_terminal %} --- {{ src_path }} --- {% for snippet in stats.snippets_terminal %} {{ snippet }} {% endfor %} {% endif %} {% endfor %} diff_cover-7.4.0/diff_cover/templates/snippet_style.html000066400000000000000000000006431436411411700235310ustar00rootroot00000000000000{% if snippet_style %} {% if css_url %} {% else %} {% endif %} {% endif %} diff_cover-7.4.0/diff_cover/util.py000066400000000000000000000012571436411411700172740ustar00rootroot00000000000000import os.path import posixpath def to_unix_path(path): """ Tries to ensure tha the path is a normalized unix path. This seems to be the solution cobertura used.... https://github.com/cobertura/cobertura/blob/642a46eb17e14f51272c6962e64e56e0960918af/cobertura/src/main/java/net/sourceforge/cobertura/instrument/ClassPattern.java#L84 I know of at least one case where this will fail (\\) is allowed in unix paths. But I am taking the bet that this is not common. We deal with source code. 
:param path: string of the path to convert :return: the unix version of that path """ return posixpath.normpath(os.path.normcase(path).replace("\\", "/")) diff_cover-7.4.0/diff_cover/violationsreporters/000077500000000000000000000000001436411411700220755ustar00rootroot00000000000000diff_cover-7.4.0/diff_cover/violationsreporters/__init__.py000066400000000000000000000000001436411411700241740ustar00rootroot00000000000000diff_cover-7.4.0/diff_cover/violationsreporters/base.py000066400000000000000000000204071436411411700233640ustar00rootroot00000000000000import copy import os import re import sys from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from diff_cover.command_runner import execute, run_command_for_code Violation = namedtuple("Violation", "line, message") class QualityReporterError(Exception): """ A quality reporter command produced an error. """ class BaseViolationReporter(ABC): """ Query information from a coverage report. """ def __init__(self, name): """ Provide a name for the coverage report, which will be included in the generated diff report. """ self._name = name @abstractmethod def violations(self, src_path): """ Return a list of Violations recorded in `src_path`. """ def violations_batch(self, src_paths): """ Return a dict of Violations recorded in `src_paths`. src_paths: Sequence[str] - sequence of paths to source files Returns a Dict[str, List[Violation]]. Keys are paths to source files. If a subclass does not implement this function, violations() will be called instead, once for each src_path in src_paths. """ raise NotImplementedError def measured_lines(self, src_path): """ Return a list of the lines in src_path that were measured by this reporter. Some reporters will always consider all lines in the file "measured". As an optimization, such violation reporters can return `None` to indicate that all lines are measured. The diff reporter generator will then use all changed lines provided by the diff. 
""" # An existing quality plugin "sqlfluff" depends on this # being not abstract and returning None return None def name(self): """ Retrieve the name of the report, which may be included in the generated diff coverage report. For example, `name()` could return the path to the coverage report file or the type of reporter. """ return self._name class QualityDriver(ABC): def __init__( self, name, supported_extensions, command, exit_codes=None, output_stderr=False ): """ Args: name: (str) name of the driver supported_extensions: (list[str]) list of file extensions this driver supports Example: py, js command: (list[str]) list of tokens that are the command to be executed to create a report exit_codes: (list[int]) list of exit codes that do not indicate a command error output_stderr: (bool) use stderr instead of stdout from the invoked command """ self.name = name self.supported_extensions = supported_extensions self.command = command self.exit_codes = exit_codes self.output_stderr = output_stderr @abstractmethod def parse_reports(self, reports): """ Args: reports: list[str] - output from the report Return: A dict[Str:Violation] Violation is a simple named tuple Defined above """ @abstractmethod def installed(self): """ Method checks if the provided tool is installed. Returns: boolean True if installed """ def add_driver_args(self, **kwargs): """Inject additional driver related arguments. A driver can override the method. By default an exception is raised. """ raise ValueError(f"Unsupported argument(s) {kwargs.keys()}") class QualityReporter(BaseViolationReporter): def __init__(self, driver, reports=None, options=None): """ Args: driver (QualityDriver) object that works with the underlying quality tool reports (list[file]) pre-generated reports. 
If not provided the tool will be run instead options (str) options to be passed into the command """ super().__init__(driver.name) self.reports = self._load_reports(reports) if reports else None self.violations_dict = defaultdict(list) self.driver = driver self.options = options self.driver_tool_installed = None def _load_reports(self, report_files): """ Args: report_files: list[file] reports to read in """ contents = [] for file_handle in report_files: # Convert to unicode, replacing unreadable chars contents.append(file_handle.read().decode("utf-8", "replace")) return contents def violations(self, src_path): """ Return a list of Violations recorded in `src_path`. """ if not any(src_path.endswith(ext) for ext in self.driver.supported_extensions): return [] if src_path not in self.violations_dict: if self.reports: self.violations_dict = self.driver.parse_reports(self.reports) else: if self.driver_tool_installed is None: self.driver_tool_installed = self.driver.installed() if not self.driver_tool_installed: raise OSError(f"{self.driver.name} is not installed") command = copy.deepcopy(self.driver.command) if self.options: for arg in self.options.split(): command.append(arg) if os.path.exists(src_path): command.append(src_path.encode(sys.getfilesystemencoding())) output = execute(command, self.driver.exit_codes) if self.driver.output_stderr: output = output[1] else: output = output[0] self.violations_dict.update(self.driver.parse_reports([output])) return self.violations_dict[src_path] def measured_lines(self, src_path): """ Quality Reports Consider all lines measured """ return None def name(self): """ Retrieve the name of the report, which may be included in the generated diff coverage report. For example, `name()` could return the path to the coverage report file or the type of reporter. 
""" return self._name class RegexBasedDriver(QualityDriver): def __init__( self, name, supported_extensions, command, expression, command_to_check_install, flags=0, exit_codes=None, ): """ args: expression: regex used to parse report, will be fed lines singly unless flags contain re.MULTILINE flags: such as re.MULTILINE See super for other args command_to_check_install: (list[str]) command to run to see if the tool is installed """ super().__init__(name, supported_extensions, command, exit_codes) self.expression = re.compile(expression, flags) self.command_to_check_install = command_to_check_install self.is_installed = None def parse_reports(self, reports): """ Args: reports: list[str] - output from the report Return: A dict[Str:Violation] Violation is a simple named tuple Defined above """ violations_dict = defaultdict(list) for report in reports: if self.expression.flags & re.MULTILINE: matches = (match for match in re.finditer(self.expression, report)) else: matches = (self.expression.match(line) for line in report.split("\n")) for match in matches: if match is not None: src, line_number, message = match.groups() # Transform src to a relative path, if it isn't already src = os.path.relpath(src) violation = Violation(int(line_number), message) violations_dict[src].append(violation) return violations_dict def installed(self): """ Method checks if the provided tool is installed. Returns: boolean True if installed """ return run_command_for_code(self.command_to_check_install) == 0 diff_cover-7.4.0/diff_cover/violationsreporters/java_violations_reporter.py000066400000000000000000000133161436411411700275650ustar00rootroot00000000000000""" Classes for querying the information in a test coverage report. 
""" import os from collections import defaultdict try: # Needed for Python < 3.3, works up to 3.8 import xml.etree.cElementTree as etree except ImportError: # Python 3.9 onwards import xml.etree.ElementTree as etree from diff_cover.command_runner import run_command_for_code from diff_cover.git_path import GitPathTool from diff_cover.violationsreporters.base import ( QualityDriver, RegexBasedDriver, Violation, ) # Report checkstyle violations. # http://checkstyle.sourceforge.net/apidocs/com/puppycrawl/tools/checkstyle/DefaultLogger.html # https://github.com/checkstyle/checkstyle/blob/master/src/main/java/com/puppycrawl/tools/checkstyle/AuditEventDefaultFormatter.java checkstyle_driver = RegexBasedDriver( name="checkstyle", supported_extensions=["java"], command=["checkstyle"], expression=r"^\[\w+\]\s+([^:]+):(\d+):(?:\d+:)? (.*)$", command_to_check_install=[ "java", "com.puppycrawl.tools.checkstyle.Main", "-version", ], ) class CheckstyleXmlDriver(QualityDriver): def __init__(self): """ See super for args """ super().__init__( "checkstyle", ["java"], [ "java", "com.puppycrawl.tools.checkstyle.Main", "-c", "/google_checks.xml", ], ) self.command_to_check_install = [ "java", "com.puppycrawl.tools.checkstyle.Main", "-version", ] def parse_reports(self, reports): """ Args: reports: list[str] - output from the report Return: A dict[Str:Violation] Violation is a simple named tuple Defined above """ violations_dict = defaultdict(list) for report in reports: xml_document = etree.fromstring("".join(report)) files = xml_document.findall(".//file") for file_tree in files: for error in file_tree.findall("error"): line_number = error.get("line") error_str = "{}: {}".format( error.get("severity"), error.get("message") ) violation = Violation(int(line_number), error_str) filename = GitPathTool.relative_path(file_tree.get("name")) violations_dict[filename].append(violation) return violations_dict def installed(self): """ Method checks if the provided tool is installed. 
Returns: boolean True if installed """ return run_command_for_code(self.command_to_check_install) == 0 class FindbugsXmlDriver(QualityDriver): def __init__(self): """ See super for args """ super().__init__("findbugs", ["java"], ["false"]) def parse_reports(self, reports): """ Args: reports: list[str] - output from the report Return: A dict[Str:Violation] Violation is a simple named tuple Defined above """ violations_dict = defaultdict(list) for report in reports: xml_document = etree.fromstring("".join(report)) bugs = xml_document.findall(".//BugInstance") for bug in bugs: category = bug.get("category") short_message = bug.find("ShortMessage").text line = bug.find("SourceLine") if line.get("start") is None or line.get("end") is None: continue start = int(line.get("start")) end = int(line.get("end")) for line_number in range(start, end + 1): error_str = f"{category}: {short_message}" violation = Violation(line_number, error_str) filename = GitPathTool.relative_path(line.get("sourcepath")) violations_dict[filename].append(violation) return violations_dict def installed(self): """ Method checks if the provided tool is installed. Returns: boolean False: As findbugs analyses bytecode, it would be hard to run it from outside the build framework. 
""" return False class PmdXmlDriver(QualityDriver): def __init__(self): """ See super for args """ super().__init__("pmd", ["java"], []) def parse_reports(self, reports): """ Args: reports: list[str] - output from the report Return: A dict[Str:Violation] Violation is a simple named tuple Defined above """ violations_dict = defaultdict(list) for report in reports: xml_document = etree.fromstring("".join(report)) node_files = xml_document.findall(".//file") for node_file in node_files: for error in node_file.findall("violation"): line_number = error.get("beginline") error_str = "{}: {}".format(error.get("rule"), error.text.strip()) violation = Violation(int(line_number), error_str) filename = GitPathTool.relative_path(node_file.get("name")) filename = filename.replace(os.sep, "/") violations_dict[filename].append(violation) return violations_dict def installed(self): """ Method checks if the provided tool is installed. Returns: boolean False: As findbugs analyses bytecode, it would be hard to run it from outside the build framework. """ return False diff_cover-7.4.0/diff_cover/violationsreporters/violations_reporter.py000066400000000000000000000617401436411411700265700ustar00rootroot00000000000000""" Classes for querying the information in a test coverage report. """ import itertools import os import os.path import re from collections import defaultdict from diff_cover import util from diff_cover.command_runner import run_command_for_code from diff_cover.git_path import GitPathTool from diff_cover.violationsreporters.base import ( BaseViolationReporter, QualityDriver, RegexBasedDriver, Violation, ) class XmlCoverageReporter(BaseViolationReporter): """ Query information from a Cobertura|Clover|JaCoCo XML coverage report. """ def __init__(self, xml_roots, src_roots=None): """ Load the XML coverage report represented by the cElementTree with root element `xml_root`. 
""" super().__init__("XML") self._xml_roots = xml_roots # Create a dict to cache violations dict results # Keys are source file paths, values are output of `violations()` self._info_cache = defaultdict(list) # Create a list to cache xml classes list results # Values are output of `self._get_xml_classes()` self._xml_cache = [{} for i in range(len(xml_roots))] self._src_roots = src_roots or [""] def _get_xml_classes(self, xml_document): """ Return a dict of classes in `xml_document`. Keys are `filename`, values are list of `class` If `class` is not present in `xml_document`, return empty defaultdict(list) """ # cobertura sometimes provides the sources for the measurements # within it. If we have that we outta use it sources = xml_document.findall("sources/source") sources = [source.text for source in sources if source.text] classes = xml_document.findall(".//class") or [] res = defaultdict(list) for clazz in classes: f = clazz.get("filename") if not f: continue res[util.to_unix_path(f)].append(clazz) for source in sources: abs_f = util.to_unix_path(os.path.join(source.strip(), f)) res[abs_f].append(clazz) return res def _get_classes(self, index, xml_document, src_path): """ Given a path and parsed xml_document provides class nodes with the relevant lines First, we look to see if xml_document contains a source node providing paths to search for If we don't have that we check each nodes filename attribute matches an absolute path Finally, if we found no nodes, we check the filename attribute for the relative path """ # Remove git_root from src_path for searching the correct filename # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `diff_cover/violations_reporter.py` # search for `violations_reporter.py` src_rel_path = util.to_unix_path(GitPathTool.relative_path(src_path)) # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `other_package/some_file.py` # search for `/home/user/work/diff-cover/other_package/some_file.py` 
src_abs_path = util.to_unix_path(GitPathTool.absolute_path(src_path)) # Create a cache for `classes` in `xml_document` if cache exists if not self._xml_cache[index]: self._xml_cache[index] = self._get_xml_classes(xml_document) return self._xml_cache[index].get(src_abs_path) or self._xml_cache[index].get( src_rel_path ) def get_src_path_line_nodes_cobertura(self, index, xml_document, src_path): classes = self._get_classes(index, xml_document, src_path) if not classes: return None lines = [clazz.findall("./lines/line") for clazz in classes] return list(itertools.chain(*lines)) @staticmethod def get_src_path_line_nodes_clover(xml_document, src_path): """ Return a list of nodes containing line information for `src_path` in `xml_document`. If file is not present in `xml_document`, return None """ files = [ file_tree for file_tree in xml_document.findall(".//file") if GitPathTool.relative_path(file_tree.get("path")) == src_path ] if not files: return None lines = [] for file_tree in files: lines.append(file_tree.findall('./line[@type="stmt"]')) lines.append(file_tree.findall('./line[@type="cond"]')) return list(itertools.chain(*lines)) def _measured_source_path_matches(self, package_name, file_name, src_path): # find src_path in any of the source roots if not src_path.endswith(file_name): return False norm_src_path = os.path.normcase(src_path) for root in self._src_roots: if ( os.path.normcase( GitPathTool.relative_path( os.path.join(root, package_name, file_name) ) ) == norm_src_path ): return True return False def get_src_path_line_nodes_jacoco(self, xml_document, src_path): """ Return a list of nodes containing line information for `src_path` in `xml_document`. 
If file is not present in `xml_document`, return None """ files = [] packages = list(xml_document.findall(".//package")) for pkg in packages: _files = [ _file for _file in pkg.findall("sourcefile") if self._measured_source_path_matches( pkg.get("name"), _file.get("name"), src_path ) ] files.extend(_files) if not files: return None lines = [file_tree.findall("./line") for file_tree in files] return list(itertools.chain(*lines)) def _cache_file(self, src_path): """ Load the data from `self._xml_roots` for `src_path`, if it hasn't been already. """ # If we have not yet loaded this source file if src_path not in self._info_cache: # We only want to keep violations that show up in each xml source. # Thus, each time, we take the intersection. However, to do this # we must treat the first time as a special case and just add all # the violations from the first xml report. violations = None # A line is measured if it is measured in any of the reports, so # we take set union each time and can just start with the empty set measured = set() # Loop through the files that contain the xml roots for i, xml_document in enumerate(self._xml_roots): if xml_document.findall(".[@clover]"): # see etc/schema/clover.xsd at https://bitbucket.org/atlassian/clover/src line_nodes = self.get_src_path_line_nodes_clover( xml_document, src_path ) _number = "num" _hits = "count" elif xml_document.findall(".[@name]"): # https://github.com/jacoco/jacoco/blob/master/org.jacoco.report/src/org/jacoco/report/xml/report.dtd line_nodes = self.get_src_path_line_nodes_jacoco( xml_document, src_path ) _number = "nr" _hits = "ci" else: # https://github.com/cobertura/web/blob/master/htdocs/xml/coverage-04.dtd line_nodes = self.get_src_path_line_nodes_cobertura( i, xml_document, src_path ) _number = "number" _hits = "hits" if line_nodes is None: continue # First case, need to define violations initially if violations is None: violations = { Violation(int(line.get(_number)), None) for line in line_nodes if 
int(line.get(_hits, 0)) == 0 } # If we already have a violations set, # take the intersection of the new # violations set and its old self else: violations = violations & { Violation(int(line.get(_number)), None) for line in line_nodes if int(line.get(_hits, 0)) == 0 } # Measured is the union of itself and the new measured measured = measured | {int(line.get(_number)) for line in line_nodes} # If we don't have any information about the source file, # don't report any violations if violations is None: violations = set() self._info_cache[src_path] = (violations, measured) def violations(self, src_path): """ See base class comments. """ self._cache_file(src_path) # Yield all lines not covered return self._info_cache[src_path][0] def measured_lines(self, src_path): """ See base class docstring. """ self._cache_file(src_path) return self._info_cache[src_path][1] class LcovCoverageReporter(BaseViolationReporter): """ Query information from a Cobertura|Clover|JaCoCo XML coverage report. """ def __init__(self, lcov_roots, src_roots=None): """ Load the lcov.info coverage report represented """ super().__init__("LCOV") self._lcov_roots = lcov_roots self._lcov_report = defaultdict(list) # Create a dict to cache violations dict results # Keys are source file paths, values are output of `violations()` self._info_cache = defaultdict(list) self._src_roots = src_roots or [""] @staticmethod def parse(lcov_file): """ Parse a single LCov coverage report File format: https://ltp.sourceforge.net/coverage/lcov/geninfo.1.php """ lcov_report = defaultdict(dict) lcov = open(lcov_file, "r") while True: line = lcov.readline() if not line: break directive, _, content = line.strip().partition(":") # we're only interested in file name and line coverage if directive == "SF": # SF: source_file = content continue elif directive == "DA": # DA:,[,] args = content.split(",") if len(args) < 2 or len(args) > 3: raise ValueError(f"Unknown syntax in lcov report: {line}") line_no = int(args[0]) 
num_executions = int(args[1]) if source_file is None: raise ValueError( f"No source file specified for line coverage: {line}" ) if line_no not in lcov_report[source_file]: lcov_report[source_file][line_no] = 0 lcov_report[source_file][line_no] += num_executions elif directive in [ "TN", "FNF", "FNH", "FN", "FNDA", "LH", "LF", "BRF", "BRH", "BRDA", ]: # these are valid lines, but not we don't need them continue elif directive == "end_of_record": source_file = None else: raise ValueError(f"Unknown syntax in lcov report: {line}") lcov.close() return lcov_report def _cache_file(self, src_path): """ Load the data from `self._lcov_roots` for `src_path`, if it hasn't been already. """ # If we have not yet loaded this source file if src_path not in self._info_cache: # We only want to keep violations that show up in each xml source. # Thus, each time, we take the intersection. However, to do this # we must treat the first time as a special case and just add all # the violations from the first xml report. 
violations = None # A line is measured if it is measured in any of the reports, so # we take set union each time and can just start with the empty set measured = set() # Remove git_root from src_path for searching the correct filename # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `diff_cover/violations_reporter.py` # search for `violations_reporter.py` src_rel_path = util.to_unix_path(GitPathTool.relative_path(src_path)) # If cwd is `/home/user/work/diff-cover/diff_cover` # and src_path is `other_package/some_file.py` # search for `/home/user/work/diff-cover/other_package/some_file.py` src_abs_path = util.to_unix_path(GitPathTool.absolute_path(src_path)) # Loop through the files that contain the xml roots for lcov_document in self._lcov_roots: src_search_path = src_abs_path if src_search_path not in lcov_document: src_search_path = src_rel_path # First case, need to define violations initially if violations is None: violations = { Violation(int(line_no), None) for line_no, num_executions in lcov_document[ src_search_path ].items() if int(num_executions) == 0 } # If we already have a violations set, # take the intersection of the new # violations set and its old self else: violations = violations & { Violation(int(line_no), None) for line_no, num_executions in lcov_document[ src_search_path ].items() if int(num_executions) == 0 } # Measured is the union of itself and the new measured # measured = measured | {int(line.get(_number)) for line in line_nodes} measured = measured | { int(line_no) for line_no, num_executions in lcov_document[ src_search_path ].items() } # If we don't have any information about the source file, # don't report any violations if violations is None: violations = set() self._info_cache[src_path] = (violations, measured) def violations(self, src_path): """ See base class comments. 
""" self._cache_file(src_path) # Yield all lines not covered return self._info_cache[src_path][0] def measured_lines(self, src_path): """ See base class docstring. """ self._cache_file(src_path) return self._info_cache[src_path][1] pycodestyle_driver = RegexBasedDriver( name="pycodestyle", supported_extensions=["py"], command=["pycodestyle"], expression=r"^([^:]+):(\d+).*([EW]\d{3}.*)$", command_to_check_install=["pycodestyle", "--version"], # pycodestyle exit code is 1 if there are violations # http://pycodestyle.pycqa.org/en/latest/intro.html exit_codes=[0, 1], ) pyflakes_driver = RegexBasedDriver( name="pyflakes", supported_extensions=["py"], command=["pyflakes"], # Match lines of the form: # path/to/file.py:328: undefined name '_thing' # path/to/file.py:418: 'random' imported but unused expression=r"^([^:]+):(\d+):\d*:? (.*)$", command_to_check_install=["pyflakes", "--version"], # pyflakes exit code is 1 if there are violations # https://github.com/PyCQA/pyflakes/blob/master/pyflakes/api.py#L211 exit_codes=[0, 1], ) """ Report Flake8 violations. 
""" flake8_driver = RegexBasedDriver( name="flake8", supported_extensions=["py"], command=["flake8"], # Match lines of the form: # new_file.py:1:17: E231 whitespace expression=r"^([^:]+):(\d+):(?:\d+): ([a-zA-Z]+\d+.*)$", command_to_check_install=["flake8", "--version"], # flake8 exit code is 1 if there are violations # http://flake8.pycqa.org/en/latest/user/invocation.html exit_codes=[0, 1], ) jshint_driver = RegexBasedDriver( name="jshint", supported_extensions=["js"], command=["jshint"], expression=r"^([^:]+): line (\d+), col \d+, (.*)$", command_to_check_install=["jshint", "-v"], ) class EslintDriver(RegexBasedDriver): def __init__(self): super().__init__( name="eslint", supported_extensions=["js"], command=["eslint", "--format=compact"], expression=r"^([^:]+): line (\d+), col \d+, (.*)$", command_to_check_install=["eslint", "-v"], ) self.report_root_path = None def add_driver_args(self, **kwargs): self.report_root_path = kwargs.pop("report_root_path", None) if kwargs: super().add_driver_args(**kwargs) def parse_reports(self, reports): violations_dict = super().parse_reports(reports) if self.report_root_path: keys = list(violations_dict.keys()) for key in keys: new_key = os.path.relpath(key, self.report_root_path) violations_dict[new_key] = violations_dict.pop(key) return violations_dict """ Report pydocstyle violations. 
Warning/error codes: D1**: Missing Docstrings D2**: Whitespace Issues D3**: Quotes Issues D4**: Docstring Content Issues http://www.pydocstyle.org/en/latest/error_codes.html """ pydocstyle_driver = RegexBasedDriver( name="pydocstyle", supported_extensions=["py"], command=["pydocstyle"], expression=r"^(.+?):(\d+).*?$.+?^ (.*?)$", command_to_check_install=["pydocstyle", "--version"], flags=re.MULTILINE | re.DOTALL, # pydocstyle exit code is 1 if there are violations # http://www.pydocstyle.org/en/2.1.1/usage.html#return-code exit_codes=[0, 1], ) class PylintDriver(QualityDriver): def __init__(self): """ args: expression: regex used to parse report See super for other args """ super().__init__( "pylint", ["py"], [ "pylint", '--msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"', ], # Pylint returns bit-encoded exit codes as documented here: # https://pylint.readthedocs.io/en/latest/user_guide/run.html # 1 = fatal error, occurs if an error prevents pylint from doing further processing # 2,4,8,16 = error/warning/refactor/convention message issued # 32 = usage error [ 0, 2, 4, 2 | 4, 8, 2 | 8, 4 | 8, 2 | 4 | 8, 16, 2 | 16, 4 | 16, 2 | 4 | 16, 8 | 16, 2 | 8 | 16, 4 | 8 | 16, 2 | 4 | 8 | 16, ], ) self.pylint_expression = re.compile( r"^([^:]+):(\d+): \[(\w+),? ?([^\]]*)] (.*)$" ) self.dupe_code_violation = "R0801" self.command_to_check_install = ["pylint", "--version"] # Match lines of the form: # path/to/file.py:123: [C0111] Missing docstring # path/to/file.py:456: [C0111, Foo.bar] Missing docstring self.multi_line_violation_regex = re.compile(r"==((?:\w|\.)+?):\[?(\d+)") self.dupe_code_violation_regex = re.compile(r"Similar lines in (\d+) files") def _process_dupe_code_violation(self, lines, current_line, message): """ The duplicate code violation is a multi line error. 
This pulls out all the relevant files """ src_paths = [] message_match = self.dupe_code_violation_regex.match(message) if message_match: for _ in range(int(message_match.group(1))): current_line += 1 match = self.multi_line_violation_regex.match(lines[current_line]) src_path, l_number = match.groups() src_paths.append(("%s.py" % src_path, l_number)) return src_paths def parse_reports(self, reports): """ Args: reports: list[str] - output from the report Return: A dict[Str:Violation] Violation is a simple named tuple Defined above """ violations_dict = defaultdict(list) for report in reports: output_lines = report.split("\n") for output_line_number, line in enumerate(output_lines): match = self.pylint_expression.match(line) # Ignore any line that isn't matched # (for example, snippets from the source code) if match is not None: ( pylint_src_path, line_number, pylint_code, function_name, message, ) = match.groups() if pylint_code == self.dupe_code_violation: files_involved = self._process_dupe_code_violation( output_lines, output_line_number, message ) else: files_involved = [(pylint_src_path, line_number)] for violation in files_involved: pylint_src_path, line_number = violation # pylint might uses windows paths pylint_src_path = util.to_unix_path(pylint_src_path) # If we're looking for a particular source file, # ignore any other source files. if function_name: error_str = "{}: {}: {}".format( pylint_code, function_name, message ) else: error_str = f"{pylint_code}: {message}" violation = Violation(int(line_number), error_str) violations_dict[pylint_src_path].append(violation) return violations_dict def installed(self): """ Method checks if the provided tool is installed. Returns: boolean True if installed """ return run_command_for_code(self.command_to_check_install) == 0 class CppcheckDriver(QualityDriver): """ Driver for cppcheck c/c++ static analyzer. 
""" def __init__(self): """ args: expression: regex used to parse report See super for other args """ super().__init__( "cppcheck", ["c", "cpp", "h", "hpp"], ["cppcheck", "--quiet"], output_stderr=True, ) # Errors look like: # [src/foo.c:123]: (error) Array 'yolo[4]' accessed at index 4, which is out of bounds. # Match for everything, including ":" in the file name (first capturing # group), in case there are pathological path names with ":" self.cppcheck_expression = re.compile(r"^\[(.*?):(\d+)\]: (.*$)") self.command_to_check_install = ["cppcheck", "--version"] def parse_reports(self, reports): """ Args: reports: list[str] - output from the report Return: A dict[Str:Violation] Violation is a simple named tuple Defined above """ violations_dict = defaultdict(list) for report in reports: output_lines = report.splitlines() for line in output_lines: match = self.cppcheck_expression.match(line) # Ignore any line that isn't matched # (for example, snippets from the source code) if match is not None: (cppcheck_src_path, line_number, message) = match.groups() violation = Violation(int(line_number), message) violations_dict[cppcheck_src_path].append(violation) return violations_dict def installed(self): """ Method checks if the provided tool is installed. Returns: boolean True if installed """ return run_command_for_code(self.command_to_check_install) == 0 diff_cover-7.4.0/poetry.lock000066400000000000000000000724021436411411700160330ustar00rootroot00000000000000[[package]] name = "astroid" version = "2.13.2" description = "An abstract syntax tree for Python with inference support." 
category = "dev" optional = false python-versions = ">=3.7.2" [package.dependencies] lazy-object-proxy = ">=1.4.0" typed-ast = {version = ">=1.4.0,<2.0", markers = "implementation_name == \"cpython\" and python_version < \"3.8\""} typing-extensions = ">=4.0.0" wrapt = [ {version = ">=1.11,<2", markers = "python_version < \"3.11\""}, {version = ">=1.14,<2", markers = "python_version >= \"3.11\""}, ] [[package]] name = "attrs" version = "22.2.0" description = "Classes Without Boilerplate" category = "dev" optional = false python-versions = ">=3.6" [package.extras] cov = ["attrs", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] dev = ["attrs"] docs = ["furo", "sphinx", "myst-parser", "zope.interface", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] tests = ["attrs", "zope.interface"] tests-no-zope = ["hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist", "cloudpickle", "mypy (>=0.971,<0.990)", "pytest-mypy-plugins"] tests_no_zope = ["hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist", "cloudpickle", "mypy (>=0.971,<0.990)", "pytest-mypy-plugins"] [[package]] name = "black" version = "22.12.0" description = "The uncompromising code formatter." 
category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] click = ">=8.0.0" mypy-extensions = ">=0.4.3" pathspec = ">=0.9.0" platformdirs = ">=2" tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} [package.extras] colorama = ["colorama (>=0.4.3)"] d = ["aiohttp (>=3.7.4)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "chardet" version = "5.1.0" description = "Universal encoding detector for Python 3" category = "main" optional = false python-versions = ">=3.7" [[package]] name = "click" version = "8.1.3" description = "Composable command line interface toolkit" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" [[package]] name = "coverage" version = "7.0.5" description = "Code coverage measurement for Python" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] toml = ["tomli"] [[package]] name = "dill" version = "0.3.6" description = "serialize all of python" category = "dev" optional = false python-versions = ">=3.7" [package.extras] graph = ["objgraph (>=1.7.2)"] [[package]] name = "doc8" version = "1.0.0" description = "Style checker for Sphinx (or other) RST documentation" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] docutils = ">=0.19,<0.21" Pygments = "*" restructuredtext-lint = ">=0.7" stevedore = "*" tomli = {version = "*", markers = "python_version < \"3.11\""} [[package]] name = "docutils" version = "0.19" description = "Docutils -- Python Documentation Utilities" category = "dev" optional = false python-versions = ">=3.7" [[package]] name = "exceptiongroup" version = "1.1.0" description = "Backport of PEP 654 (exception groups)" category = "dev" optional = false python-versions = ">=3.7" [package.extras] test = ["pytest (>=6)"] [[package]] name = "flake8" version = "5.0.4" description = "the modular source code checker: pep8 pyflakes and co" category = "dev" optional = false python-versions = ">=3.6.1" [package.dependencies] importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""} mccabe = ">=0.7.0,<0.8.0" pycodestyle = ">=2.9.0,<2.10.0" pyflakes = ">=2.5.0,<2.6.0" [[package]] name = "importlib-metadata" version = "4.2.0" description = "Read metadata from Python packages" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < 
\"3.8\""} zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" category = "dev" optional = false python-versions = ">=3.7" [[package]] name = "isort" version = "5.11.4" description = "A Python utility / library to sort Python imports." category = "dev" optional = false python-versions = ">=3.7.0" [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] requirements-deprecated-finder = ["pip-api", "pipreqs"] pipfile-deprecated-finder = ["pipreqs", "requirementslib"] plugins = ["setuptools"] [[package]] name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." category = "main" optional = false python-versions = ">=3.7" [package.dependencies] MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] [[package]] name = "lazy-object-proxy" version = "1.9.0" description = "A fast and thorough lazy object proxy." category = "dev" optional = false python-versions = ">=3.7" [[package]] name = "markupsafe" version = "2.1.2" description = "Safely add untrusted strings to HTML/XML markup." category = "main" optional = false python-versions = ">=3.7" [[package]] name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" category = "dev" optional = false python-versions = ">=3.6" [[package]] name = "mypy-extensions" version = "0.4.3" description = "Experimental type system extensions for programs checked with the mypy typechecker." 
category = "dev" optional = false python-versions = "*" [[package]] name = "packaging" version = "23.0" description = "Core utilities for Python packages" category = "dev" optional = false python-versions = ">=3.7" [[package]] name = "pathspec" version = "0.10.3" description = "Utility library for gitignore style pattern matching of file paths." category = "dev" optional = false python-versions = ">=3.7" [[package]] name = "pbr" version = "5.11.1" description = "Python Build Reasonableness" category = "dev" optional = false python-versions = ">=2.6" [[package]] name = "platformdirs" version = "2.6.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""} [package.extras] docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx-autodoc-typehints (>=1.19.5)", "sphinx (>=5.3)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)", "pytest (>=7.2)"] [[package]] name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] [[package]] name = "pycodestyle" version = "2.9.1" description = "Python style guide checker" category = "dev" optional = false python-versions = ">=3.6" [[package]] name = "pydocstyle" version = "6.3.0" description = "Python docstring style checker" category = "dev" optional = false python-versions = ">=3.6" [package.dependencies] importlib-metadata = {version = ">=2.0.0,<5.0.0", markers = "python_version < \"3.8\""} snowballstemmer = ">=2.2.0" [package.extras] toml = ["tomli (>=1.2.3)"] [[package]] name = 
"pyflakes" version = "2.5.0" description = "passive checker of Python programs" category = "dev" optional = false python-versions = ">=3.6" [[package]] name = "pygments" version = "2.14.0" description = "Pygments is a syntax highlighting package written in Python." category = "main" optional = false python-versions = ">=3.6" [package.extras] plugins = ["importlib-metadata"] [[package]] name = "pylint" version = "2.15.10" description = "python code static checker" category = "dev" optional = false python-versions = ">=3.7.2" [package.dependencies] astroid = ">=2.12.13,<=2.14.0-dev0" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.2", markers = "python_version < \"3.11\""}, {version = ">=0.3.6", markers = "python_version >= \"3.11\""}, ] isort = ">=4.2.5,<6" mccabe = ">=0.6,<0.8" platformdirs = ">=2.2.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} tomlkit = ">=0.10.1" typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} [package.extras] spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] [[package]] name = "pylint-pytest" version = "1.1.2" description = "A Pylint plugin to suppress pytest-related false positives." 
category = "dev" optional = false python-versions = ">=3.6" [package.dependencies] pylint = "*" pytest = ">=4.6" [[package]] name = "pytest" version = "7.2.1" description = "pytest: simple powerful testing with Python" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] [[package]] name = "pytest-cov" version = "4.0.0" description = "Pytest plugin for measuring coverage." category = "dev" optional = false python-versions = ">=3.6" [package.dependencies] coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtualenv"] [[package]] name = "pytest-datadir" version = "1.4.1" description = "pytest plugin for test data directories and files" category = "dev" optional = false python-versions = ">=3.6" [package.dependencies] pytest = ">=5.0" [[package]] name = "pytest-mock" version = "3.10.0" description = "Thin-wrapper around the mock package for easier use with pytest" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] pytest = ">=5.0" [package.extras] dev = ["pre-commit", "tox", "pytest-asyncio"] [[package]] name = "restructuredtext-lint" version = "1.4.0" description = "reStructuredText linter" category = "dev" optional = false python-versions = "*" [package.dependencies] docutils = ">=0.11,<1.0" [[package]] name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages 
generated from Snowball algorithms." category = "dev" optional = false python-versions = "*" [[package]] name = "stevedore" version = "3.5.2" description = "Manage dynamic plugins for Python applications" category = "dev" optional = false python-versions = ">=3.6" [package.dependencies] importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""} pbr = ">=2.0.0,<2.1.0 || >2.1.0" [[package]] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" category = "main" optional = false python-versions = ">=3.7" [[package]] name = "tomlkit" version = "0.11.6" description = "Style preserving TOML library" category = "dev" optional = false python-versions = ">=3.6" [[package]] name = "typed-ast" version = "1.5.4" description = "a fork of Python 2 and 3 ast modules with type comment support" category = "dev" optional = false python-versions = ">=3.6" [[package]] name = "typing-extensions" version = "4.4.0" description = "Backported and Experimental Type Hints for Python 3.7+" category = "main" optional = false python-versions = ">=3.7" [[package]] name = "wrapt" version = "1.14.1" description = "Module for decorators, wrappers and monkey patching." 
category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [[package]] name = "zipp" version = "3.11.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false python-versions = ">=3.7" [package.extras] docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"] testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "jaraco.itertools", "func-timeout", "jaraco.functools", "more-itertools", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "pytest-flake8"] [extras] toml = ["tomli"] [metadata] lock-version = "1.1" python-versions = "^3.7.2" content-hash = "b87e1facd2178f87fe8546450e2fa3befc99d85522492e5905a495d7b8f357ca" [metadata.files] astroid = [] attrs = [] black = [] chardet = [] click = [ {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, ] colorama = [] coverage = [] dill = [] doc8 = [] docutils = [] exceptiongroup = [] flake8 = [] importlib-metadata = [ {file = "importlib_metadata-4.2.0-py3-none-any.whl", hash = "sha256:057e92c15bc8d9e8109738a48db0ccb31b4d9d5cfbee5a8670879a30be66304b"}, {file = "importlib_metadata-4.2.0.tar.gz", hash = "sha256:b7e52a1f8dec14a75ea73e0891f3060099ca1d8e6a462a4dff11c3e119ea1b31"}, ] iniconfig = [] isort = [] jinja2 = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, ] lazy-object-proxy = [] markupsafe = [] mccabe = [] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = 
"sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] packaging = [] pathspec = [] pbr = [] platformdirs = [] pluggy = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] pycodestyle = [] pydocstyle = [] pyflakes = [] pygments = [] pylint = [] pylint-pytest = [ {file = "pylint_pytest-1.1.2-py2.py3-none-any.whl", hash = "sha256:fb20ef318081cee3d5febc631a7b9c40fa356b05e4f769d6e60a337e58c8879b"}, ] pytest = [] pytest-cov = [] pytest-datadir = [] pytest-mock = [] restructuredtext-lint = [ {file = "restructuredtext_lint-1.4.0.tar.gz", hash = "sha256:1b235c0c922341ab6c530390892eb9e92f90b9b75046063e047cacfb0f050c45"}, ] snowballstemmer = [ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, ] stevedore = [] tomli = [] tomlkit = [] typed-ast = [ {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, {file = 
"typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, {file = 
"typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, ] typing-extensions = [] wrapt = [ {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, {file = 
"wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = 
"wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", 
hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, {file = 
"wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, ] zipp = [] diff_cover-7.4.0/pyproject.toml000066400000000000000000000066101436411411700165510ustar00rootroot00000000000000[tool.poetry] name = "diff_cover" version = "7.4.0" description = "Run coverage and linting reports on diffs" authors = ["See Contributors"] homepage = "https://github.com/Bachmann1234/diff-cover" repository = "https://github.com/Bachmann1234/diff-cover" license = "Apache 2.0" readme = "README.rst" classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Software 
Development :: Testing", "Topic :: Software Development :: Quality Assurance", ] packages = [ { include = "diff_cover" } ] include = [ {path = 'templates/*.txt'}, {path = 'templates/*.html'}, {path = 'templates/*.css'}, {path = 'templates/*.md'}, {path = 'tests/*', format = 'sdist'}, ] [tool.poetry.scripts] diff-cover = 'diff_cover.diff_cover_tool:main' diff-quality = 'diff_cover.diff_quality_tool:main' [tool.poetry.dependencies] python = "^3.7.2" Pygments = "^2.9.0" Jinja2 = ">=2.7.1" pluggy = ">=0.13.1,<2" chardet = ">=3.0.0" tomli = {version = ">=1.2.1", optional = true} setuptools = { version = ">=17.0.0", python = "<3.8" } [tool.poetry.dev-dependencies] pytest-cov = "^4.0.0" pytest-datadir = "^1.4.1" pytest-mock = "^3.10.0" pycodestyle = ">=2.9.1" flake8 = "^5.0.4" pyflakes = "^2.5.0" pylint = "^2.15.3" pylint-pytest = "^1.1.2" pydocstyle = "^6.1.1" black = "^22.8.0" isort = "^5.10.1" doc8 = "1.0.0" [tool.poetry.extras] toml = ["tomli"] [build-system] requires = ["poetry-core>=1.0.7"] build-backend = "poetry.core.masonry.api" [tool.black] line-length = 88 target-version = ['py310'] include = '\.pyi?$' exclude = "tests/fixtures/*" [tool.isort] profile = "black" extend_skip = "tests/fixtures/" [tool.pylint.master] max-line-length = 100 load-plugins = [ "pylint_pytest", ] [tool.pylint."messages control"] enable = ["all"] disable = [ # allow TODO comments "fixme", # allow disables "locally-disabled", "suppressed-message", # covered by isort "ungrouped-imports", # allow classes and functions w/o docstring "missing-docstring", # hard number checks can be ignored, because they are covered in code reviews "too-many-instance-attributes", "too-many-arguments", "too-many-locals", "too-many-branches", "too-few-public-methods", "too-many-nested-blocks", "too-many-public-methods", # allow methods not to use self "no-self-use", # currently some code seems duplicated for pylint "duplicate-code", # we are a command line tool and don't want to show all internals 
"raise-missing-from", ] [tool.pylint.basic] good-names = [ "_", "i", "setUp", "tearDown", "e", "ex", ] no-docstring-rgx = "^_" [tool.pytest.ini_options] addopts = "--strict-markers" xfail_strict = true markers = [ "disable_all_files_exist: disables the fixture patch_so_all_files_exist", ] [tool.doc8] max_line_length = 120 diff_cover-7.4.0/tests/000077500000000000000000000000001436411411700147745ustar00rootroot00000000000000diff_cover-7.4.0/tests/__init__.py000066400000000000000000000000001436411411700170730ustar00rootroot00000000000000diff_cover-7.4.0/tests/fixtures/000077500000000000000000000000001436411411700166455ustar00rootroot00000000000000diff_cover-7.4.0/tests/fixtures/add_console_report.txt000066400000000000000000000003341436411411700232530ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- test_src.txt (50.0%): Missing lines 2,4,6,8,10 ------------- Total: 10 lines Missing: 5 lines Coverage: 50% ------------- diff_cover-7.4.0/tests/fixtures/add_html_report.html000066400000000000000000000121621436411411700227040ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

  • Total: 10 lines
  • Missing: 5 lines
  • Coverage: 50%
Source File Diff Coverage (%) Missing Lines
test_src.txt 50.0% 2,4,6,8,10
test_src.txt
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
test 1
test 2
test 3
test 4
test 5
test 6
test 7
test 8
test 9
test 10
diff_cover-7.4.0/tests/fixtures/changed_console_report.txt000066400000000000000000000003001436411411700241050ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- test_src.txt (100%) ------------- Total: 1 line Missing: 0 lines Coverage: 100% ------------- diff_cover-7.4.0/tests/fixtures/changed_html_report.html000066400000000000000000000104241436411411700235440ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

  • Total: 1 line
  • Missing: 0 lines
  • Coverage: 100%
Source File Diff Coverage (%) Missing Lines
test_src.txt 100%  
diff_cover-7.4.0/tests/fixtures/coverage.xml000066400000000000000000000015271436411411700211670ustar00rootroot00000000000000 diff_cover-7.4.0/tests/fixtures/coverage1.xml000066400000000000000000000015271436411411700212500ustar00rootroot00000000000000 diff_cover-7.4.0/tests/fixtures/coverage2.xml000066400000000000000000000015271436411411700212510ustar00rootroot00000000000000 diff_cover-7.4.0/tests/fixtures/delete_console_report.txt000066400000000000000000000002371436411411700237670ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- No lines with coverage information in this diff. ------------- diff_cover-7.4.0/tests/fixtures/delete_html_report.html000066400000000000000000000075521436411411700234250ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

No lines with coverage information in this diff.

diff_cover-7.4.0/tests/fixtures/dotnet_coverage.xml000066400000000000000000000051501436411411700225400ustar00rootroot00000000000000 --source /code/samplediff/SampleApp diff_cover-7.4.0/tests/fixtures/dotnet_coverage_console_report.txt000066400000000000000000000003331436411411700256720ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- SampleApp/Sample.cs (0.0%): Missing lines 23-25 ------------- Total: 3 lines Missing: 3 lines Coverage: 0% ------------- diff_cover-7.4.0/tests/fixtures/empty.txt000066400000000000000000000000001436411411700205320ustar00rootroot00000000000000diff_cover-7.4.0/tests/fixtures/empty_pycodestyle_violations.txt000066400000000000000000000003531436411411700254400ustar00rootroot00000000000000------------- Diff Quality Quality Report: pycodestyle Diff: origin/main...HEAD, staged and unstaged changes ------------- violations_test_file.py (100%) ------------- Total: 9 lines Violations: 0 lines % Quality: 100% ------------- diff_cover-7.4.0/tests/fixtures/external_css_html_report.html000066400000000000000000000016211436411411700246440ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

  • Total: 1 line
  • Missing: 0 lines
  • Coverage: 100%
Source File Diff Coverage (%) Missing Lines
test_src.txt 100%  
diff_cover-7.4.0/tests/fixtures/external_style.css000066400000000000000000000065101436411411700224230ustar00rootroot00000000000000.src-snippet { margin-top: 2em; } .src-name { font-weight: bold; } .snippets { border-top: 1px solid #bdbdbd; border-bottom: 1px solid #bdbdbd; } .hll { background-color: #ffcccc } .c { color: #408080; font-style: italic } /* Comment */ .err { border: 1px solid #FF0000 } /* Error */ .k { color: #008000; font-weight: bold } /* Keyword */ .o { color: #666666 } /* Operator */ .cm { color: #408080; font-style: italic } /* Comment.Multiline */ .cp { color: #BC7A00 } /* Comment.Preproc */ .c1 { color: #408080; font-style: italic } /* Comment.Single */ .cs { color: #408080; font-style: italic } /* Comment.Special */ .gd { color: #A00000 } /* Generic.Deleted */ .ge { font-style: italic } /* Generic.Emph */ .gr { color: #FF0000 } /* Generic.Error */ .gh { color: #000080; font-weight: bold } /* Generic.Heading */ .gi { color: #00A000 } /* Generic.Inserted */ .go { color: #888888 } /* Generic.Output */ .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ .gs { font-weight: bold } /* Generic.Strong */ .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ .gt { color: #0044DD } /* Generic.Traceback */ .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ .kp { color: #008000 } /* Keyword.Pseudo */ .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ .kt { color: #B00040 } /* Keyword.Type */ .m { color: #666666 } /* Literal.Number */ .s { color: #BA2121 } /* Literal.String */ .na { color: #7D9029 } /* Name.Attribute */ .nb { color: #008000 } /* Name.Builtin */ .nc { color: #0000FF; font-weight: bold } /* Name.Class */ .no { color: #880000 } /* Name.Constant */ .nd { color: #AA22FF } /* Name.Decorator */ .ni { color: #999999; font-weight: bold } /* Name.Entity */ .ne { color: 
#D2413A; font-weight: bold } /* Name.Exception */ .nf { color: #0000FF } /* Name.Function */ .nl { color: #A0A000 } /* Name.Label */ .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ .nt { color: #008000; font-weight: bold } /* Name.Tag */ .nv { color: #19177C } /* Name.Variable */ .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ .w { color: #bbbbbb } /* Text.Whitespace */ .mb { color: #666666 } /* Literal.Number.Bin */ .mf { color: #666666 } /* Literal.Number.Float */ .mh { color: #666666 } /* Literal.Number.Hex */ .mi { color: #666666 } /* Literal.Number.Integer */ .mo { color: #666666 } /* Literal.Number.Oct */ .sb { color: #BA2121 } /* Literal.String.Backtick */ .sc { color: #BA2121 } /* Literal.String.Char */ .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ .s2 { color: #BA2121 } /* Literal.String.Double */ .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */ .sh { color: #BA2121 } /* Literal.String.Heredoc */ .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */ .sx { color: #008000 } /* Literal.String.Other */ .sr { color: #BB6688 } /* Literal.String.Regex */ .s1 { color: #BA2121 } /* Literal.String.Single */ .ss { color: #19177C } /* Literal.String.Symbol */ .bp { color: #008000 } /* Name.Builtin.Pseudo */ .vc { color: #19177C } /* Name.Variable.Class */ .vg { color: #19177C } /* Name.Variable.Global */ .vi { color: #19177C } /* Name.Variable.Instance */ .il { color: #666666 } /* Literal.Number.Integer.Long */ diff_cover-7.4.0/tests/fixtures/git_diff_add.txt000066400000000000000000000003301436411411700217650ustar00rootroot00000000000000diff --git a/test_src.txt b/test_src.txt new file mode 100644 index 0000000..10ae772 --- /dev/null +++ b/test_src.txt @@ -0,0 +1,10 @@ +test 1 +test 2 +test 3 +test 4 +test 5 +test 6 +test 7 +test 8 +test 9 +test 10 
diff_cover-7.4.0/tests/fixtures/git_diff_changed.txt000066400000000000000000000003301436411411700226260ustar00rootroot00000000000000diff --git a/test_src.txt b/test_src.txt index 10ae772..74307ad 100644 --- a/test_src.txt +++ b/test_src.txt @@ -1,10 +1,8 @@ -test 1 +changed test 2 test 3 test 4 test 5 test 6 test 7 -test 8 -test 9 test 10 diff_cover-7.4.0/tests/fixtures/git_diff_code_dupe.txt000066400000000000000000000010601436411411700231650ustar00rootroot00000000000000diff --git a/fileone.py b/fileone.py index e69de29..3952627 100644 --- a/fileone.py +++ b/fileone.py @@ -0,0 +1,16 @@ +""" +Fileone +""" +def selection_sort(to_sort): + """ + The greatest sorting algorithm? + """ + new_list = [] + final_size = len(to_sort) + while len(new_list) < final_size: + candidate_index = 0 + for index in xrange(len(to_sort)): + if to_sort[index] <= to_sort[candidate_index]: + candidate_index = index + new_list.append(to_sort.pop(candidate_index)) + return new_listdiff_cover-7.4.0/tests/fixtures/git_diff_delete.txt000066400000000000000000000003341436411411700225030ustar00rootroot00000000000000diff --git a/test_src.txt b/test_src.txt deleted file mode 100644 index 10ae772..0000000 --- a/test_src.txt +++ /dev/null @@ -1,10 +0,0 @@ -test 1 -test 2 -test 3 -test 4 -test 5 -test 6 -test 7 -test 8 -test 9 -test 10 diff_cover-7.4.0/tests/fixtures/git_diff_dotnet.txt000066400000000000000000000005551436411411700225430ustar00rootroot00000000000000diff --git a/SampleApp/Sample.cs b/SampleApp/Sample.cs index 19dcc51..7382baa 100644 --- a/SampleApp/Sample.cs +++ b/SampleApp/Sample.cs @@ -18,10 +18,5 @@ namespace SampleApp Console.WriteLine(ex.Message); } } + + public static int DoSomethingNew(int a,int b) + { + return a + b; + } } }diff_cover-7.4.0/tests/fixtures/git_diff_external_css.txt000066400000000000000000000003301436411411700237270ustar00rootroot00000000000000diff --git a/test_src.txt b/test_src.txt index 10ae772..74307ad 100644 --- a/test_src.txt +++ b/test_src.txt @@ -1,10 +1,8 @@ 
-test 1 +changed test 2 test 3 test 4 test 5 test 6 test 7 -test 8 -test 9 test 10 diff_cover-7.4.0/tests/fixtures/git_diff_lua.txt000066400000000000000000000004031436411411700220170ustar00rootroot00000000000000diff --git a/scripts/maths.lua b/scripts/maths.lua index d31c441..bcd11a9 100644 --- a/scripts/maths.lua +++ b/scripts/maths.lua @@ -8,4 +8,8 @@ function maths.subber(a, b) return a - b end +function maths.mult(a,b) + return a * b +end + return mathsdiff_cover-7.4.0/tests/fixtures/git_diff_moved.txt000066400000000000000000000003301436411411700223470ustar00rootroot00000000000000diff --git a/test_src.txt b/test_src.txt new file mode 100644 index 0000000..10ae772 --- /dev/null +++ b/test_src.txt @@ -0,0 +1,10 @@ +test 1 +test 2 +test 3 +test 4 +test 5 +test 6 +test 7 +test 8 +test 9 +test 10 diff_cover-7.4.0/tests/fixtures/git_diff_mult.txt000066400000000000000000000003301436411411700222160ustar00rootroot00000000000000diff --git a/test_src.txt b/test_src.txt new file mode 100644 index 0000000..10ae772 --- /dev/null +++ b/test_src.txt @@ -0,0 +1,10 @@ +test 1 +test 2 +test 3 +test 4 +test 5 +test 6 +test 7 +test 8 +test 9 +test 10 diff_cover-7.4.0/tests/fixtures/git_diff_subdir.txt000066400000000000000000000003441436411411700225320ustar00rootroot00000000000000diff --git a/sub/test_src.txt b/sub/test_src.txt new file mode 100644 index 0000000..10ae772 --- /dev/null +++ b/sub/test_src.txt @@ -0,0 +1,10 @@ +test 1 +test 2 +test 3 +test 4 +test 5 +test 6 +test 7 +test 8 +test 9 +test 10 diff_cover-7.4.0/tests/fixtures/git_diff_unicode.txt000066400000000000000000000004661436411411700226750ustar00rootroot00000000000000diff --git a/unicode_test_src.txt b/unicode_test_src.txt new file mode 100644 index 0000000..10ae772 --- /dev/null +++ b/unicode_test_src.txt @@ -0,0 +1,10 @@ +ẗëṡẗ 1 +ẗëṡẗ 2 +ẗëṡẗ 3 +ẗëṡẗ 4 +ẗëṡẗ 5 +ẗëṡẗ 6 +ẗëṡẗ 7 +ẗëṡẗ 8 +ẗëṡẗ 9 +ẗëṡẗ 10 
diff_cover-7.4.0/tests/fixtures/git_diff_violations.txt000066400000000000000000000010371436411411700234310ustar00rootroot00000000000000diff --git a/violations_test_file.py b/violations_test_file.py index 8bf3de7..4e4b0df 100644 --- a/violations_test_file.py +++ b/violations_test_file.py @@ -1,6 +1,11 @@ -def func_1(apple, my_list): - d = {} - if apple<10: +def func_1(apple,my_list): + if apple< 10: # Do something my_list.append(apple) return my_list[1:] +def func_2(spongebob, squarepants): + """A less messy function""" + for char in spongebob: + if char in squarepants: + return char + unused=1 + return None diff_cover-7.4.0/tests/fixtures/git_diff_violations_two_files.txt000066400000000000000000000004121436411411700255000ustar00rootroot00000000000000diff --git a/hello.py b/hello.py index b732142..b2ba069 100644 --- a/hello.py +++ b/hello.py @@ -1 +1,2 @@ print "hello" +print unknown_var diff --git a/hi.py b/hi.py index bae1109..151d05d 100644 --- a/hi.py +++ b/hi.py @@ -1 +1,2 @@ print "hi" +print unknown_vardiff_cover-7.4.0/tests/fixtures/hello.py000066400000000000000000000000411436411411700203150ustar00rootroot00000000000000print("hello") print(unknown_var)diff_cover-7.4.0/tests/fixtures/hi.py000066400000000000000000000000411436411411700176120ustar00rootroot00000000000000print("hello") print(unknown_var)diff_cover-7.4.0/tests/fixtures/html_report.html000066400000000000000000000023251436411411700220740ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: main

  • Total: 12 lines
  • Missing: 4 lines
  • Coverage: 66%
Source File Diff Coverage (%) Missing Lines
file1.py 66.7% 10-11
subdir/file2.py 66.7% 10-11
diff_cover-7.4.0/tests/fixtures/html_report_empty.html000066400000000000000000000012451436411411700233120ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: main

No lines with coverage information in this diff.

diff_cover-7.4.0/tests/fixtures/html_report_one_snippet.html000066400000000000000000000031641436411411700245010ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: main

  • Total: 12 lines
  • Missing: 4 lines
  • Coverage: 66%
Source File Diff Coverage (%) Missing Lines
file1.py 66.7% 10-11
subdir/file2.py 66.7% 10-11
file1.py
Snippet with ስ 芒 unicode
subdir/file2.py
Snippet with ስ 芒 unicode
diff_cover-7.4.0/tests/fixtures/html_report_two_snippets.html000066400000000000000000000033341436411411700247130ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: main

  • Total: 12 lines
  • Missing: 4 lines
  • Coverage: 66%
Source File Diff Coverage (%) Missing Lines
file1.py 66.7% 10-11
subdir/file2.py 66.7% 10-11
file1.py
Snippet with ስ 芒 unicode
Snippet with ስ 芒 unicode
subdir/file2.py
Snippet with ስ 芒 unicode
Snippet with ስ 芒 unicode
diff_cover-7.4.0/tests/fixtures/lcov.info000066400000000000000000000003501436411411700204630ustar00rootroot00000000000000TN: SF:test_src.txt FN:2,(anonymous_1) FN:5,(anonymous_2) FNF:2 FNH:2 FNDA:10,(anonymous_1) FNDA:3,(anonymous_2) DA:1,1 DA:2,0 DA:3,1 DA:4,0 DA:5,1 DA:6,0 DA:7,1 DA:8,0 DA:9,1 DA:10,0 LF:6 LH:6 BRF:0 BRH:0 BRDA:3,0,0,0 end_of_recorddiff_cover-7.4.0/tests/fixtures/lua_console_report.txt000066400000000000000000000003261436411411700233050ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- scripts/maths.lua (50.0%): Missing lines 12 ------------- Total: 2 lines Missing: 1 line Coverage: 50% -------------diff_cover-7.4.0/tests/fixtures/luacoverage.xml000066400000000000000000000031511436411411700216640ustar00rootroot00000000000000 diff_cover-7.4.0/tests/fixtures/markdown_report_one_snippet.md000066400000000000000000000005451436411411700250130ustar00rootroot00000000000000# Diff Coverage ## Diff: main - file1.py (66.7%): Missing lines 10-11 - subdir/file2.py (66.7%): Missing lines 10-11 ## Summary - **Total**: 12 lines - **Missing**: 4 lines - **Coverage**: 66% ## file1.py Lines 1-1 ``` Snippet with ስ 芒 unicode ``` --- ## subdir/file2.py Lines 1-1 ``` Snippet with ስ 芒 unicode ``` --- diff_cover-7.4.0/tests/fixtures/markdown_report_two_snippets.md000066400000000000000000000007251436411411700252260ustar00rootroot00000000000000# Diff Coverage ## Diff: main - file1.py (66.7%): Missing lines 10-11 - subdir/file2.py (66.7%): Missing lines 10-11 ## Summary - **Total**: 12 lines - **Missing**: 4 lines - **Coverage**: 66% ## file1.py Lines 1-1 ``` Snippet with ስ 芒 unicode ``` --- Lines 1-1 ``` Snippet with ስ 芒 unicode ``` --- ## subdir/file2.py Lines 1-1 ``` Snippet with ስ 芒 unicode ``` --- Lines 1-1 ``` Snippet with ስ 芒 unicode ``` --- diff_cover-7.4.0/tests/fixtures/moved_console_report.txt000066400000000000000000000003341436411411700236350ustar00rootroot00000000000000------------- Diff 
Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- test_src.txt (50.0%): Missing lines 2,4,6,8,10 ------------- Total: 10 lines Missing: 5 lines Coverage: 50% ------------- diff_cover-7.4.0/tests/fixtures/moved_coverage.xml000066400000000000000000000015271436411411700223610ustar00rootroot00000000000000 diff_cover-7.4.0/tests/fixtures/moved_html_report.html000066400000000000000000000121621436411411700232660ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

  • Total: 10 lines
  • Missing: 5 lines
  • Coverage: 50%
Source File Diff Coverage (%) Missing Lines
test_src.txt 50.0% 2,4,6,8,10
test_src.txt
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
test 1
test 2
test 3
test 4
test 5
test 6
test 7
test 8
test 9
test 10
diff_cover-7.4.0/tests/fixtures/mult_inputs_console_report.txt000066400000000000000000000003321436411411700251040ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- test_src.txt (60.0%): Missing lines 4,6,8,10 ------------- Total: 10 lines Missing: 4 lines Coverage: 60% ------------- diff_cover-7.4.0/tests/fixtures/mult_inputs_html_report.html000066400000000000000000000121271436411411700245400ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

  • Total: 10 lines
  • Missing: 4 lines
  • Coverage: 60%
Source File Diff Coverage (%) Missing Lines
test_src.txt 60.0% 4,6,8,10
test_src.txt
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
test 1
test 2
test 3
test 4
test 5
test 6
test 7
test 8
test 9
test 10
diff_cover-7.4.0/tests/fixtures/pycodestyle_report.txt000066400000000000000000000004061436411411700233450ustar00rootroot00000000000000violations_test_file.py:2:13: E225 missing whitespace around operator violations_test_file.py:3:23: W291 trailing whitespace violations_test_file.py:6:1: E302 expected 2 blank lines, found 0 violations_test_file.py:11:11: E225 missing whitespace around operator diff_cover-7.4.0/tests/fixtures/pycodestyle_violations_report.html000066400000000000000000000157371436411411700257560ustar00rootroot00000000000000 Diff Quality

Diff Quality

Quality Report: pycodestyle

Diff: origin/main...HEAD, staged and unstaged changes

Source File Diff Quality (%) Lines in violation
violations_test_file.py 66.7%
  • 2: E225 missing whitespace around operator
  • 6: E302 expected 2 blank lines, found 0
  • 11: E225 missing whitespace around operator
  • Total: 9 lines
  • Violation: 3 lines
  • % Quality: 66%
violations_test_file.py
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
def func_1(apple, my_list):
    if apple<10:
        # Do something 
        my_list.append(apple)
    return my_list[1:]
def func_2(spongebob, squarepants):
    """A less messy function"""
    for char in spongebob:
        if char in squarepants:
            return char
    unused=1
    return None
diff_cover-7.4.0/tests/fixtures/pycodestyle_violations_report.txt000066400000000000000000000006631436411411700256210ustar00rootroot00000000000000------------- Diff Quality Quality Report: pycodestyle Diff: origin/main...HEAD, staged and unstaged changes ------------- violations_test_file.py (66.7%): violations_test_file.py:2: E225 missing whitespace around operator violations_test_file.py:6: E302 expected 2 blank lines, found 0 violations_test_file.py:11: E225 missing whitespace around operator ------------- Total: 9 lines Violations: 3 lines % Quality: 66% ------------- diff_cover-7.4.0/tests/fixtures/pycodestyle_violations_report_external_css.html000066400000000000000000000105221436411411700305130ustar00rootroot00000000000000 Diff Quality

Diff Quality

Quality Report: pycodestyle

Diff: origin/main...HEAD, staged and unstaged changes

Source File Diff Quality (%) Lines in violation
violations_test_file.py 66.7%
  • 2: E225 missing whitespace around operator
  • 6: E302 expected 2 blank lines, found 0
  • 11: E225 missing whitespace around operator
  • Total: 9 lines
  • Violation: 3 lines
  • % Quality: 66%
violations_test_file.py
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
def func_1(apple, my_list):
    if apple<10:
        # Do something 
        my_list.append(apple)
    return my_list[1:]
def func_2(spongebob, squarepants):
    """A less messy function"""
    for char in spongebob:
        if char in squarepants:
            return char
    unused=1
    return None
diff_cover-7.4.0/tests/fixtures/pyflakes_two_files.txt000066400000000000000000000004651436411411700233040ustar00rootroot00000000000000------------- Diff Quality Quality Report: pyflakes Diff: origin/main...HEAD, staged and unstaged changes ------------- hello.py (0.0%): hello.py:2: undefined name 'unknown_var' hi.py (0.0%): hi.py:2: undefined name 'unknown_var' ------------- Total: 2 lines Violations: 2 lines % Quality: 0% ------------- diff_cover-7.4.0/tests/fixtures/pyflakes_violations_report.html000066400000000000000000000132621436411411700252170ustar00rootroot00000000000000 Diff Quality

Diff Quality

Quality Report: pyflakes

Diff: origin/main...HEAD, staged and unstaged changes

Source File Diff Quality (%) Lines in violation
violations_test_file.py 88.9%
  • 11: local variable 'unused' is assigned to but never used
  • Total: 9 lines
  • Violation: 1 line
  • % Quality: 88%
violations_test_file.py
 7
 8
 9
10
11
12
    """A less messy function"""
    for char in spongebob:
        if char in squarepants:
            return char
    unused=1
    return None
diff_cover-7.4.0/tests/fixtures/pyflakes_violations_report.txt000066400000000000000000000004721436411411700250710ustar00rootroot00000000000000------------- Diff Quality Quality Report: pyflakes Diff: origin/main...HEAD, staged and unstaged changes ------------- violations_test_file.py (88.9%): violations_test_file.py:11: local variable 'unused' is assigned to but never used ------------- Total: 9 lines Violations: 1 line % Quality: 88% ------------- diff_cover-7.4.0/tests/fixtures/pylint_dupe.txt000066400000000000000000000061441436411411700217470ustar00rootroot00000000000000************* Module filetwo filetwo.py:1: [R0801(duplicate-code), ] Similar lines in 2 files ==fileone:3 ==filetwo:3 def selection_sort(to_sort): """ The greatest sorting algorithm? """ new_list = [] final_size = len(to_sort) while len(new_list) < final_size: candidate_index = 0 for index in xrange(len(to_sort)): if to_sort[index] <= to_sort[candidate_index]: candidate_index = index new_list.append(to_sort.pop(candidate_index)) return new_list Report ====== 21 statements analysed. 
Statistics by type ------------------ +---------+-------+-----------+-----------+------------+---------+ |type |number |old number |difference |%documented |%badname | +=========+=======+===========+===========+============+=========+ |module |2 |2 |= |100.00 |0.00 | +---------+-------+-----------+-----------+------------+---------+ |class |0 |0 |= |0 |0 | +---------+-------+-----------+-----------+------------+---------+ |method |0 |0 |= |0 |0 | +---------+-------+-----------+-----------+------------+---------+ |function |2 |2 |= |100.00 |0.00 | +---------+-------+-----------+-----------+------------+---------+ Raw metrics ----------- +----------+-------+------+---------+-----------+ |type |number |% |previous |difference | +==========+=======+======+=========+===========+ |code |20 |52.63 |20 |= | +----------+-------+------+---------+-----------+ |docstring |12 |31.58 |12 |= | +----------+-------+------+---------+-----------+ |comment |0 |0.00 |0 |= | +----------+-------+------+---------+-----------+ |empty |6 |15.79 |6 |= | +----------+-------+------+---------+-----------+ Duplication ----------- +-------------------------+-------+---------+-----------+ | |now |previous |difference | +=========================+=======+=========+===========+ |nb duplicated lines |13 |13 |= | +-------------------------+-------+---------+-----------+ |percent duplicated lines |40.625 |40.625 |= | +-------------------------+-------+---------+-----------+ Messages by category -------------------- +-----------+-------+---------+-----------+ |type |number |previous |difference | +===========+=======+=========+===========+ |convention |0 |0 |= | +-----------+-------+---------+-----------+ |refactor |1 |1 |= | +-----------+-------+---------+-----------+ |warning |0 |0 |= | +-----------+-------+---------+-----------+ |error |0 |0 |= | +-----------+-------+---------+-----------+ Messages -------- +---------------+------------+ |message id |occurrences | +===============+============+ 
|duplicate-code |1 | +---------------+------------+ Global evaluation ----------------- Your code has been rated at 9.52/10 (previous run: 9.52/10, +0.00) diff_cover-7.4.0/tests/fixtures/pylint_dupe_violations_report.txt000066400000000000000000000004341436411411700256050ustar00rootroot00000000000000------------- Diff Quality Quality Report: pylint Diff: origin/main...HEAD, staged and unstaged changes ------------- fileone.py (93.8%): fileone.py:3: R0801: (duplicate-code), : Similar lines in 2 files ------------- Total: 16 lines Violations: 1 line % Quality: 93% ------------- diff_cover-7.4.0/tests/fixtures/pylint_report.txt000066400000000000000000000055041436411411700223240ustar00rootroot00000000000000violations_test_file.py:1: [C0111] Missing docstring violations_test_file.py:1: [C0111, func_1] Missing docstring violations_test_file.py:2: [C0322, func_1] Operator not preceded by a space if apple<10: ^ Report ====== 10 statements analysed. Messages by category -------------------- +-----------+-------+---------+-----------+ |type |number |previous |difference | +===========+=======+=========+===========+ |convention |3 |3 |= | +-----------+-------+---------+-----------+ |refactor |0 |0 |= | +-----------+-------+---------+-----------+ |warning |0 |0 |= | +-----------+-------+---------+-----------+ |error |0 |0 |= | +-----------+-------+---------+-----------+ Messages -------- +-----------+------------+ |message id |occurrences | +===========+============+ |C0111 |2 | +-----------+------------+ |C0322 |1 | +-----------+------------+ Global evaluation ----------------- Your code has been rated at 7.00/10 (previous run: 7.00/10) Raw metrics ----------- +----------+-------+------+---------+-----------+ |type |number |% |previous |difference | +==========+=======+======+=========+===========+ |code |9 |75.00 |9 |= | +----------+-------+------+---------+-----------+ |docstring |1 |8.33 |1 |= | +----------+-------+------+---------+-----------+ |comment |0 |0.00 |0 |= | 
+----------+-------+------+---------+-----------+ |empty |2 |16.67 |2 |= | +----------+-------+------+---------+-----------+ Statistics by type ------------------ +---------+-------+-----------+-----------+------------+---------+ |type |number |old number |difference |%documented |%badname | +=========+=======+===========+===========+============+=========+ |module |1 |1 |= |0.00 |0.00 | +---------+-------+-----------+-----------+------------+---------+ |class |0 |0 |= |0 |0 | +---------+-------+-----------+-----------+------------+---------+ |method |0 |0 |= |0 |0 | +---------+-------+-----------+-----------+------------+---------+ |function |2 |2 |= |50.00 |0.00 | +---------+-------+-----------+-----------+------------+---------+ Duplication ----------- +-------------------------+------+---------+-----------+ | |now |previous |difference | +=========================+======+=========+===========+ |nb duplicated lines |0 |0 |= | +-------------------------+------+---------+-----------+ |percent duplicated lines |0.000 |0.000 |= | +-------------------------+------+---------+-----------+ diff_cover-7.4.0/tests/fixtures/pylint_violations_console_report.txt000066400000000000000000000010041436411411700263040ustar00rootroot00000000000000------------- Diff Quality Quality Report: pylint Diff: origin/main...HEAD, staged and unstaged changes ------------- violations_test_file.py (77.8%): violations_test_file.py:1: C0114: (missing-module-docstring), : Missing module docstring violations_test_file.py:1: C0116: (missing-function-docstring), func_1: Missing function or method docstring violations_test_file.py:11: W0612: (unused-variable), func_2: Unused variable 'unused' ------------- Total: 9 lines Violations: 2 lines % Quality: 77% ------------- diff_cover-7.4.0/tests/fixtures/pylint_violations_report.html000066400000000000000000000166371436411411700247310ustar00rootroot00000000000000 Diff Quality

Diff Quality

Quality Report: pylint

Diff: origin/main...HEAD, staged and unstaged changes

Source File Diff Quality (%) Lines in violation
violations_test_file.py 66.7%
  • 1: C0111: (missing-docstring), : Missing module docstring
  • 1: C0111: (missing-docstring), func_1: Missing function docstring
  • 2: C0326: (bad-whitespace), : Exactly one space required around comparison
  • 11: C0326: (bad-whitespace), : Exactly one space required around assignment
  • 11: W0612: (unused-variable), func_2: Unused variable 'unused'
  • Total: 9 lines
  • Violation: 3 lines
  • % Quality: 66%
violations_test_file.py
1
2
3
4
5
6
def func_1(apple, my_list):
    if apple<10:
        # Do something 
        my_list.append(apple)
    return my_list[1:]
def func_2(spongebob, squarepants):
 7
 8
 9
10
11
12
    """A less messy function"""
    for char in spongebob:
        if char in squarepants:
            return char
    unused=1
    return None
diff_cover-7.4.0/tests/fixtures/pylint_violations_report.txt000066400000000000000000000006421436411411700245710ustar00rootroot00000000000000------------- Diff Quality Quality Report: pylint Diff: origin/main...HEAD, staged and unstaged changes ------------- violations_test_file.py (77.8%): violations_test_file.py:1: C0111: Missing docstring violations_test_file.py:1: C0111: func_1: Missing docstring violations_test_file.py:2: C0322: func_1: Operator not preceded by a space ------------- Total: 9 lines Violations: 2 lines % Quality: 77% ------------- diff_cover-7.4.0/tests/fixtures/pylintrc000066400000000000000000000000001436411411700204220ustar00rootroot00000000000000diff_cover-7.4.0/tests/fixtures/show_uncovered_lines_console.txt000066400000000000000000000005731436411411700253610ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- test_src.txt (50.0%): Missing lines 2,4,6,8,10 ------------- Total: 10 lines Missing: 5 lines Coverage: 50% ------------- --- test_src.txt --- 0001: test 1 0002: test 2 0003: test 3 0004: test 4 0005: test 5 0006: test 6 0007: test 7 0008: test 8 0009: test 9 0010: test 10 0011:diff_cover-7.4.0/tests/fixtures/snippet.css000066400000000000000000000005431436411411700210430ustar00rootroot00000000000000.hll { background-color: #ffcccc } .c { color: #3D7B7B; font-style: italic } /* Comment */ .k { color: #008000; font-weight: bold } /* Keyword */ .o { color: #666666 } /* Operator */ .m { color: #666666 } /* Literal.Number */ .s { color: #BA2121 } /* Literal.String */ .na { color: #687822 } /* Name.Attribute */ .nb { color: #008000 } /* Name.Builtin */ diff_cover-7.4.0/tests/fixtures/snippet_8859.py000066400000000000000000000001521436411411700213740ustar00rootroot00000000000000# -*- coding: ISO-8859-6 -*- Line 2 Line 3 Line 4 Line 5 Line 6 Line 7 Line 8 Line 9 Line 10 
diff_cover-7.4.0/tests/fixtures/snippet_arabic_output.html000066400000000000000000000026021436411411700241360ustar00rootroot00000000000000
 3
 4
 5
 6
 7
 8
 9
10
Line 3
Line 4
Line 5
Line 6
Line 7  ئئئئئ ئئئئئ
Line 8
Line 9
Line 10
diff_cover-7.4.0/tests/fixtures/snippet_default.html000066400000000000000000000015401436411411700227210ustar00rootroot00000000000000
4
5
6
7
# Test source
def test_func(arg):
    print arg
    return arg + 5
diff_cover-7.4.0/tests/fixtures/snippet_invalid_violations.html000066400000000000000000000014561436411411700252000ustar00rootroot00000000000000
1
2
3
4
# Test source
def test_func(arg):
    print arg
    return arg + 5
diff_cover-7.4.0/tests/fixtures/snippet_list.html000066400000000000000000000106551436411411700222570ustar00rootroot00000000000000
 6
 7
 8
 9
10
11
12
13
14
15
16
17
Line 6
Line 7
Line 8
Line 9
Line 10
Line 11
Line 12
Line 13
Line 14
Line 15
Line 16
Line 17
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
Line 46
Line 47
Line 48
Line 49
Line 50
Line 51
Line 52
Line 53
Line 54
Line 55
Line 56
Line 57
Line 58
Line 59
Line 60
Line 61
diff_cover-7.4.0/tests/fixtures/snippet_list.md000066400000000000000000000004231436411411700217030ustar00rootroot00000000000000Lines 6-17 ```python Line 6 Line 7 Line 8 Line 9 Line 10 Line 11 Line 12 Line 13 Line 14 Line 15 Line 16 Line 17 ``` Lines 46-61 ```python Line 46 Line 47 Line 48 Line 49 Line 50 Line 51 Line 52 Line 53 Line 54 Line 55 Line 56 Line 57 Line 58 Line 59 Line 60 Line 61 ``` diff_cover-7.4.0/tests/fixtures/snippet_list2.md000066400000000000000000000001511436411411700217630ustar00rootroot00000000000000Lines 1-7 ```cpp #include int main() { std::cout << "Hello World!"; return 0; } ```diff_cover-7.4.0/tests/fixtures/snippet_list3.md000066400000000000000000000001761436411411700217730ustar00rootroot00000000000000Lines 8-16 ```cpp // this is line 8 printf("Test2"); // this is line 11 printf("Test"); } int main() { ``` diff_cover-7.4.0/tests/fixtures/snippet_no_filename_ext.html000066400000000000000000000015101436411411700244260ustar00rootroot00000000000000
4
5
6
7
# Test source
def test_func(arg):
    print arg
    return arg + 5
diff_cover-7.4.0/tests/fixtures/snippet_src.py000066400000000000000000000014271436411411700215540ustar00rootroot00000000000000Line 1 Line 2 Line 3 Line 4 Line 5 Line 6 Line 7 Line 8 Line 9 Line 10 Line 11 Line 12 Line 13 Line 14 Line 15 Line 16 Line 17 Line 18 Line 19 Line 20 Line 21 Line 22 Line 23 Line 24 Line 25 Line 26 Line 27 Line 28 Line 29 Line 30 Line 31 Line 32 Line 33 Line 34 Line 35 Line 36 Line 37 Line 38 Line 39 Line 40 Line 41 Line 42 Line 43 Line 44 Line 45 Line 46 Line 47 Line 48 Line 49 Line 50 Line 51 Line 52 Line 53 Line 54 Line 55 Line 56 Line 57 Line 58 Line 59 Line 60 Line 61 Line 62 Line 63 Line 64 Line 65 Line 66 Line 67 Line 68 Line 69 Line 70 Line 71 Line 72 Line 73 Line 74 Line 75 Line 76 Line 77 Line 78 Line 79 Line 80 Line 81 Line 82 Line 83 Line 84 Line 85 Line 86 Line 87 Line 88 Line 89 Line 90 Line 91 Line 92 Line 93 Line 94 Line 95 Line 96 Line 97 Line 98 Line 99 Line 100diff_cover-7.4.0/tests/fixtures/snippet_src2.cpp000066400000000000000000000001231436411411700217600ustar00rootroot00000000000000#include int main() { std::cout << "Hello World!"; return 0; } diff_cover-7.4.0/tests/fixtures/snippet_src3.cpp000066400000000000000000000004101436411411700217600ustar00rootroot00000000000000#include void UselessFunction() { // this is line 5 printf("Test"); // this is line 8 printf("Test2"); // this is line 11 printf("Test"); } int main() { std::cout << "Hello World!"; UselessFunction(); return 0; } diff_cover-7.4.0/tests/fixtures/snippet_unicode.html000066400000000000000000000004271436411411700227260ustar00rootroot00000000000000
1
var = ģ 塲 ㎉
diff_cover-7.4.0/tests/fixtures/snippet_unicode.py000066400000000000000000000015701436411411700224120ustar00rootroot00000000000000Line 1 Line 2 Line 3 Line 4 Line 5 Line 6 Line 7 Line 8 Line 9 Line 10 Line 11 Line 12 Line 13 Line 14 Line 15 Line 16 Line 17 Line 18 Line 19 Line 20 Line 21 Line 22 Line 23 Line 24 Line 25 Line 26 Line 27 Line 28 Line 29 Line 30 Line 31 Line 32 Line 33 Line 34 Line 35 Line 36 Line 37 Line 38 Line 39 Line 40 Line 41 Line 42 Line 43 Line 44 Line 45 Line 46 Line 47 Line 48 Line 49 Line 50 Line 51 Line 52 Line 53 Line 54 Line 55 Line 56 Line 57 Line 58 Line 59 Line 60 Line 61 Line 62 Line 63 Line 64 Line 65 Line 66 Line 67 Line 68 Line 69 Line 70 Line 71 Line 72 Line 73 Line 74 ❤ ☀ ☆ ☂ ☻ ♞ ☯ ☭ ☢ € → ☎ ❄ ♫ ✂ ▷ ✇ ♎ ⇧ ☮ ♻ ⌘ ⌛ ☘ Line 75 Line 76 Line 77 Line 78 Line 79 Line 80 Line 81 Line 82 Line 83 Line 84 Line 85 Line 86 Line 87 Line 88 Line 89 Line 90 Line 91 Line 92 Line 93 Line 94 Line 95 Line 96 Line 97 Line 98 Line 99 Line 100diff_cover-7.4.0/tests/fixtures/snippet_unicode_html_output.html000066400000000000000000000112151436411411700253670ustar00rootroot00000000000000
 6
 7
 8
 9
10
11
12
13
14
15
16
17
Line 6
Line 7
Line 8
Line 9
Line 10
Line 11
Line 12
Line 13
Line 14
Line 15
Line 16
Line 17
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
Line 46
Line 47
Line 48
Line 49
Line 50
Line 51
Line 52
Line 53
Line 54
Line 55
Line 56
Line 57
Line 58
Line 59
Line 60
Line 61
diff_cover-7.4.0/tests/fixtures/subdir_coverage_console_report.txt000066400000000000000000000003401436411411700256630ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- sub/test_src.txt (50.0%): Missing lines 2,4,6,8,10 ------------- Total: 10 lines Missing: 5 lines Coverage: 50% ------------- diff_cover-7.4.0/tests/fixtures/subdir_coverage_html_report.html000066400000000000000000000122421436411411700253160ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

  • Total: 10 lines
  • Missing: 5 lines
  • Coverage: 50%
Source File Diff Coverage (%) Missing Lines
sub/test_src.txt 50.0% 2,4,6,8,10
sub/test_src.txt
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
test 1
test 2
test 3
test 4
test 5
test 6
test 7
test 8
test 9
test 10
diff_cover-7.4.0/tests/fixtures/test_src.txt000066400000000000000000000001071436411411700212320ustar00rootroot00000000000000test 1 test 2 test 3 test 4 test 5 test 6 test 7 test 8 test 9 test 10 diff_cover-7.4.0/tests/fixtures/unicode_console_report.txt000066400000000000000000000003441436411411700241520ustar00rootroot00000000000000------------- Diff Coverage Diff: origin/main...HEAD, staged and unstaged changes ------------- unicode_test_src.txt (50.0%): Missing lines 2,4,6,8,10 ------------- Total: 10 lines Missing: 5 lines Coverage: 50% ------------- diff_cover-7.4.0/tests/fixtures/unicode_coverage.xml000066400000000000000000000015471436411411700226770ustar00rootroot00000000000000 diff_cover-7.4.0/tests/fixtures/unicode_html_report.html000066400000000000000000000124301436411411700236000ustar00rootroot00000000000000 Diff Coverage

Diff Coverage

Diff: origin/main...HEAD, staged and unstaged changes

  • Total: 10 lines
  • Missing: 5 lines
  • Coverage: 50%
Source File Diff Coverage (%) Missing Lines
unicode_test_src.txt 50.0% 2,4,6,8,10
unicode_test_src.txt
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
ẗëṡẗ 1
ẗëṡẗ 2
ẗëṡẗ 3
ẗëṡẗ 4
ẗëṡẗ 5
ẗëṡẗ 6
ẗëṡẗ 7
ẗëṡẗ 8
ẗëṡẗ 9
ẗëṡẗ 10
diff_cover-7.4.0/tests/fixtures/unicode_test_src.txt000066400000000000000000000002151436411411700227400ustar00rootroot00000000000000ẗëṡẗ 1 ẗëṡẗ 2 ẗëṡẗ 3 ẗëṡẗ 4 ẗëṡẗ 5 ẗëṡẗ 6 ẗëṡẗ 7 ẗëṡẗ 8 ẗëṡẗ 9 ẗëṡẗ 10 diff_cover-7.4.0/tests/fixtures/violations_test_file.py000066400000000000000000000004561436411411700234510ustar00rootroot00000000000000def func_1(apple, my_list): if apple<10: # Do something my_list.append(apple) return my_list[1:] def func_2(spongebob, squarepants): """A less messy function""" for char in spongebob: if char in squarepants: return char unused=1 return None diff_cover-7.4.0/tests/helpers.py000066400000000000000000000132621436411411700170140ustar00rootroot00000000000000""" Test helper functions. """ import os.path import random HUNK_BUFFER = 2 MAX_LINE_LENGTH = 300 LINE_STRINGS = ["test", "+ has a plus sign", "- has a minus sign"] def fixture_path(rel_path): """ Returns the absolute path to a fixture file given `rel_path` relative to the fixture directory. """ fixture_dir = os.path.join(os.path.dirname(__file__), "fixtures") return os.path.join(fixture_dir, rel_path) def load_fixture(rel_path, encoding=None): """ Return the contents of the file at `rel_path` (relative path to the "fixtures" directory). If `encoding` is not None, attempts to decode the contents as `encoding` (e.g. 'utf-8'). """ with open(fixture_path(rel_path), encoding=encoding or "utf-8") as fixture_file: contents = fixture_file.read() if encoding is not None and isinstance(contents, bytes): contents = contents.decode(encoding) return contents def line_numbers(start, end): """ Return a list of line numbers, in [start, end] (inclusive). """ return list(range(start, end + 1)) def git_diff_output(diff_dict, deleted_files=None): """ Construct fake output from `git diff` using the description defined by `diff_dict`, which is a dictionary of the form: { SRC_FILE_NAME: MODIFIED_LINES, ... 
} where `SRC_FILE_NAME` is the name of a source file in the diff, and `MODIFIED_LINES` is a list of lines added or changed in the source file. `deleted_files` is a list of files that have been deleted The content of the source files are randomly generated. Returns a byte string. """ output = [] # Entries for deleted files output.extend(_deleted_file_entries(deleted_files)) # Entries for source files for (src_file, modified_lines) in diff_dict.items(): output.extend(_source_file_entry(src_file, modified_lines)) return "\n".join(output) def _deleted_file_entries(deleted_files): """ Create fake `git diff` output for files that have been deleted in this changeset. `deleted_files` is a list of files deleted in the changeset. Returns a list of lines in the diff output. """ output = [] if deleted_files is not None: for src_file in deleted_files: # File information output.append(f"diff --git a/{src_file} b/{src_file}") output.append("index 629e8ad..91b8c0a 100644") output.append(f"--- a/{src_file}") output.append("+++ b/dev/null") # Choose a random number of lines num_lines = random.randint(1, 30) # Hunk information output.append(f"@@ -0,{num_lines} +0,0 @@") output.extend(["-" + _random_string() for _ in range(num_lines)]) return output def _source_file_entry(src_file, modified_lines): """ Create fake `git diff` output for added/modified lines. `src_file` is the source file with the changes; `modified_lines` is the list of modified line numbers. Returns a list of lines in the diff output. 
""" output = [] # Line for the file names output.append(f"diff --git a/{src_file} b/{src_file}") # Index line output.append("index 629e8ad..91b8c0a 100644") # Additions/deletions output.append(f"--- a/{src_file}") output.append(f"+++ b/{src_file}") # Hunk information for (start, end) in _hunks(modified_lines): output.extend(_hunk_entry(start, end, modified_lines)) return output def _hunk_entry(start, end, modified_lines): """ Generates fake `git diff` output for a hunk, where `start` and `end` are the start/end lines of the hunk and `modified_lines` is a list of modified lines in the hunk. Just as `git diff` does, this will include a few lines before/after the changed lines in each hunk. """ output = [] # The actual hunk usually has a few lines before/after start -= HUNK_BUFFER end += HUNK_BUFFER start = max(start, 0) # Hunk definition line # Real `git diff` output would have different line numbers # for before/after the change, but since we're only interested # in after the change, we use the same numbers for both. length = end - start output.append("@@ -{0},{1} +{0},{1} @@".format(start, length)) # Output line modifications for line_number in range(start, end + 1): # This is a changed line, so prepend a + sign if line_number in modified_lines: # Delete the old line output.append("-" + _random_string()) # Include the changed line output.append("+" + _random_string()) # This is a line we didn't modify, so no + or - signs # but prepend with a space. else: output.append(" " + _random_string()) return output def _hunks(modified_lines): """ Given a list of line numbers, return a list of hunks represented as `(start, end)` tuples. 
""" # Identify contiguous lines as hunks hunks = [] last_line = None for line in sorted(modified_lines): # If this is contiguous with the last line, continue the hunk # We're guaranteed at this point to have at least one hunk if (line - 1) == last_line: start, _ = hunks[-1] hunks[-1] = (start, line) # If non-contiguous, start a new hunk with just the current line else: hunks.append((line, line)) # Store the last line last_line = line return hunks def _random_string(): """ Return a random byte string with length in the range [0, `MAX_LINE_LENGTH`] (inclusive). """ return random.choice(LINE_STRINGS) diff_cover-7.4.0/tests/snippet_list_unicode.html000066400000000000000000000057571436411411700221230ustar00rootroot00000000000000
 6
 7
 8
 9
10
11
12
13
14
15
16
17
Line 6
Line 7
Line 8
Line 9
Line 10
Line 11
Line 12
Line 13
Line 14
Line 15
Line 16
Line 17
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
Line 46
Line 47
Line 48
Line 49
Line 50
Line 51
Line 52
Line 53
Line 54
Line 55
Line 56
Line 57
Line 58
Line 59
Line 60
Line 61
diff_cover-7.4.0/tests/test_clover_violations_reporter.py000066400000000000000000000012621436411411700240710ustar00rootroot00000000000000# pylint: disable=missing-function-docstring,protected-access """Test for diff_cover.violationsreporters - clover""" import xml.etree.ElementTree as etree from diff_cover.git_path import GitPathTool from diff_cover.violationsreporters.violations_reporter import XmlCoverageReporter # https://github.com/Bachmann1234/diff_cover/issues/190 def test_get_src_path_clover(datadir): GitPathTool._cwd = "/" GitPathTool._root = "/" clover_report = etree.parse(str(datadir / "test.xml")) result = XmlCoverageReporter.get_src_path_line_nodes_clover( clover_report, "isLucky.js" ) assert sorted([int(line.attrib["num"]) for line in result]) == [2, 3, 5, 6, 8, 12] diff_cover-7.4.0/tests/test_clover_violations_reporter/000077500000000000000000000000001436411411700235165ustar00rootroot00000000000000diff_cover-7.4.0/tests/test_clover_violations_reporter/test.xml000066400000000000000000000030471436411411700252230ustar00rootroot00000000000000 diff_cover-7.4.0/tests/test_config_parser.py000066400000000000000000000057721436411411700212410ustar00rootroot00000000000000import pytest from diff_cover import config_parser from diff_cover.config_parser import ParserError, TOMLParser, Tool, get_config tools = pytest.mark.parametrize("tool", list(Tool)) class TestTOMLParser: @tools def test_parse_no_toml_file(self, tool): parser = TOMLParser("myfile", tool) assert parser.parse() is None @tools def test_parse_but_no_tomli_installed(self, tool, mocker): mocker.patch.object(config_parser, "_HAS_TOML", False) parser = TOMLParser("myfile.toml", tool) with pytest.raises(ParserError): parser.parse() @pytest.mark.parametrize( "tool,content", [ (Tool.DIFF_COVER, ""), (Tool.DIFF_COVER, "[tool.diff_quality]"), (Tool.DIFF_QUALITY, ""), (Tool.DIFF_COVER, "[tool.diff_cover]"), ], ) def test_parse_but_no_data(self, tool, content, tmp_path): toml_file = tmp_path / "foo.toml" 
toml_file.write_text(content) parser = TOMLParser(str(toml_file), tool) with pytest.raises(ParserError): parser.parse() @pytest.mark.parametrize( "tool,content,expected", [ (Tool.DIFF_COVER, "[tool.diff_cover]\nquiet=true", {"quiet": True}), (Tool.DIFF_QUALITY, "[tool.diff_quality]\nquiet=true", {"quiet": True}), ], ) def test_parse(self, tool, content, tmp_path, expected): toml_file = tmp_path / "foo.toml" toml_file.write_text(content) parser = TOMLParser(str(toml_file), tool) assert parser.parse() == expected @tools def test_get_config_unrecognized_file(mocker, tool): parser = mocker.Mock() parser.parse_args().__dict__ = {"config_file": "foo.bar"} with pytest.raises(ParserError): get_config(parser, argv=[], defaults={}, tool=tool) @pytest.mark.parametrize( "tool,cli_config,defaults,file_content,expected", [ ( Tool.DIFF_COVER, {"a": 2, "b": None, "c": None}, {"a": 4, "b": 3}, None, {"a": 2, "b": 3, "c": None}, ), ( Tool.DIFF_QUALITY, {"a": 2, "b": None, "c": None}, {"a": 4, "b": 3}, None, {"a": 2, "b": 3, "c": None}, ), ( Tool.DIFF_COVER, {"a": 2, "b": None, "c": None, "d": None}, {"a": 4, "b": 3}, "[tool.diff_cover]\na=1\nd=6", {"a": 2, "b": 3, "c": None, "d": 6}, ), ], ) def test_get_config( mocker, tmp_path, tool, cli_config, defaults, file_content, expected ): if file_content: toml_file = tmp_path / "foo.toml" toml_file.write_text(file_content) cli_config["config_file"] = expected["config_file"] = str(toml_file) else: cli_config["config_file"] = expected["config_file"] = None parser = mocker.Mock() parser.parse_args().__dict__ = cli_config assert get_config(parser, argv=[], defaults=defaults, tool=tool) == expected diff_cover-7.4.0/tests/test_diff_cover_main.py000066400000000000000000000017721436411411700215260ustar00rootroot00000000000000# pylint: disable=missing-function-docstring """Test for diff_cover.diff_cover - main""" import pytest from diff_cover.diff_cover_tool import parse_coverage_args def test_parse_coverage_file(): argv = 
["build/tests/coverage.xml", "--compare-branch=origin/other"] arg_dict = parse_coverage_args(argv) assert arg_dict["coverage_file"] == ["build/tests/coverage.xml"] assert arg_dict["compare_branch"] == "origin/other" assert arg_dict["diff_range_notation"] == "..." def test_parse_range_notation(capsys): argv = ["build/tests/coverage.xml", "--diff-range-notation=.."] arg_dict = parse_coverage_args(argv) assert arg_dict["coverage_file"] == ["build/tests/coverage.xml"] assert arg_dict["diff_range_notation"] == ".." with pytest.raises(SystemExit) as e: argv = ["build/tests/coverage.xml", "--diff-range-notation=FOO"] parse_coverage_args(argv) assert e.value.code == 2 _, err = capsys.readouterr() assert "invalid choice: 'FOO'" in err diff_cover-7.4.0/tests/test_diff_cover_tool.py000066400000000000000000000044701436411411700215550ustar00rootroot00000000000000"""Test for diff_cover/diff_cover_tool.""" import pytest from diff_cover.diff_cover_tool import parse_coverage_args def test_parse_with_html_report(): argv = ["reports/coverage.xml", "--html-report", "diff_cover.html"] arg_dict = parse_coverage_args(argv) assert arg_dict.get("coverage_file") == ["reports/coverage.xml"] assert arg_dict.get("html_report") == "diff_cover.html" assert arg_dict.get("markdown_report") is None assert arg_dict.get("json_report") is None assert not arg_dict.get("ignore_unstaged") def test_parse_with_no_report(): argv = ["reports/coverage.xml"] arg_dict = parse_coverage_args(argv) assert arg_dict.get("coverage_file") == ["reports/coverage.xml"] assert arg_dict.get("html_report") is None assert arg_dict.get("markdown_report") is None assert arg_dict.get("json_report") is None assert not arg_dict.get("ignore_unstaged") def test_parse_with_multiple_reports(): argv = [ "reports/coverage.xml", "--html-report", "report.html", "--markdown-report", "report.md", ] arg_dict = parse_coverage_args(argv) assert arg_dict.get("coverage_file") == ["reports/coverage.xml"] assert arg_dict.get("html_report") == 
"report.html" assert arg_dict.get("markdown_report") == "report.md" assert arg_dict.get("json_report") is None assert not arg_dict.get("ignore_unstaged") def test_parse_with_ignored_unstaged(): argv = ["reports/coverage.xml", "--ignore-unstaged"] arg_dict = parse_coverage_args(argv) assert arg_dict.get("ignore_unstaged") def test_parse_invalid_arg(): # No coverage XML report specified invalid_argv = [[], ["--html-report", "diff_cover.html"]] for argv in invalid_argv: with pytest.raises(SystemExit): parse_coverage_args(argv) def test_parse_with_exclude(): argv = ["reports/coverage.xml"] arg_dict = parse_coverage_args(argv) assert arg_dict.get("exclude") is None argv = ["reports/coverage.xml", "--exclude", "noneed/*.py"] arg_dict = parse_coverage_args(argv) assert arg_dict.get("exclude") == ["noneed/*.py"] argv = ["reports/coverage.xml", "--exclude", "noneed/*.py", "other/**/*.py"] arg_dict = parse_coverage_args(argv) assert arg_dict.get("exclude") == ["noneed/*.py", "other/**/*.py"] diff_cover-7.4.0/tests/test_diff_quality_main.py000066400000000000000000000100031436411411700220630ustar00rootroot00000000000000# pylint: disable=missing-function-docstring """Test for diff_cover.diff_quality - main""" import pytest from diff_cover.diff_quality_tool import main, parse_quality_args def test_parse_with_html_report(): argv = ["--violations", "pycodestyle", "--html-report", "diff_cover.html"] arg_dict = parse_quality_args(argv) assert arg_dict.get("violations") == "pycodestyle" assert arg_dict.get("html_report") == "diff_cover.html" assert arg_dict.get("input_reports") == [] assert not arg_dict.get("ignore_unstaged") assert arg_dict.get("diff_range_notation") == "..." def test_parse_with_no_html_report(): argv = ["--violations", "pylint"] arg_dict = parse_quality_args(argv) assert arg_dict.get("violations") == "pylint" assert arg_dict.get("input_reports") == [] assert not arg_dict.get("ignore_unstaged") assert arg_dict.get("diff_range_notation") == "..." 
def test_parse_with_one_input_report(): argv = ["--violations", "pylint", "pylint_report.txt"] arg_dict = parse_quality_args(argv) assert arg_dict.get("input_reports") == ["pylint_report.txt"] def test_parse_with_multiple_input_reports(): argv = ["--violations", "pylint", "pylint_report_1.txt", "pylint_report_2.txt"] arg_dict = parse_quality_args(argv) assert arg_dict.get("input_reports") == [ "pylint_report_1.txt", "pylint_report_2.txt", ] def test_parse_with_options(): argv = [ "--violations", "pycodestyle", "--options=\"--exclude='*/migrations*'\"", ] arg_dict = parse_quality_args(argv) assert arg_dict.get("options") == "\"--exclude='*/migrations*'\"" def test_parse_with_ignored_unstaged(): argv = ["--violations", "pylint", "--ignore-unstaged"] arg_dict = parse_quality_args(argv) assert arg_dict.get("ignore_unstaged") def test_parse_invalid_arg(): # No code quality test provided invalid_argv = [[], ["--html-report", "diff_cover.html"]] for argv in invalid_argv: with pytest.raises(SystemExit): print(f"args = {argv}") parse_quality_args(argv) def _test_parse_with_path_patterns(name): argv = ["--violations", "pep8"] arg_dict = parse_quality_args(argv) assert arg_dict.get("include") is None argv = ["--violations", "pep8", f"--{name}", "noneed/*.py"] arg_dict = parse_quality_args(argv) assert arg_dict.get(name) == ["noneed/*.py"] argv = ["--violations", "pep8", f"--{name}", "noneed/*.py", "other/**/*.py"] arg_dict = parse_quality_args(argv) assert arg_dict.get(name) == ["noneed/*.py", "other/**/*.py"] def test_parse_with_exclude(): _test_parse_with_path_patterns("exclude") def test_parse_with_include(): _test_parse_with_path_patterns("include") def test_parse_diff_range_notation(): argv = ["--violations", "pep8", "--diff-range-notation=.."] arg_dict = parse_quality_args(argv) assert arg_dict.get("violations") == "pep8" assert arg_dict.get("html_report") is None assert arg_dict.get("input_reports") == [] assert not arg_dict.get("ignore_unstaged") assert 
arg_dict.get("diff_range_notation") == ".." @pytest.fixture(autouse=True) def patch_git_patch(mocker): mocker.patch("diff_cover.diff_quality_tool.GitPathTool") @pytest.fixture def report_mock(mocker): return mocker.patch( "diff_cover.diff_quality_tool.generate_quality_report", return_value=100 ) def test_parse_options(report_mock): _run_main( report_mock, [ "diff-quality", "--violations", "pylint", '--options="--foobar"', ], ) def test_parse_options_without_quotes(report_mock): _run_main( report_mock, [ "diff-quality", "--violations", "pylint", "--options=--foobar", ], ) def _run_main(report, argv): main(argv) quality_reporter = report.call_args[0][0] assert quality_reporter.driver.name == "pylint" assert quality_reporter.options == "--foobar" diff_cover-7.4.0/tests/test_diff_reporter.py000066400000000000000000000477521436411411700212560ustar00rootroot00000000000000# pylint: disable=missing-function-docstring,protected-access """Test for diff_cover.diff_reporter""" import os import tempfile from pathlib import Path from textwrap import dedent from unittest.mock import patch import pytest from diff_cover.diff_reporter import GitDiffReporter from diff_cover.git_diff import GitDiffError, GitDiffTool from tests.helpers import git_diff_output, line_numbers @pytest.fixture def git_diff(mocker): m = mocker.MagicMock(GitDiffTool) m.range_notation = "..." 
return m @pytest.fixture def diff(git_diff): return GitDiffReporter(git_diff=git_diff) def test_name(diff): # Expect that diff report is named after its compare branch assert diff.name() == "origin/main...HEAD, staged and unstaged changes" def test_name_compare_branch(git_diff): # Override the default branch assert ( GitDiffReporter(git_diff=git_diff, compare_branch="release").name() == "release...HEAD, staged and unstaged changes" ) def test_name_ignore_staged(git_diff): # Override the default branch assert ( GitDiffReporter(git_diff=git_diff, ignore_staged=True).name() == "origin/main...HEAD and unstaged changes" ) def test_name_ignore_unstaged(git_diff): # Override the default branch assert ( GitDiffReporter(git_diff=git_diff, ignore_unstaged=True).name() == "origin/main...HEAD and staged changes" ) def test_name_ignore_staged_and_unstaged(git_diff): # Override the default branch assert ( GitDiffReporter( git_diff=git_diff, ignore_staged=True, ignore_unstaged=True ).name() == "origin/main...HEAD" ) def test_name_include_untracked(git_diff): # Override the default branch assert ( GitDiffReporter(git_diff=git_diff, include_untracked=True).name() == "origin/main...HEAD, staged, unstaged and untracked changes" ) @pytest.mark.parametrize( "include,exclude,expected", [ # no include/exclude --> use all paths ([], [], ["file3.py", "README.md", "subdir1/file1.py", "subdir2/file2.py"]), # specified exclude without include ( [], ["file1.py"], ["file3.py", "README.md", "subdir2/file2.py"], ), # specified include (folder) without exclude (["subdir1/**"], [], ["subdir1/file1.py"]), # specified include (file) without exclude (["subdir1/file1.py"], [], ["subdir1/file1.py"]), # specified include and exclude ( ["subdir1/**", "subdir2/**"], ["file1.py", "file3.py"], ["subdir2/file2.py"], ), ], ) def test_git_path_selection(diff, git_diff, include, exclude, expected): old_cwd = os.getcwd() with tempfile.TemporaryDirectory() as tmp_dir: # change the working directory into the temp 
directory so that globs are working os.chdir(tmp_dir) diff = GitDiffReporter(git_diff=git_diff, exclude=exclude, include=include) main_dir = Path(tmp_dir) (main_dir / "file3.py").touch() subdir1 = main_dir / "subdir1" subdir1.mkdir() (subdir1 / "file1.py").touch() subdir2 = main_dir / "subdir2" subdir2.mkdir() (subdir2 / "file2.py").touch() # Configure the git diff output _set_git_diff_output( diff, git_diff, git_diff_output( {"subdir1/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ), git_diff_output({"subdir2/file2.py": line_numbers(3, 10), "file3.py": [0]}), git_diff_output(dict(), deleted_files=["README.md"]), ) # Get the source paths in the diff with patch.object(os.path, "abspath", lambda path: f"{tmp_dir}/{path}"): source_paths = diff.src_paths_changed() # Validate the source paths # They should be in alphabetical order assert source_paths == expected # change back to the previous working directory os.chdir(old_cwd) def test_git_source_paths(diff, git_diff): # Configure the git diff output _set_git_diff_output( diff, git_diff, git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ), git_diff_output({"subdir/file2.py": line_numbers(3, 10), "file3.py": [0]}), git_diff_output(dict(), deleted_files=["README.md"]), ) # Get the source paths in the diff source_paths = diff.src_paths_changed() # Validate the source paths # They should be in alphabetical order assert len(source_paths) == 4 assert source_paths[0] == "file3.py" assert source_paths[1] == "README.md" assert source_paths[2] == "subdir/file1.py" assert source_paths[3] == "subdir/file2.py" def test_git_source_paths_with_space(diff, git_diff): _set_git_diff_output( diff, git_diff, git_diff_output({" weird.py": [0]}), ) source_paths = diff.src_paths_changed() assert len(source_paths) == 1 assert source_paths[0] == " weird.py" def test_duplicate_source_paths(diff, git_diff): # Duplicate the output for committed, staged, and unstaged changes diff_output = git_diff_output( 
{"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) _set_git_diff_output(diff, git_diff, diff_output, diff_output, diff_output) # Get the source paths in the diff source_paths = diff.src_paths_changed() # Should see only one copy of source files assert len(source_paths) == 1 assert source_paths[0] == "subdir/file1.py" def test_git_source_paths_with_supported_extensions(diff, git_diff): # Configure the git diff output _set_git_diff_output( diff, git_diff, git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ), git_diff_output({"subdir/file2.py": line_numbers(3, 10), "file3.py": [0]}), git_diff_output({"README.md": line_numbers(3, 10)}), ) # Set supported extensions diff._supported_extensions = ["py"] # Get the source paths in the diff source_paths = diff.src_paths_changed() # Validate the source paths, README.md should be left out assert len(source_paths) == 3 assert source_paths[0] == "file3.py" assert source_paths[1] == "subdir/file1.py" assert source_paths[2] == "subdir/file2.py" def test_git_lines_changed(diff, git_diff): # Configure the git diff output _set_git_diff_output( diff, git_diff, git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ), git_diff_output({"subdir/file2.py": line_numbers(3, 10), "file3.py": [0]}), git_diff_output(dict(), deleted_files=["README.md"]), ) # Get the lines changed in the diff lines_changed = diff.lines_changed("subdir/file1.py") # Validate the lines changed assert lines_changed == line_numbers(3, 10) + line_numbers(34, 47) def test_ignore_lines_outside_src(diff, git_diff): # Add some lines at the start of the diff, before any # source files are specified diff_output = git_diff_output({"subdir/file1.py": line_numbers(3, 10)}) main_diff = "\n".join(["- deleted line", "+ added line", diff_output]) # Configure the git diff output _set_git_diff_output(diff, git_diff, main_diff, "", "") # Get the lines changed in the diff lines_changed = 
diff.lines_changed("subdir/file1.py") # Validate the lines changed assert lines_changed == line_numbers(3, 10) def test_one_line_file(diff, git_diff): # Files with only one line have a special format # in which the "length" part of the hunk is not specified diff_str = dedent( """ diff --git a/diff_cover/one_line.txt b/diff_cover/one_line.txt index 0867e73..9daeafb 100644 --- a/diff_cover/one_line.txt +++ b/diff_cover/one_line.txt @@ -1,3 +1 @@ test -test -test """ ).strip() # Configure the git diff output _set_git_diff_output(diff, git_diff, diff_str, "", "") # Get the lines changed in the diff lines_changed = diff.lines_changed("one_line.txt") # Expect that no lines are changed assert len(lines_changed) == 0 def test_git_deleted_lines(diff, git_diff): # Configure the git diff output _set_git_diff_output( diff, git_diff, git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ), git_diff_output({"subdir/file2.py": line_numbers(3, 10), "file3.py": [0]}), git_diff_output(dict(), deleted_files=["README.md"]), ) # Get the lines changed in the diff lines_changed = diff.lines_changed("README.md") # Validate no lines changed assert len(lines_changed) == 0 def test_git_unicode_filename(diff, git_diff): # Filenames with unicode characters have double quotes surrounding them # in the git diff output. 
diff_str = dedent( """ diff --git "a/unic\303\270\342\210\202e\314\201.txt" "b/unic\303\270\342\210\202e\314\201.txt" new file mode 100644 index 0000000..248ebea --- /dev/null +++ "b/unic\303\270\342\210\202e\314\201.txt" @@ -0,0 +1,13 @@ +μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος +οὐλομένην, ἣ μυρί᾽ Ἀχαιοῖς ἄλγε᾽ ἔθηκε, +πολλὰς δ᾽ ἰφθίμους ψυχὰς Ἄϊδι προΐαψεν """ ).strip() _set_git_diff_output(diff, git_diff, diff_str, "", "") # Get the lines changed in the diff lines_changed = diff.lines_changed("unic\303\270\342\210\202e\314\201.txt") # Expect that three lines changed assert len(lines_changed) == 3 def test_git_repeat_lines(diff, git_diff): # Same committed, staged, and unstaged lines diff_output = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) _set_git_diff_output(diff, git_diff, diff_output, diff_output, diff_output) # Get the lines changed in the diff lines_changed = diff.lines_changed("subdir/file1.py") # Validate the lines changed assert lines_changed == line_numbers(3, 10) + line_numbers(34, 47) def test_git_overlapping_lines(diff, git_diff): main_diff = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) # Overlap, extending the end of the hunk (lines 3 to 10) overlap_1 = git_diff_output({"subdir/file1.py": line_numbers(5, 14)}) # Overlap, extending the beginning of the hunk (lines 34 to 47) overlap_2 = git_diff_output({"subdir/file1.py": line_numbers(32, 37)}) # Lines in staged / unstaged overlap with lines in main _set_git_diff_output(diff, git_diff, main_diff, overlap_1, overlap_2) # Get the lines changed in the diff lines_changed = diff.lines_changed("subdir/file1.py") # Validate the lines changed assert lines_changed == line_numbers(3, 14) + line_numbers(32, 47) def test_git_line_within_hunk(diff, git_diff): main_diff = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) # Surround hunk in main (lines 3 to 10) surround = git_diff_output({"subdir/file1.py": 
line_numbers(2, 11)}) # Within hunk in main (lines 34 to 47) within = git_diff_output({"subdir/file1.py": line_numbers(35, 46)}) # Lines in staged / unstaged overlap with hunks in main _set_git_diff_output(diff, git_diff, main_diff, surround, within) # Get the lines changed in the diff lines_changed = diff.lines_changed("subdir/file1.py") # Validate the lines changed assert lines_changed == line_numbers(2, 11) + line_numbers(34, 47) def test_inter_diff_conflict(diff, git_diff): # Commit changes to lines 3 through 10 added_diff = git_diff_output({"file.py": line_numbers(3, 10)}) # Delete the lines we modified deleted_lines = [] for line in added_diff.split("\n"): # Any added line becomes a deleted line if line.startswith("+"): deleted_lines.append(line.replace("+", "-")) # No need to include lines we already deleted elif line.startswith("-"): pass # Keep any other line else: deleted_lines.append(line) deleted_diff = "\n".join(deleted_lines) # Try all combinations of diff conflicts combinations = [ (added_diff, deleted_diff, ""), (added_diff, "", deleted_diff), ("", added_diff, deleted_diff), (added_diff, deleted_diff, deleted_diff), ] for (main_diff, staged_diff, unstaged_diff) in combinations: # Set up so we add lines, then delete them _set_git_diff_output(diff, git_diff, main_diff, staged_diff, unstaged_diff) assert diff.lines_changed("file.py") == [] def test_git_no_such_file(diff, git_diff): diff_output = git_diff_output( {"subdir/file1.py": [1], "subdir/file2.py": [2], "file3.py": [3]} ) # Configure the git diff output _set_git_diff_output(diff, git_diff, diff_output, "", "") lines_changed = diff.lines_changed("no_such_file.txt") assert len(lines_changed) == 0 def test_no_diff(diff, git_diff): # Configure the git diff output _set_git_diff_output(diff, git_diff, "", "", "") # Expect no files changed source_paths = diff.src_paths_changed() assert source_paths == [] def test_git_diff_error( diff, git_diff, ): invalid_hunk_str = dedent( """ diff --git 
a/subdir/file1.py b/subdir/file1.py @@ invalid @@ Text """ ).strip() no_src_line_str = "@@ -33,10 +34,13 @@ Text" non_numeric_lines = dedent( """ diff --git a/subdir/file1.py b/subdir/file1.py @@ -1,2 +a,b @@ """ ).strip() missing_line_num = dedent( """ diff --git a/subdir/file1.py b/subdir/file1.py @@ -1,2 + @@ """ ).strip() missing_src_str = "diff --git " # List of (stdout, stderr) git diff pairs that should cause # a GitDiffError to be raised. err_outputs = [ invalid_hunk_str, no_src_line_str, non_numeric_lines, missing_line_num, missing_src_str, ] for diff_str in err_outputs: # Configure the git diff output _set_git_diff_output(diff, git_diff, diff_str, "", "") # Expect that both methods that access git diff raise an error with pytest.raises(GitDiffError): print("src_paths_changed() " "should fail for {}".format(diff_str)) diff.src_paths_changed() with pytest.raises(GitDiffError): print(f"lines_changed() should fail for {diff_str}") diff.lines_changed("subdir/file1.py") def test_plus_sign_in_hunk_bug(diff, git_diff): # This was a bug that caused a parse error diff_str = dedent( """ diff --git a/file.py b/file.py @@ -16,16 +16,7 @@ 1 + 2 + test + test + test + test """ ) _set_git_diff_output(diff, git_diff, diff_str, "", "") lines_changed = diff.lines_changed("file.py") assert lines_changed == [16, 17, 18, 19] def test_terminating_chars_in_hunk(diff, git_diff): # Check what happens when there's an @@ symbol after the # first terminating @@ symbol diff_str = dedent( """ diff --git a/file.py b/file.py @@ -16,16 +16,7 @@ and another +23,2 @@ symbol + test + test + test + test """ ) _set_git_diff_output(diff, git_diff, diff_str, "", "") lines_changed = diff.lines_changed("file.py") assert lines_changed == [16, 17, 18, 19] def test_merge_conflict_diff(diff, git_diff): # Handle different git diff format when in the middle # of a merge conflict diff_str = dedent( """ diff --cc subdir/src.py index d2034c0,e594d54..0000000 diff --cc subdir/src.py index 
d2034c0,e594d54..0000000 --- a/subdir/src.py +++ b/subdir/src.py @@@ -16,88 -16,222 +16,7 @@@ text + test ++<<<<<< HEAD + test ++======= """ ) _set_git_diff_output(diff, git_diff, diff_str, "", "") lines_changed = diff.lines_changed("subdir/src.py") assert lines_changed == [16, 17, 18, 19] def test_inclusion_list(diff, git_diff): unstaged_input = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) _set_git_diff_output(diff, git_diff, "", "", unstaged_input) assert len(diff._get_included_diff_results()) == 3 assert ["", "", unstaged_input] == diff._get_included_diff_results() def test_ignore_staged_inclusion(git_diff): reporter = GitDiffReporter(git_diff=git_diff, ignore_staged=True) staged_input = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) _set_git_diff_output(reporter, git_diff, "", staged_input, "") assert reporter._get_included_diff_results() == ["", ""] def test_ignore_unstaged_inclusion(git_diff): reporter = GitDiffReporter(git_diff=git_diff, ignore_unstaged=True) unstaged_input = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) _set_git_diff_output(reporter, git_diff, "", "", unstaged_input) assert reporter._get_included_diff_results() == ["", ""] def test_ignore_staged_and_unstaged_inclusion(git_diff): reporter = GitDiffReporter( git_diff=git_diff, ignore_staged=True, ignore_unstaged=True ) staged_input = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) unstaged_input = git_diff_output( {"subdir/file2.py": line_numbers(3, 10) + line_numbers(34, 47)} ) _set_git_diff_output(reporter, git_diff, "", staged_input, unstaged_input) assert reporter._get_included_diff_results() == [""] def test_fnmatch(diff): """Verify that our fnmatch wrapper works as expected.""" assert diff._fnmatch("foo.py", []) assert not diff._fnmatch("foo.py", ["*.pyc"]) assert diff._fnmatch("foo.pyc", ["*.pyc"]) assert diff._fnmatch("foo.pyc", ["*.swp", 
"*.pyc", "*.py"]) def test_fnmatch_returns_the_default_with_empty_default(diff): """The default parameter should be returned when no patterns are given.""" sentinel = object() assert diff._fnmatch("file.py", [], default=sentinel) is sentinel def test_include_untracked(mocker, git_diff): reporter = GitDiffReporter(git_diff=git_diff, include_untracked=True) diff_output = git_diff_output( {"subdir/file1.py": line_numbers(3, 10) + line_numbers(34, 47)} ) _set_git_diff_output( reporter, git_diff, staged_diff=diff_output, untracked=["u1.py", " u2.py"] ) open_mock = mocker.mock_open(read_data="1\n2\n3\n") mocker.patch("diff_cover.diff_reporter.open", open_mock) changed = reporter.src_paths_changed() assert sorted(changed) == [" u2.py", "subdir/file1.py", "u1.py"] assert reporter.lines_changed("u1.py") == [1, 2, 3] assert reporter.lines_changed(" u2.py") == [1, 2, 3] def _set_git_diff_output( reporter, diff_tool, committed_diff="", staged_diff="", unstaged_diff="", untracked=None, ): """ Configure the git diff tool to return `committed_diff`, `staged_diff`, and `unstaged_diff` as outputs from `git diff` """ reporter.clear_cache() diff_tool.diff_committed.return_value = committed_diff diff_tool.diff_staged.return_value = staged_diff diff_tool.diff_unstaged.return_value = unstaged_diff diff_tool.untracked.return_value = untracked def test_name_with_default_range(git_diff): reporter = GitDiffReporter(git_diff=git_diff, ignore_staged=True) assert reporter.name() == "origin/main...HEAD and unstaged changes" def test_name_different_range(mocker): diff = mocker.MagicMock(GitDiffTool) diff.range_notation = ".." 
reporter = GitDiffReporter(git_diff=diff, ignore_staged=True) assert reporter.name() == "origin/main..HEAD and unstaged changes" diff_cover-7.4.0/tests/test_git_diff.py000066400000000000000000000121731436411411700201640ustar00rootroot00000000000000# pylint: disable=missing-function-docstring """Test for diff_cover.git_diff""" import pytest from diff_cover.command_runner import CommandError from diff_cover.git_diff import GitDiffTool @pytest.fixture def process(mocker): process_ = mocker.Mock() process_.returncode = 0 return process_ @pytest.fixture(autouse=True) def subprocess(mocker, process): subprocess_ = mocker.patch("diff_cover.command_runner.subprocess") subprocess_.Popen.return_value = process return subprocess_ @pytest.fixture def tool(): return GitDiffTool(range_notation="...", ignore_whitespace=False) @pytest.fixture def set_git_diff_output(process): def _inner(stdout, stderr, returncode=0): process.communicate.return_value = (stdout, stderr) process.returncode = returncode return _inner @pytest.fixture def check_diff_committed(subprocess, set_git_diff_output): def _inner(diff_range_notation, ignore_whitespace): tool_ = GitDiffTool( range_notation=diff_range_notation, ignore_whitespace=ignore_whitespace ) set_git_diff_output("test output", "") output = tool_.diff_committed() # Expect that we get the correct output assert output == "test output" # Expect that the correct command was executed expected = [ "git", "-c", "diff.mnemonicprefix=no", "-c", "diff.noprefix=no", "diff", "--no-color", "--no-ext-diff", "-U0", ] if ignore_whitespace: expected.append("--ignore-all-space") expected.append("--ignore-blank-lines") expected.append(f"origin/main{diff_range_notation}HEAD") subprocess.Popen.assert_called_with( expected, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) return _inner def test_diff_committed(check_diff_committed): check_diff_committed("...", ignore_whitespace=False) check_diff_committed("...", ignore_whitespace=True) check_diff_committed("..", 
ignore_whitespace=False) check_diff_committed("..", ignore_whitespace=True) def test_diff_unstaged(set_git_diff_output, tool, subprocess): set_git_diff_output("test output", "") output = tool.diff_unstaged() # Expect that we get the correct output assert output == "test output" # Expect that the correct command was executed expected = [ "git", "-c", "diff.mnemonicprefix=no", "-c", "diff.noprefix=no", "diff", "--no-color", "--no-ext-diff", "-U0", ] subprocess.Popen.assert_called_with( expected, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) def test_diff_staged(tool, subprocess, set_git_diff_output): set_git_diff_output("test output", "") output = tool.diff_staged() # Expect that we get the correct output assert output == "test output" # Expect that the correct command was executed expected = [ "git", "-c", "diff.mnemonicprefix=no", "-c", "diff.noprefix=no", "diff", "--no-color", "--no-ext-diff", "-U0", "--cached", ] subprocess.Popen.assert_called_with( expected, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) def test_diff_missing_branch_error(set_git_diff_output, tool, subprocess): # Override the default compare branch set_git_diff_output("test output", "fatal error", 1) with pytest.raises(CommandError): tool.diff_committed(compare_branch="release") set_git_diff_output( "test output", "ambiguous argument 'origin/main...HEAD': " "unknown revision or path not in the working tree.", 1, ) with pytest.raises(ValueError): tool.diff_committed(compare_branch="release") def test_diff_committed_compare_branch(set_git_diff_output, tool, subprocess): # Override the default compare branch set_git_diff_output("test output", "") output = tool.diff_committed(compare_branch="release") # Expect that we get the correct output assert output == "test output" # Expect that the correct command was executed expected = [ "git", "-c", "diff.mnemonicprefix=no", "-c", "diff.noprefix=no", "diff", "--no-color", "--no-ext-diff", "-U0", "release...HEAD", ] subprocess.Popen.assert_called_with( 
expected, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) def test_errors(set_git_diff_output, tool): set_git_diff_output("test output", "fatal error", 1) with pytest.raises(CommandError): tool.diff_unstaged() with pytest.raises(CommandError): tool.diff_staged() with pytest.raises(CommandError): tool.diff_unstaged() @pytest.mark.parametrize( "output,expected", [ ("", []), ("\n", []), ("a.py\n", ["a.py"]), ("a.py\nb.py\n", ["a.py", "b.py"]), ], ) def test_untracked(tool, set_git_diff_output, output, expected): set_git_diff_output(output, b"") assert tool.untracked() == expected diff_cover-7.4.0/tests/test_git_path.py000066400000000000000000000052171436411411700202110ustar00rootroot00000000000000# pylint: disable=missing-function-docstring """Test for diff_cover.git_path""" import pytest from diff_cover.git_path import GitPathTool @pytest.fixture(autouse=True) def patch_git_path_tool(mocker): mocker.patch.object(GitPathTool, "_root", None) mocker.patch.object(GitPathTool, "_cwd", None) @pytest.fixture def process(mocker): process_ = mocker.Mock() process_.returncode = 0 return process_ @pytest.fixture(autouse=True) def subprocess(mocker, process): subprocess_ = mocker.patch("diff_cover.command_runner.subprocess") subprocess_.Popen.return_value = process return subprocess_ def test_project_root_command(process, subprocess): process.communicate.return_value = (b"/phony/path", b"") GitPathTool.set_cwd(b"/phony/path") # Expect that the correct command was executed expected = ["git", "rev-parse", "--show-toplevel", "--encoding=utf-8"] subprocess.Popen.assert_called_with( expected, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) def test_relative_path(process): process.communicate.return_value = (b"/home/user/work/diff-cover", b"") expected = "violations_reporter.py" cwd = "/home/user/work/diff-cover/diff_cover" GitPathTool.set_cwd(cwd) path = GitPathTool.relative_path("diff_cover/violations_reporter.py") # Expect relative path from diff_cover assert path == expected def 
test_absolute_path(process): process.communicate.return_value = ( b"/home/user/work dir/diff-cover\n--encoding=utf-8\n", b"", ) expected = "/home/user/work dir/diff-cover/other_package/file.py" cwd = "/home/user/work dir/diff-cover/diff_cover" GitPathTool.set_cwd(cwd) path = GitPathTool.absolute_path("other_package/file.py") # Expect absolute path to file.py assert path == expected def test_set_cwd_unicode(process): process.communicate.return_value = (b"\xe2\x94\xbb\xe2\x94\x81\xe2\x94\xbb", b"") expected = "\u253b\u2501\u253b/other_package/file.py" cwd = "\\u253b\\u2501\\u253b/diff_cover\n--encoding=utf-8\n" GitPathTool.set_cwd(cwd) path = GitPathTool.absolute_path("other_package/file.py") # Expect absolute path to file.py assert path == expected def test_set_cwd_unicode_byte_passed_in_for_cwd(process): process.communicate.return_value = ( b"\xe2\x94\xbb\xe2\x94\x81\xe2\x94\xbb\n--encoding=utf-8\n", b"", ) expected = "\u253b\u2501\u253b/other_package/file.py" cwd = b"\\u253b\\u2501\\u253b/diff_cover" GitPathTool.set_cwd(cwd) path = GitPathTool.absolute_path("other_package/file.py") # Expect absolute path to file.py assert path == expected diff_cover-7.4.0/tests/test_integration.py000066400000000000000000000511231436411411700207320ustar00rootroot00000000000000# pylint: disable=attribute-defined-outside-init """High-level integration tests of diff-cover tool.""" import os import os.path import re from collections import defaultdict from io import BytesIO from subprocess import Popen import pytest from diff_cover import diff_cover_tool, diff_quality_tool from diff_cover.command_runner import CommandError from diff_cover.git_path import GitPathTool from diff_cover.violationsreporters.base import QualityDriver from tests.helpers import fixture_path class ToolsIntegrationBase: """Base class for diff-cover and diff-quality integration tests.""" tool_module = None @pytest.fixture(autouse=True) def capture_fixtures(self, mocker, tmp_path): self.mocker = mocker 
self.tmp_path = tmp_path @pytest.fixture(autouse=True) def setup(self, mocker): """ Patch the output of `git` commands and `os.getcwd` set the cwd to the fixtures dir """ # Set the CWD to the fixtures dir old_cwd = os.getcwd() os.chdir(fixture_path("")) cwd = os.getcwd() self._mock_popen = mocker.patch("subprocess.Popen") self._mock_sys = mocker.patch(f"{self.tool_module}.sys") try: self._mock_getcwd = mocker.patch(f"{self.tool_module}.os.getcwdu") except AttributeError: self._mock_getcwd = mocker.patch(f"{self.tool_module}.os.getcwd") self._git_root_path = cwd self._mock_getcwd.return_value = self._git_root_path yield os.chdir(old_cwd) def _clear_css(self, content): """ The CSS is provided by pygments and changes fairly often. Im ok with simply saying "There was css" Perhaps I will eat these words """ clean_content = re.sub("r'", content, "", re.DOTALL) assert len(content) > len(clean_content) return clean_content def _check_html_report( self, git_diff_path, expected_html_path, tool_args, expected_status=0, css_file=None, ): """ Verify that the tool produces the expected HTML report. `git_diff_path` is a path to a fixture containing the (patched) output of the call to `git diff`. `expected_console_path` is a path to the fixture containing the expected HTML output of the tool. `tool_args` is a list of command line arguments to pass to the tool. You should include the name of the tool as the first argument. 
""" # Patch the output of `git diff` with open(git_diff_path, encoding="utf-8") as git_diff_file: self._set_git_diff_output(git_diff_file.read(), "") # Create a temporary directory to hold the output HTML report # Add a cleanup to ensure the directory gets deleted temp_dir = self.tmp_path / "dummy" temp_dir.mkdir() html_report_path = os.path.join(temp_dir, "diff_coverage.html") args = tool_args + ["--html-report", html_report_path] if css_file: css_file = os.path.join(temp_dir, css_file) args += ["--external-css-file", css_file] # Execute the tool if "diff-cover" in args[0]: code = diff_cover_tool.main(args) else: code = diff_quality_tool.main(args) assert code == expected_status # Check the HTML report with open(expected_html_path, encoding="utf-8") as expected_file: with open(html_report_path, encoding="utf-8") as html_report: html = html_report.read() expected = expected_file.read() if css_file is None: html = self._clear_css(html) expected = self._clear_css(expected) assert expected.strip() == html.strip() return temp_dir def _check_console_report( self, git_diff_path, expected_console_path, tool_args, expected_status=0 ): """ Verify that the tool produces the expected console report. `git_diff_path` is a path to a fixture containing the (patched) output of the call to `git diff`. `expected_console_path` is a path to the fixture containing the expected console output of the tool. `tool_args` is a list of command line arguments to pass to the tool. You should include the name of the tool as the first argument. 
""" # Patch the output of `git diff` with open(git_diff_path, encoding="utf-8") as git_diff_file: self._set_git_diff_output(git_diff_file.read(), "") # Capture stdout to a string buffer string_buffer = BytesIO() self._capture_stdout(string_buffer) # Execute the tool if "diff-cover" in tool_args[0]: code = diff_cover_tool.main(tool_args) else: code = diff_quality_tool.main(tool_args) assert code == expected_status # Check the console report with open(expected_console_path) as expected_file: report = string_buffer.getvalue() expected = expected_file.read() assert expected.strip() == report.strip().decode("utf-8") def _capture_stdout(self, string_buffer): """ Redirect output sent to `sys.stdout` to the BytesIO buffer `string_buffer`. """ self._mock_sys.stdout.buffer = string_buffer def _set_git_diff_output(self, stdout, stderr, returncode=0): """ Patch the call to `git diff` to output `stdout` and `stderr`. Patch the `git rev-parse` command to output a phony directory. """ def patch_diff(command, **kwargs): if command[0:6] == [ "git", "-c", "diff.mnemonicprefix=no", "-c", "diff.noprefix=no", "diff", ]: mock = self.mocker.Mock() mock.communicate.return_value = (stdout, stderr) mock.returncode = returncode return mock if command[0:2] == ["git", "rev-parse"]: mock = self.mocker.Mock() mock.communicate.return_value = (self._git_root_path, "") mock.returncode = returncode return mock return Popen(command, **kwargs) self._mock_popen.side_effect = patch_diff class TestDiffCoverIntegration(ToolsIntegrationBase): """ High-level integration test. The `git diff` is a mock, but everything else is our code. 
""" tool_module = "diff_cover.diff_cover_tool" def test_added_file_html(self): self._check_html_report( "git_diff_add.txt", "add_html_report.html", ["diff-cover", "coverage.xml"] ) def test_added_file_console(self): self._check_console_report( "git_diff_add.txt", "add_console_report.txt", ["diff-cover", "coverage.xml"] ) def test_added_file_console_lcov(self): self._check_console_report( "git_diff_add.txt", "add_console_report.txt", ["diff-cover", "lcov.info"] ) def test_lua_coverage(self): """ coverage report shows that diff-cover needs to normalize paths read in """ self._check_console_report( "git_diff_lua.txt", "lua_console_report.txt", ["diff-cover", "luacoverage.xml"], ) def test_fail_under_console(self): self._check_console_report( "git_diff_add.txt", "add_console_report.txt", ["diff-cover", "coverage.xml", "--fail-under=90"], expected_status=1, ) def test_fail_under_pass_console(self): self._check_console_report( "git_diff_add.txt", "add_console_report.txt", ["diff-cover", "coverage.xml", "--fail-under=5"], expected_status=0, ) def test_deleted_file_html(self): self._check_html_report( "git_diff_delete.txt", "delete_html_report.html", ["diff-cover", "coverage.xml"], ) def test_deleted_file_console(self): self._check_console_report( "git_diff_delete.txt", "delete_console_report.txt", ["diff-cover", "coverage.xml"], ) def test_changed_file_html(self): self._check_html_report( "git_diff_changed.txt", "changed_html_report.html", ["diff-cover", "coverage.xml"], ) def test_fail_under_html(self): self._check_html_report( "git_diff_changed.txt", "changed_html_report.html", ["diff-cover", "coverage.xml", "--fail-under=100.1"], expected_status=1, ) def test_fail_under_pass_html(self): self._check_html_report( "git_diff_changed.txt", "changed_html_report.html", ["diff-cover", "coverage.xml", "--fail-under=100"], expected_status=0, ) def test_changed_file_console(self): self._check_console_report( "git_diff_changed.txt", "changed_console_report.txt", ["diff-cover", 
"coverage.xml"], ) def test_moved_file_html(self): self._check_html_report( "git_diff_moved.txt", "moved_html_report.html", ["diff-cover", "moved_coverage.xml"], ) def test_moved_file_console(self): self._check_console_report( "git_diff_moved.txt", "moved_console_report.txt", ["diff-cover", "moved_coverage.xml"], ) def test_mult_inputs_html(self): self._check_html_report( "git_diff_mult.txt", "mult_inputs_html_report.html", ["diff-cover", "coverage1.xml", "coverage2.xml"], ) def test_mult_inputs_console(self): self._check_console_report( "git_diff_mult.txt", "mult_inputs_console_report.txt", ["diff-cover", "coverage1.xml", "coverage2.xml"], ) def test_changed_file_lcov_console(self): self._check_console_report( "git_diff_changed.txt", "changed_console_report.txt", ["diff-cover", "lcov.info"], ) def test_subdir_coverage_html(self): """ Assert that when diff-cover is ran from a subdirectory it generates correct reports. """ old_cwd = self._mock_getcwd.return_value self._mock_getcwd.return_value = os.path.join(old_cwd, "sub") self._check_html_report( "git_diff_subdir.txt", "subdir_coverage_html_report.html", ["diff-cover", "coverage.xml"], ) self._mock_getcwd.return_value = old_cwd def test_subdir_coverage_console(self): """ Assert that when diff-cover is ran from a subdirectory it generates correct reports. 
""" old_cwd = self._mock_getcwd.return_value self._mock_getcwd.return_value = os.path.join(old_cwd, "sub") self._check_console_report( "git_diff_subdir.txt", "subdir_coverage_console_report.txt", ["diff-cover", "coverage.xml"], ) self._mock_getcwd.return_value = old_cwd def test_unicode_console(self): self._check_console_report( "git_diff_unicode.txt", "unicode_console_report.txt", ["diff-cover", "unicode_coverage.xml"], ) def test_dot_net_diff(self): mock_path = "/code/samplediff/" self._mock_getcwd.return_value = mock_path self.mocker.patch.object(GitPathTool, "_git_root", return_value=mock_path) self._check_console_report( "git_diff_dotnet.txt", "dotnet_coverage_console_report.txt", ["diff-cover", "dotnet_coverage.xml"], ) def test_unicode_html(self): self._check_html_report( "git_diff_unicode.txt", "unicode_html_report.html", ["diff-cover", "unicode_coverage.xml"], ) def test_html_with_external_css(self): temp_dir = self._check_html_report( "git_diff_external_css.txt", "external_css_html_report.html", ["diff-cover", "coverage.xml"], css_file="external_style.css", ) assert os.path.exists(os.path.join(temp_dir, "external_style.css")) def test_git_diff_error(self): # Patch the output of `git diff` to return an error self._set_git_diff_output("", "fatal error", 1) # Expect an error with pytest.raises(CommandError): diff_cover_tool.main(["diff-cover", "coverage.xml"]) def test_quiet_mode(self): self._check_console_report( "git_diff_violations.txt", "empty.txt", ["diff-cover", "coverage.xml", "-q"], ) def test_show_uncovered_lines_console(self): self._check_console_report( "git_diff_add.txt", "show_uncovered_lines_console.txt", ["diff-cover", "--show-uncovered", "coverage.xml"], ) class TestDiffQualityIntegration(ToolsIntegrationBase): """ High-level integration test. 
""" tool_module = "diff_cover.diff_quality_tool" def test_git_diff_error_diff_quality(self): # Patch the output of `git diff` to return an error self._set_git_diff_output("", "fatal error", 1) # Expect an error with pytest.raises(CommandError): diff_quality_tool.main(["diff-quality", "--violations", "pycodestyle"]) def test_added_file_pycodestyle_html(self): self._check_html_report( "git_diff_violations.txt", "pycodestyle_violations_report.html", ["diff-quality", "--violations=pycodestyle"], ) def test_added_file_pyflakes_html(self): self._check_html_report( "git_diff_violations.txt", "pyflakes_violations_report.html", ["diff-quality", "--violations=pyflakes"], ) def test_added_file_pylint_html(self): self._check_html_report( "git_diff_violations.txt", "pylint_violations_report.html", ["diff-quality", "--violations=pylint"], ) def test_fail_under_html(self): self._check_html_report( "git_diff_violations.txt", "pylint_violations_report.html", ["diff-quality", "--violations=pylint", "--fail-under=80"], expected_status=1, ) def test_fail_under_pass_html(self): self._check_html_report( "git_diff_violations.txt", "pylint_violations_report.html", ["diff-quality", "--violations=pylint", "--fail-under=40"], expected_status=0, ) def test_html_with_external_css(self): temp_dir = self._check_html_report( "git_diff_violations.txt", "pycodestyle_violations_report_external_css.html", ["diff-quality", "--violations=pycodestyle"], css_file="external_style.css", ) assert os.path.exists(os.path.join(temp_dir, "external_style.css")) def test_added_file_pycodestyle_console(self): self._check_console_report( "git_diff_violations.txt", "pycodestyle_violations_report.txt", ["diff-quality", "--violations=pycodestyle"], ) def test_added_file_pycodestyle_console_exclude_file(self): self._check_console_report( "git_diff_violations.txt", "empty_pycodestyle_violations.txt", [ "diff-quality", "--violations=pycodestyle", '--options="--exclude=violations_test_file.py"', ], ) def 
test_fail_under_console(self): self._check_console_report( "git_diff_violations.txt", "pyflakes_violations_report.txt", ["diff-quality", "--violations=pyflakes", "--fail-under=90"], expected_status=1, ) def test_fail_under_pass_console(self): self._check_console_report( "git_diff_violations.txt", "pyflakes_violations_report.txt", ["diff-quality", "--violations=pyflakes", "--fail-under=30"], expected_status=0, ) def test_added_file_pyflakes_console(self): self._check_console_report( "git_diff_violations.txt", "pyflakes_violations_report.txt", ["diff-quality", "--violations=pyflakes"], ) def test_added_file_pyflakes_console_two_files(self): self._check_console_report( "git_diff_violations_two_files.txt", "pyflakes_two_files.txt", ["diff-quality", "--violations=pyflakes"], ) def test_added_file_pylint_console(self): console_report = "pylint_violations_console_report.txt" self._check_console_report( "git_diff_violations.txt", console_report, ["diff-quality", "--violations=pylint"], ) def test_pre_generated_pycodestyle_report(self): # Pass in a pre-generated pycodestyle report instead of letting # the tool call pycodestyle itself. self._check_console_report( "git_diff_violations.txt", "pycodestyle_violations_report.txt", ["diff-quality", "--violations=pycodestyle", "pycodestyle_report.txt"], ) def test_pre_generated_pyflakes_report(self): # Pass in a pre-generated pyflakes report instead of letting # the tool call pyflakes itself. self._check_console_report( "git_diff_violations.txt", "pyflakes_violations_report.txt", ["diff-quality", "--violations=pyflakes", "pyflakes_violations_report.txt"], ) def test_pre_generated_pylint_report(self): # Pass in a pre-generated pylint report instead of letting # the tool call pylint itself. 
self._check_console_report( "git_diff_violations.txt", "pylint_violations_report.txt", ["diff-quality", "--violations=pylint", "pylint_report.txt"], ) def test_pylint_report_with_dup_code_violation(self): self._check_console_report( "git_diff_code_dupe.txt", "pylint_dupe_violations_report.txt", ["diff-quality", "--violations=pylint", "pylint_dupe.txt"], ) def _call_quality_expecting_error( self, tool_name, expected_error, report_arg="pylint_report.txt" ): """ Makes calls to diff_quality that should fail to ensure we get back the correct failure. Takes in a string which is a tool to call and an string which is the error you expect to see """ with open("git_diff_add.txt", encoding="utf-8") as git_diff_file: self._set_git_diff_output(git_diff_file.read(), "") argv = ["diff-quality", f"--violations={tool_name}"] if report_arg: argv.append(report_arg) logger = self.mocker.patch("diff_cover.diff_quality_tool.LOGGER") exit_value = diff_quality_tool.main(argv) logger.error.assert_called_with(*expected_error) assert exit_value == 1 def test_tool_not_recognized(self): self._call_quality_expecting_error( "garbage", ("Quality tool not recognized: '%s'", "garbage"), "'garbage'" ) def test_tool_not_installed(self): # Pretend we support a tool named not_installed self.mocker.patch.dict( diff_quality_tool.QUALITY_DRIVERS, { "not_installed": DoNothingDriver( "not_installed", ["txt"], ["not_installed"] ) }, ) self._call_quality_expecting_error( "not_installed", ("Failure: '%s'", "not_installed is not installed"), report_arg=None, ) def test_do_nothing_reporter(self): # Pedantic, but really. 
This reporter # should not do anything # Does demonstrate a reporter can take in any tool # name though which is cool reporter = DoNothingDriver("pycodestyle", [], []) assert reporter.parse_reports("") == {} def test_quiet_mode(self): self._check_console_report( "git_diff_violations.txt", "empty.txt", ["diff-quality", "--violations=pylint", "-q"], ) class DoNothingDriver(QualityDriver): """Dummy class that implements necessary abstract functions.""" def parse_reports(self, reports): return defaultdict(list) def installed(self): return False diff_cover-7.4.0/tests/test_java_violations_reporter.py000066400000000000000000000530021436411411700235170ustar00rootroot00000000000000# pylint: disable=missing-function-docstring,line-too-long """Test for diff_cover.violationsreporters.java_violations_reporter""" from io import BytesIO from textwrap import dedent import pytest from diff_cover.command_runner import CommandError from diff_cover.violationsreporters import base from diff_cover.violationsreporters.base import QualityReporter from diff_cover.violationsreporters.java_violations_reporter import ( CheckstyleXmlDriver, FindbugsXmlDriver, PmdXmlDriver, Violation, checkstyle_driver, ) @pytest.fixture(autouse=True) def patch_so_all_files_exist(mocker): mock = mocker.patch.object(base.os.path, "exists") mock.returnvalue = True @pytest.fixture def process_patcher(mocker): def _inner(return_value, status_code=0): mocked_process = mocker.Mock() mocked_process.returncode = status_code mocked_process.communicate.return_value = return_value mocked_subprocess = mocker.patch("diff_cover.command_runner.subprocess") mocked_subprocess.Popen.return_value = mocked_process return mocked_process return _inner class TestCheckstyleQualityReporterTest: def test_no_such_file(self): """Expect that we get no results.""" quality = QualityReporter(checkstyle_driver) result = quality.violations("") assert result == [] def test_no_java_file(self): """Expect that we get no results because no Python 
files.""" quality = QualityReporter(checkstyle_driver) file_paths = ["file1.coffee", "subdir/file2.js"] for path in file_paths: result = quality.violations(path) assert result == [] def test_quality(self, process_patcher): """Integration test.""" # Patch the output of `checkstyle` process_patcher( ( dedent( """ [WARN] ../new_file.java:1:1: Line contains a tab character. [WARN] ../new_file.java:13: 'if' construct must use '{}'s. """ ) .strip() .encode("ascii"), "", ) ) expected_violations = [ Violation(1, "Line contains a tab character."), Violation(13, "'if' construct must use '{}'s."), ] # Parse the report quality = QualityReporter(checkstyle_driver) # Expect that the name is set assert quality.name() == "checkstyle" # Measured_lines is undefined for a # quality reporter since all lines are measured assert not quality.measured_lines("../new_file.java") # Expect that we get violations for file1.java only # We're not guaranteed that the violations are returned # in any particular order. actual_violations = quality.violations("../new_file.java") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations class TestCheckstyleXmlQualityReporterTest: @pytest.fixture(autouse=True) def setup(self, mocker): # Paths generated by git_path are always the given argument _git_path_mock = mocker.patch( "diff_cover.violationsreporters.java_violations_reporter.GitPathTool" ) _git_path_mock.relative_path = lambda path: path _git_path_mock.absolute_path = lambda path: path def test_no_such_file(self): quality = QualityReporter(CheckstyleXmlDriver()) # Expect that we get no results result = quality.violations("") assert result == [] def test_no_java_file(self): quality = QualityReporter(CheckstyleXmlDriver()) file_paths = ["file1.coffee", "subdir/file2.js"] # Expect that we get no results because no Java files for path in file_paths: result = quality.violations(path) assert result == [] def test_quality(self, 
process_patcher): # Patch the output of `checkstyle` process_patcher( ( dedent( """ """ ) .strip() .encode("ascii"), "", ) ) expected_violations = [ Violation(1, "error: Missing docstring"), Violation(2, "error: Unused variable 'd'"), Violation(2, "warning: TODO: Not the real way we'll store usages!"), Violation(579, "error: Unable to import 'rooted_paths'"), Violation( 150, "error: error while code parsing ([Errno 2] No such file or directory)", ), Violation(149, "error: Comma not followed by a space"), Violation(113, "error: Unused argument 'cls'"), ] # Parse the report quality = QualityReporter(CheckstyleXmlDriver()) # Expect that the name is set assert quality.name() == "checkstyle" # Measured_lines is undefined for a # quality reporter since all lines are measured assert not quality.measured_lines("file1.java") # Expect that we get violations for file1.java only # We're not guaranteed that the violations are returned # in any particular order. actual_violations = quality.violations("file1.java") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations def test_quality_error(self, mocker, process_patcher): # Patch the output stderr/stdout and returncode of `checkstyle` process_patcher( ( dedent( """ """ ), b"oops", ), status_code=1, ) # Parse the report code = mocker.patch( "diff_cover.violationsreporters.java_violations_reporter.run_command_for_code" ) code.return_value = 0 quality = QualityReporter(CheckstyleXmlDriver()) with pytest.raises(CommandError): quality.violations("file1.java") def test_quality_pregenerated_report(self): # When the user provides us with a pre-generated checkstyle report # then use that instead of calling checkstyle directly. 
checkstyle_reports = [ BytesIO( dedent( """ """ ) .strip() .encode("utf-8") ), BytesIO( dedent( """ """ ) .strip() .encode("utf-8") ), ] # Generate the violation report quality = QualityReporter(CheckstyleXmlDriver(), reports=checkstyle_reports) # Expect that we get the right violations expected_violations = [ Violation(1, "error: Missing docstring"), Violation( 57, "warning: TODO the name of this method is a little bit confusing" ), Violation( 183, "error: Invalid name '' for type argument (should match [a-z_][a-z0-9_]{2,30}$)", ), ] # We're not guaranteed that the violations are returned # in any particular order. actual_violations = quality.violations("path/to/file.java") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations class TestFindbugsQualityReporterTest: @pytest.fixture(autouse=True) def setup(self, mocker): # Paths generated by git_path are always the given argument _git_path_mock = mocker.patch( "diff_cover.violationsreporters.java_violations_reporter.GitPathTool" ) _git_path_mock.relative_path = lambda path: path _git_path_mock.absolute_path = lambda path: path def test_no_such_file(self): quality = QualityReporter(FindbugsXmlDriver()) # Expect that we get no results result = quality.violations("") assert result == [] def test_no_java_file(self): quality = QualityReporter(FindbugsXmlDriver()) file_paths = ["file1.coffee", "subdir/file2.js"] # Expect that we get no results because no Java files for path in file_paths: result = quality.violations(path) assert result == [] def test_quality_pregenerated_report(self): # When the user provides us with a pre-generated findbugs report # then use that instead of calling findbugs directly. 
findbugs_reports = [ BytesIO( dedent( """ Consider using Locale parameterized version of invoked method Use of non-localized String.toUpperCase() or String.toLowerCase() in org.opensource.sample.file$1.isMultipart(HttpServletRequest) At file.java:[lines 94-103] In class org.opensource.sample.file$1 In method org.opensource.sample.file$1.isMultipart(HttpServletRequest) At file.java:[line 97] Another occurrence at file.java:[line 103, 104] """ ) .strip() .encode("utf-8") ), BytesIO( dedent( """ Consider using Locale parameterized version of invoked method Use of non-localized String.toUpperCase() or String.toLowerCase() in org.opensource.sample.file$1.isMultipart(HttpServletRequest) At file.java:[lines 94-103] In class org.opensource.sample.file$1 In method org.opensource.sample.file$1.isMultipart(HttpServletRequest) At file.java:[line 97] Another occurrence at file.java:[line 183] """ ) .strip() .encode("utf-8") ), # this is a violation which is not bounded to a specific line. We'll skip those BytesIO( dedent( """ Non-transient non-serializable instance field in serializable class Class org.opensource.sample.file defines non-transient non-serializable instance field In file.java """ ) .strip() .encode("utf-8") ), ] # Generate the violation report quality = QualityReporter(FindbugsXmlDriver(), reports=findbugs_reports) # Expect that we get the right violations expected_violations = [ Violation( 97, "I18N: Consider using Locale parameterized version of invoked method", ), Violation( 183, "I18N: Consider using Locale parameterized version of invoked method", ), ] # We're not guaranteed that the violations are returned # in any particular order. 
actual_violations = quality.violations("path/to/file.java") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations class TestPmdXmlQualityReporterTest: @pytest.fixture(autouse=True) def setup(self, mocker): # Paths generated by git_path are always the given argument _git_path_mock = mocker.patch( "diff_cover.violationsreporters.java_violations_reporter.GitPathTool" ) _git_path_mock.relative_path = lambda path: path _git_path_mock.absolute_path = lambda path: path def test_no_such_file(self): quality = QualityReporter(PmdXmlDriver()) # Expect that we get no results result = quality.violations("") assert result == [] def test_no_java_file(self): quality = QualityReporter(PmdXmlDriver()) file_paths = ["file1.coffee", "subdir/file2.js"] # Expect that we get no results because no Java files for path in file_paths: result = quality.violations(path) assert result == [] def test_quality_pregenerated_report(self): # When the user provides us with a pre-generated findbugs report # then use that instead of calling findbugs directly. pmd_reports = [ BytesIO( dedent( """ must have @author comment interface method must include javadoc comment """ ) .strip() .encode("utf-8") ) ] pmd_xml_driver = PmdXmlDriver() # Generate the violation report quality = QualityReporter(pmd_xml_driver, reports=pmd_reports) # Expect that pmd is not installed assert not pmd_xml_driver.installed() # Expect that we get the right violations expected_violations = [ Violation(21, "ClassMustHaveAuthorRule: must have @author comment"), Violation( 10, "AbstractMethodOrInterfaceMethodMustUseJavadocRule: interface method must include javadoc comment", ), ] # We're not guaranteed that the violations are returned # in any particular order. 
actual_violations = quality.violations("path/to/file.java") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations diff_cover-7.4.0/tests/test_report_generator.py000066400000000000000000000432601436411411700217730ustar00rootroot00000000000000# pylint: disable=attribute-defined-outside-init,not-callable import copy import json from io import BytesIO from textwrap import dedent import pytest from diff_cover.diff_reporter import BaseDiffReporter from diff_cover.report_generator import ( BaseReportGenerator, HtmlReportGenerator, JsonReportGenerator, MarkdownReportGenerator, StringReportGenerator, TemplateReportGenerator, ) from diff_cover.violationsreporters.violations_reporter import ( BaseViolationReporter, Violation, ) from tests.helpers import load_fixture class SimpleReportGenerator(BaseReportGenerator): """Bare-bones concrete implementation of a report generator.""" def generate_report(self, output_file): pass class BaseReportGeneratorTest: """Base class for constructing test cases of report generators.""" # Test data, returned by default from the mocks SRC_PATHS = {"file1.py", "subdir/file2.py"} LINES = [2, 3, 4, 5, 10, 11, 12, 13, 14, 15] VIOLATIONS = [Violation(n, None) for n in (10, 11, 20)] MEASURED = [1, 2, 3, 4, 7, 10, 11, 15, 20, 30] XML_REPORT_NAME = ["reports/coverage.xml"] DIFF_REPORT_NAME = "main" # Subclasses override this to provide the class under test REPORT_GENERATOR_CLASS = None # Snippet returned by the mock SNIPPET_HTML = "
Snippet with \u1235 \u8292 unicode
" SNIPPET_MARKDOWN = "Lines 1-1\n\n```\nSnippet with \u1235 \u8292 unicode\n```" SNIPPET_STYLE = ".css { color:red }" SNIPPET_TERMINAL = SNIPPET_MARKDOWN @pytest.fixture(autouse=True) def base_setup(self, mocker): # Create mocks of the dependencies self.coverage = mocker.MagicMock( BaseViolationReporter, ) self.diff = mocker.MagicMock(BaseDiffReporter) # Patch snippet loading to always return the same string self._load_formatted_snippets = mocker.patch( "diff_cover.snippets.Snippet.load_formatted_snippets" ) self.set_num_snippets(0) # Patch snippet style style_defs = mocker.patch("diff_cover.snippets.Snippet.style_defs") style_defs.return_value = self.SNIPPET_STYLE # Set the names of the XML and diff reports self.coverage.name.return_value = self.XML_REPORT_NAME self.diff.name.return_value = self.DIFF_REPORT_NAME # Configure the mocks self.set_src_paths_changed([]) self._lines_dict = dict() self.diff.lines_changed.side_effect = self._lines_dict.get self._violations_dict = dict() self.coverage.violations.side_effect = self._violations_dict.get self.coverage.violations_batch.side_effect = NotImplementedError self._measured_dict = dict() self.coverage.measured_lines.side_effect = self._measured_dict.get # Create a concrete instance of a report generator self.report = self.REPORT_GENERATOR_CLASS(self.coverage, self.diff) def set_src_paths_changed(self, src_paths): """ Patch the dependency `src_paths_changed()` return value """ self.diff.src_paths_changed.return_value = src_paths def set_lines_changed(self, src_path, lines): """ Patch the dependency `lines_changed()` to return `lines` when called with argument `src_path`. """ self._lines_dict.update({src_path: lines}) def set_violations(self, src_path, violations): """ Patch the dependency `violations()` to return `violations` when called with argument `src_path`. 
""" self._violations_dict.update({src_path: violations}) def set_measured(self, src_path, measured): """ Patch the dependency `measured_lines()` return `measured` when called with argument `src_path`. """ self._measured_dict.update({src_path: measured}) def set_num_snippets(self, num_snippets): """ Patch the depdenency `Snippet.load_snippets_html()` to return `num_snippets` of the fake snippet HTML. """ self._load_formatted_snippets.return_value = { "html": num_snippets * [self.SNIPPET_HTML], "markdown": num_snippets * [self.SNIPPET_MARKDOWN], "terminal": num_snippets * [self.SNIPPET_TERMINAL], } def use_default_values(self): """ Configure the mocks to use default values provided by class constants. All source files are given the same line, violation, and measured information. """ self.set_src_paths_changed(self.SRC_PATHS) for src in self.SRC_PATHS: self.set_lines_changed(src, self.LINES) self.set_violations(src, self.VIOLATIONS) self.set_measured(src, self.MEASURED) self.set_num_snippets(0) def get_report(self): """ Generate a report and assert that it matches the string `expected`. 
""" # Create a buffer for the output output = BytesIO() # Generate the report self.report.generate_report(output) # Get the output output_str = output.getvalue() output.close() return output_str.decode("utf-8") def assert_report(self, expected): output_report_string = self.get_report() assert expected.strip() == output_report_string.strip() class TestSimpleReportGenerator(BaseReportGeneratorTest): REPORT_GENERATOR_CLASS = SimpleReportGenerator @pytest.fixture(autouse=True) def setup(self): self.use_default_values() def test_src_paths(self): assert self.report.src_paths() == self.SRC_PATHS def test_coverage_name(self): assert self.report.coverage_report_name() == self.XML_REPORT_NAME def test_diff_name(self): assert self.report.diff_report_name() == self.DIFF_REPORT_NAME def test_percent_covered(self): # Check that we get the expected coverage percentages # By construction, both files have the same diff line # and coverage information # There are 6 lines that are both in the diff and measured, # and 4 of those are covered. 
for src_path in self.SRC_PATHS: assert self.report.percent_covered(src_path) == pytest.approx(4.0 / 6 * 100) def test_violation_lines(self): # By construction, each file has the same coverage information expected = [10, 11] for src_path in self.SRC_PATHS: assert self.report.violation_lines(src_path) == expected def test_src_with_no_info(self): assert "unknown.py" not in self.report.src_paths() assert self.report.percent_covered("unknown.py") is None assert self.report.violation_lines("unknown.py") == [] def test_src_paths_not_measured(self): # Configure one of the source files to have no coverage info self.set_measured("file1.py", []) self.set_violations("file1.py", []) # Expect that we treat the file like it doesn't exist assert "file1.py" not in self.report.src_paths() assert self.report.percent_covered("file1.py") is None assert self.report.violation_lines("file1.py") == [] def test_total_num_lines(self): # By construction, each source file has the same coverage info num_lines_in_file = len(set(self.MEASURED).intersection(self.LINES)) expected = len(self.SRC_PATHS) * num_lines_in_file assert self.report.total_num_lines() == expected def test_total_num_missing(self): # By construction, each source file has the same coverage info, # in which 3 lines are uncovered, 2 of which are changed expected = len(self.SRC_PATHS) * 2 assert self.report.total_num_violations() == expected def test_total_percent_covered(self): # Since each file has the same coverage info, # the total percent covered is the same as each file # individually. 
assert self.report.total_percent_covered() == 66 class TestTemplateReportGenerator(BaseReportGeneratorTest): REPORT_GENERATOR_CLASS = TemplateReportGenerator def _test_input_expected_output(self, input_with_expected_output): for test_input, expected_output in input_with_expected_output: assert expected_output == TemplateReportGenerator.combine_adjacent_lines( test_input ) def test_combine_adjacent_lines_no_adjacent(self): in_out = [([1, 3], ["1", "3"]), ([1, 5, 7, 10], ["1", "5", "7", "10"])] self._test_input_expected_output(in_out) def test_combine_adjacent_lines(self): in_out = [ ([1, 2, 3, 4, 5, 8, 10, 12, 13, 14, 15], ["1-5", "8", "10", "12-15"]), ([1, 4, 5, 6, 10], ["1", "4-6", "10"]), ([402, 403], ["402-403"]), ] self._test_input_expected_output(in_out) def test_empty_list(self): assert [] == TemplateReportGenerator.combine_adjacent_lines([]) def test_one_number(self): assert ["1"] == TemplateReportGenerator.combine_adjacent_lines([1]) class TestJsonReportGenerator(BaseReportGeneratorTest): REPORT_GENERATOR_CLASS = JsonReportGenerator def assert_report(self, expected): output_report_string = self.get_report() assert json.loads(expected) == json.loads(output_report_string) def test_generate_report(self): # Generate a default report self.use_default_values() # Verify that we got the expected string expected = json.dumps( { "report_name": ["reports/coverage.xml"], "diff_name": "main", "src_stats": { "file1.py": { "percent_covered": 66.66666666666667, "violation_lines": [10, 11], "violations": [[10, None], [11, None]], }, "subdir/file2.py": { "percent_covered": 66.66666666666667, "violation_lines": [10, 11], "violations": [[10, None], [11, None]], }, }, "total_num_lines": 12, "total_num_violations": 4, "total_percent_covered": 66, "num_changed_lines": len(self.SRC_PATHS) * len(self.LINES), } ) self.assert_report(expected) def test_hundred_percent(self): # Have the dependencies return an empty report self.set_src_paths_changed(["file.py"]) 
self.set_lines_changed("file.py", list(range(0, 100))) self.set_violations("file.py", []) self.set_measured("file.py", [2]) expected = json.dumps( { "report_name": ["reports/coverage.xml"], "diff_name": "main", "src_stats": { "file.py": { "percent_covered": 100.0, "violation_lines": [], "violations": [], } }, "total_num_lines": 1, "total_num_violations": 0, "total_percent_covered": 100, "num_changed_lines": 100, } ) self.assert_report(expected) def test_empty_report(self): # Have the dependencies return an empty report # (this is the default) expected = json.dumps( { "report_name": ["reports/coverage.xml"], "diff_name": "main", "src_stats": {}, "total_num_lines": 0, "total_num_violations": 0, "total_percent_covered": 100, "num_changed_lines": 0, } ) self.assert_report(expected) class TestStringReportGenerator(BaseReportGeneratorTest): REPORT_GENERATOR_CLASS = StringReportGenerator def test_generate_report(self): # Generate a default report self.use_default_values() # Verify that we got the expected string expected = dedent( """ ------------- Diff Coverage Diff: main ------------- file1.py (66.7%): Missing lines 10-11 subdir/file2.py (66.7%): Missing lines 10-11 ------------- Total: 12 lines Missing: 4 lines Coverage: 66% ------------- """ ).strip() self.assert_report(expected) def test_hundred_percent(self): # Have the dependencies return an empty report self.set_src_paths_changed(["file.py"]) self.set_lines_changed("file.py", list(range(0, 100))) self.set_violations("file.py", []) self.set_measured("file.py", [2]) expected = dedent( """ ------------- Diff Coverage Diff: main ------------- file.py (100%) ------------- Total: 1 line Missing: 0 lines Coverage: 100% ------------- """ ).strip() self.assert_report(expected) def test_empty_report(self): # Have the dependencies return an empty report # (this is the default) expected = dedent( """ ------------- Diff Coverage Diff: main ------------- No lines with coverage information in this diff. 
------------- """ ).strip() self.assert_report(expected) class TestHtmlReportGenerator(BaseReportGeneratorTest): REPORT_GENERATOR_CLASS = HtmlReportGenerator def test_generate_report(self): self.use_default_values() expected = load_fixture("html_report.html") self.assert_report(expected) def test_empty_report(self): # Have the dependencies return an empty report # (this is the default) # Verify that we got the expected string expected = load_fixture("html_report_empty.html") self.assert_report(expected) def test_one_snippet(self): self.use_default_values() # Have the snippet loader always report # provide one snippet (for every source file) self.set_num_snippets(1) # Verify that we got the expected string expected = load_fixture("html_report_one_snippet.html").strip() self.assert_report(expected) def test_multiple_snippets(self): self.use_default_values() # Have the snippet loader always report # multiple snippets for each source file self.set_num_snippets(2) # Verify that we got the expected string expected = load_fixture("html_report_two_snippets.html").strip() self.assert_report(expected) class TestMarkdownReportGenerator(BaseReportGeneratorTest): REPORT_GENERATOR_CLASS = MarkdownReportGenerator def test_generate_report(self): # Generate a default report self.use_default_values() # Verify that we got the expected string expected = dedent( """ # Diff Coverage ## Diff: main - file1.py (66.7%): Missing lines 10-11 - subdir/file2.py (66.7%): Missing lines 10-11 ## Summary - **Total**: 12 lines - **Missing**: 4 lines - **Coverage**: 66% """ ).strip() self.assert_report(expected) def test_hundred_percent(self): # Have the dependencies return an empty report self.set_src_paths_changed(["file.py"]) self.set_lines_changed("file.py", list(range(0, 100))) self.set_violations("file.py", []) self.set_measured("file.py", [2]) expected = dedent( """ # Diff Coverage ## Diff: main - file.py (100%) ## Summary - **Total**: 1 line - **Missing**: 0 lines - **Coverage**: 100% """ 
).strip() self.assert_report(expected) def test_empty_report(self): # Have the dependencies return an empty report # (this is the default) expected = dedent( """ # Diff Coverage ## Diff: main No lines with coverage information in this diff. """ ).strip() self.assert_report(expected) def test_one_snippet(self): self.use_default_values() # Have the snippet loader always report # provide one snippet (for every source file) self.set_num_snippets(1) # Verify that we got the expected string expected = load_fixture("markdown_report_one_snippet.md").strip() self.assert_report(expected) def test_multiple_snippets(self): self.use_default_values() # Have the snippet loader always report # multiple snippets for each source file self.set_num_snippets(2) # Verify that we got the expected string expected = load_fixture("markdown_report_two_snippets.md").strip() self.assert_report(expected) class TestSimpleReportGeneratorWithBatchViolationReporter(BaseReportGeneratorTest): REPORT_GENERATOR_CLASS = SimpleReportGenerator @pytest.fixture(autouse=True) def setup(self): self.use_default_values() # Have violations_batch() return the violations. self.coverage.violations_batch.side_effect = None self.coverage.violations_batch.return_value = copy.deepcopy( self._violations_dict ) # Have violations() return an empty list to ensure violations_batch() # is used. 
for src in self.SRC_PATHS: self.set_violations(src, []) def test_violation_lines(self): # By construction, each file has the same coverage information expected = [10, 11] for src_path in self.SRC_PATHS: assert self.report.violation_lines(src_path) == expected diff_cover-7.4.0/tests/test_snippets.py000066400000000000000000000204161436411411700202550ustar00rootroot00000000000000# pylint: disable=missing-function-docstring """Test for diff_cover.snippets""" import os import pytest from pygments.token import Token from diff_cover.git_path import GitPathTool from diff_cover.snippets import Snippet from tests.helpers import fixture_path, load_fixture SRC_TOKENS = [ (Token.Comment, "# Test source"), (Token.Text, "\n"), (Token.Keyword, "def"), (Token.Text, " "), (Token.Name.Function, "test_func"), (Token.Punctuation, "("), (Token.Name, "arg"), (Token.Punctuation, ")"), (Token.Punctuation, ":"), (Token.Text, "\n"), (Token.Text, " "), (Token.Keyword, "print"), (Token.Text, " "), (Token.Name, "arg"), (Token.Text, "\n"), (Token.Text, " "), (Token.Keyword, "return"), (Token.Text, " "), (Token.Name, "arg"), (Token.Text, " "), (Token.Operator, "+"), (Token.Text, " "), (Token.Literal.Number.Integer, "5"), (Token.Text, "\n"), ] def _assert_line_range(src_path, violation_lines, expected_ranges): """ Assert that the snippets loaded using `violation_lines` have the correct ranges of lines. `violation_lines` is a list of line numbers containing violations (which should get included in snippets). `expected_ranges` is a list of `(start, end)` tuples representing the starting and ending lines expected in a snippet. Line numbers start at 1. 
""" # Load snippets from the source file snippet_list = Snippet.load_snippets(src_path, violation_lines) # Check that we got the right number of snippets assert len(snippet_list) == len(expected_ranges) # Check that the snippets have the desired ranges for snippet, line_range in zip(snippet_list, expected_ranges): # Expect that the line range is correct assert snippet.line_range() == line_range # Expect that the source contents are correct start, end = line_range assert snippet.text() == _src_lines(start, end) def _src_lines(start_line, end_line): """ Test lines to write to the source file (Line 1, Line 2, ...). """ return "\n".join( [f"Line {line_num}" for line_num in range(start_line, end_line + 1)] ) def _assert_format( src_tokens, src_filename, start_line, last_line, violation_lines, expected_fixture, ): snippet = Snippet( src_tokens, src_filename, start_line, last_line, violation_lines, None ) result = snippet.html() expected_str = load_fixture(expected_fixture, encoding="utf-8") assert expected_str.strip() == result.strip() assert isinstance(result, str) def _compare_snippets_output(format_, filename, violations, expected_out_filename): # One higher-level test to make sure # the snippets are being rendered correctly formatted_snippets = Snippet.load_formatted_snippets(filename, violations) snippets_selected = "\n\n".join(formatted_snippets[format_]) # Load the fixture for the expected contents expected_path = fixture_path(expected_out_filename) with open(expected_path, encoding="utf-8") as fixture_file: expected = fixture_file.read() if isinstance(expected, bytes): expected = expected.decode("utf-8") # Check that we got what we expected assert expected.strip() == snippets_selected.strip() @pytest.fixture def tmpfile(tmp_path): """ Write to the temporary file "Line 1", "Line 2", etc. up to `num_src_lines`. 
""" def _inner(num_src_lines): file = tmp_path / "src" file.write_text(_src_lines(1, num_src_lines)) return str(file.resolve()) return _inner @pytest.fixture(autouse=True) def patch_path_tool(mocker): # Path tool should not be aware of testing command mocker.patch.object(GitPathTool, "absolute_path", lambda path: path) mocker.patch.object(GitPathTool, "relative_path", lambda path: path) @pytest.fixture def switch_to_fixture_dir(request): # Need to be in the fixture directory # so the source path is displayed correctly os.chdir(fixture_path("")) yield os.chdir(request.config.invocation_dir) def test_style_defs(): style_str = Snippet.style_defs() expected_styles = load_fixture("snippet.css").strip() # Check that a sample of the styles are present # (use only a sample to make the test more robust # against Pygments changes). for expect_line in expected_styles.split("\n"): assert expect_line in style_str def test_format(): _assert_format(SRC_TOKENS, "test.py", 4, 6, [4, 6], "snippet_default.html") def test_format_with_invalid_start_line(): for start_line in [-2, -1, 0]: with pytest.raises(ValueError): Snippet("# test", "test.py", start_line, start_line + 1, [], None) def test_format_with_invalid_violation_lines(): # Violation lines outside the range of lines in the file # should be ignored. 
    _assert_format(
        SRC_TOKENS,
        "test.py",
        1,
        2,
        [-1, 0, 5, 6],
        "snippet_invalid_violations.html",
    )


def test_no_filename_ext():
    # No filename extension: should default to text lexer
    _assert_format(SRC_TOKENS, "test", 4, 6, [4, 6], "snippet_no_filename_ext.html")


def test_unicode():
    # Non-ASCII source tokens should render without mangling
    unicode_src = [(Token.Text, "var = \u0123 \u5872 \u3389")]
    _assert_format(unicode_src, "test.py", 1, 2, [], "snippet_unicode.html")


def test_one_snippet(tmpfile):
    # Violations close together collapse into a single snippet
    src_path = tmpfile(10)
    violations = [2, 3, 4, 5]
    expected_ranges = [(1, 9)]
    _assert_line_range(src_path, violations, expected_ranges)


def test_multiple_snippets(tmpfile):
    # Violation clusters far apart produce separate snippets
    src_path = tmpfile(100)
    violations = [30, 31, 32, 35, 36, 60, 62]
    expected_ranges = [(26, 40), (56, 66)]
    _assert_line_range(src_path, violations, expected_ranges)


def test_no_lead_line(tmpfile):
    # Snippet context cannot extend above line 1
    src_path = tmpfile(10)
    violations = [1, 2, 3]
    expected_ranges = [(1, 7)]
    _assert_line_range(src_path, violations, expected_ranges)


def test_no_lag_line(tmpfile):
    # Snippet context cannot extend past the last line
    src_path = tmpfile(10)
    violations = [9, 10]
    expected_ranges = [(5, 10)]
    _assert_line_range(src_path, violations, expected_ranges)


def test_one_line_file(tmpfile):
    src_path = tmpfile(1)
    violations = [1]
    expected_ranges = [(1, 1)]
    _assert_line_range(src_path, violations, expected_ranges)


def test_empty_file(tmpfile):
    # An empty file yields no snippets at all
    src_path = tmpfile(0)
    violations = [0]
    expected_ranges = []
    _assert_line_range(src_path, violations, expected_ranges)


def test_no_violations(tmpfile):
    src_path = tmpfile(10)
    violations = []
    expected_ranges = []
    _assert_line_range(src_path, violations, expected_ranges)


def test_end_range_on_violation(tmpfile):
    src_path = tmpfile(40)

    # With context, the range for the snippet at 28 is 33
    # Expect that the snippet expands to include the violation at the border
    violations = [28, 33]
    expected_ranges = [(24, 37)]
    _assert_line_range(src_path, violations, expected_ranges)


@pytest.mark.usefixtures("switch_to_fixture_dir")
def test_load_snippets_html():
    _compare_snippets_output(
        "html",
        "snippet_src.py",
        [10, 12,
13, 50, 51, 54, 55, 57], "snippet_list.html", ) @pytest.mark.usefixtures("switch_to_fixture_dir") def test_load_snippets_markdown(): _compare_snippets_output( "markdown", "snippet_src.py", [10, 12, 13, 50, 51, 54, 55, 57], "snippet_list.md", ) _compare_snippets_output( "markdown", "snippet_src2.cpp", [4, 5], "snippet_list2.md", ) _compare_snippets_output( "markdown", "snippet_src3.cpp", [12], "snippet_list3.md", ) @pytest.mark.usefixtures("switch_to_fixture_dir") def test_load_utf8_snippets(): _compare_snippets_output( "html", "snippet_unicode.py", [10, 12, 13, 50, 51, 54, 55, 57], "snippet_unicode_html_output.html", ) @pytest.mark.usefixtures("switch_to_fixture_dir") def test_load_declared_arabic(): _compare_snippets_output( "html", "snippet_8859.py", [7], "snippet_arabic_output.html" ) def test_latin_one_undeclared(tmp_path): file = tmp_path / "tmp" file.write_bytes("I am some latin 1 Â encoded text".encode("latin1")) contents = Snippet.load_contents(str(file)) assert contents == "I am some latin 1 Â encoded text" diff_cover-7.4.0/tests/test_util.py000066400000000000000000000005441436411411700173650ustar00rootroot00000000000000import sys from diff_cover import util def test_to_unix_path(): """ Ensure the _to_unix_path static function handles paths properly. 
""" assert util.to_unix_path("foo/bar") == "foo/bar" assert util.to_unix_path("foo\\bar") == "foo/bar" if sys.platform.startswith("win"): assert util.to_unix_path("FOO\\bar") == "foo/bar" diff_cover-7.4.0/tests/test_violations_reporter.py000066400000000000000000001737171436411411700225360ustar00rootroot00000000000000# pylint: disable=missing-function-docstring,line-too-long # pylint: disable=too-many-lines,attribute-defined-outside-init """Test for diff_cover.violations_reporter""" import os import subprocess from io import BytesIO, StringIO try: # Needed for Python < 3.3, works up to 3.8 import xml.etree.cElementTree as etree except ImportError: # Python 3.9 onwards import xml.etree.ElementTree as etree from subprocess import Popen from textwrap import dedent import pytest from diff_cover.command_runner import CommandError, run_command_for_code from diff_cover.violationsreporters import base from diff_cover.violationsreporters.base import QualityReporter from diff_cover.violationsreporters.violations_reporter import ( CppcheckDriver, EslintDriver, PylintDriver, Violation, XmlCoverageReporter, flake8_driver, jshint_driver, pycodestyle_driver, pydocstyle_driver, pyflakes_driver, ) @pytest.fixture(autouse=True) def patch_so_all_files_exist(mocker, request): if "disable_all_files_exist" in request.keywords: return mock = mocker.patch.object(base.os.path, "exists") mock.returnvalue = True @pytest.fixture def process_patcher(mocker): def _inner(return_value, status_code=0): mocked_process = mocker.Mock() mocked_process.returncode = status_code mocked_process.communicate.return_value = return_value mocked_subprocess = mocker.patch("diff_cover.command_runner.subprocess") mocked_subprocess.Popen.return_value = mocked_process return mocked_process return _inner class TestXmlCoverageReporterTest: MANY_VIOLATIONS = { Violation(3, None), Violation(7, None), Violation(11, None), Violation(13, None), } FEW_MEASURED = {2, 3, 5, 7, 11, 13} FEW_VIOLATIONS = {Violation(3, None), 
Violation(11, None)} MANY_MEASURED = {2, 3, 5, 7, 11, 13, 17} ONE_VIOLATION = {Violation(11, None)} VERY_MANY_MEASURED = {2, 3, 5, 7, 11, 13, 17, 23, 24, 25, 26, 26, 27} @pytest.fixture(autouse=True) def patch_git_patch(self, mocker): # Paths generated by git_path are always the given argument _git_path_mock = mocker.patch( "diff_cover.violationsreporters.violations_reporter.GitPathTool" ) _git_path_mock.relative_path = lambda path: path _git_path_mock.absolute_path = lambda path: path def test_violations(self): # Construct the XML report file_paths = ["file1.py", "subdir/file2.py"] violations = self.MANY_VIOLATIONS measured = self.FEW_MEASURED xml = self._coverage_xml(file_paths, violations, measured) # Parse the report coverage = XmlCoverageReporter(xml) # Expect that the name is set assert coverage.name() == "XML" # By construction, each file has the same set # of covered/uncovered lines assert violations == coverage.violations("file1.py") assert measured == coverage.measured_lines("file1.py") # Try getting a smaller range result = coverage.violations("subdir/file2.py") assert result == violations # Once more on the first file (for caching) result = coverage.violations("file1.py") assert result == violations def test_non_python_violations(self): """ Non python projects often just have a file name specified while the full path can be acquired from a sources tag in the XML. 
This test checks that flow by requesting violation info from a path that can only be constructed by using the path provided in the sources tag """ fancy_path = "superFancyPath" file_paths = ["file1.java"] source_paths = [fancy_path] violations = self.MANY_VIOLATIONS measured = self.FEW_MEASURED xml = self._coverage_xml( file_paths, violations, measured, source_paths=source_paths ) coverage = XmlCoverageReporter([xml]) assert violations == coverage.violations( "{}/{}".format(fancy_path, file_paths[0]) ) assert measured == coverage.measured_lines( "{}/{}".format(fancy_path, file_paths[0]) ) def test_non_python_violations_empty_path(self): """ In the wild empty sources can happen. See https://github.com/Bachmann1234/diff-cover/issues/88 Best I can tell its mostly irrelevant but I mostly don't want it crashing """ xml = etree.fromstring( """ """ ) coverage = XmlCoverageReporter([xml]) assert set() == coverage.violations("") assert set() == coverage.measured_lines("") def test_two_inputs_first_violate(self): # Construct the XML report file_paths = ["file1.py"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = XmlCoverageReporter([xml, xml2]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.py") assert measured1 | measured2 == coverage.measured_lines("file1.py") def test_two_inputs_second_violate(self): # Construct the XML report file_paths = ["file1.py"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = 
XmlCoverageReporter([xml2, xml]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.py") assert measured1 | measured2 == coverage.measured_lines("file1.py") def test_three_inputs(self): # Construct the XML report file_paths = ["file1.py"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS violations3 = self.ONE_VIOLATION measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED measured3 = self.VERY_MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) xml3 = self._coverage_xml(file_paths, violations3, measured3) # Parse the report coverage = XmlCoverageReporter([xml2, xml, xml3]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 & violations3 == coverage.violations( "file1.py" ) assert measured1 | measured2 | measured3 == coverage.measured_lines("file1.py") def test_different_files_in_inputs(self): # Construct the XML report xml_roots = [ self._coverage_xml(["file.py"], self.MANY_VIOLATIONS, self.FEW_MEASURED), self._coverage_xml( ["other_file.py"], self.FEW_VIOLATIONS, self.MANY_MEASURED ), ] # Parse the report coverage = XmlCoverageReporter(xml_roots) assert self.MANY_VIOLATIONS == coverage.violations("file.py") assert self.FEW_VIOLATIONS == coverage.violations("other_file.py") def test_empty_violations(self): """ Test that an empty violations report is handled properly """ # Construct the XML report file_paths = ["file1.py"] violations1 = self.MANY_VIOLATIONS violations2 = set() measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = XmlCoverageReporter([xml2, xml]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & 
violations2 == coverage.violations("file1.py") assert measured1 | measured2 == coverage.measured_lines("file1.py") def test_no_such_file(self): # Construct the XML report with no source files xml = self._coverage_xml([], [], []) # Parse the report coverage = XmlCoverageReporter(xml) # Expect that we get no results result = coverage.violations("file.py") assert result == set() def _coverage_xml(self, file_paths, violations, measured, source_paths=None): """ Build an XML tree with source files specified by `file_paths`. Each source fill will have the same set of covered and uncovered lines. `file_paths` is a list of path strings `line_dict` is a dictionary with keys that are line numbers and values that are True/False indicating whether the line is covered This leaves out some attributes of the Cobertura format, but includes all the elements. """ root = etree.Element("coverage") if source_paths: sources = etree.SubElement(root, "sources") for path in source_paths: source = etree.SubElement(sources, "source") source.text = path packages = etree.SubElement(root, "packages") classes = etree.SubElement(packages, "classes") violation_lines = {violation.line for violation in violations} for path in file_paths: src_node = etree.SubElement(classes, "class") src_node.set("filename", path) etree.SubElement(src_node, "methods") lines_node = etree.SubElement(src_node, "lines") # Create a node for each line in measured for line_num in measured: is_covered = line_num not in violation_lines line = etree.SubElement(lines_node, "line") hits = 1 if is_covered else 0 line.set("hits", str(hits)) line.set("number", str(line_num)) return root class TestCloverXmlCoverageReporterTest: MANY_VIOLATIONS = { Violation(3, None), Violation(7, None), Violation(11, None), Violation(13, None), } FEW_MEASURED = {2, 3, 5, 7, 11, 13} FEW_VIOLATIONS = {Violation(3, None), Violation(11, None)} MANY_MEASURED = {2, 3, 5, 7, 11, 13, 17} ONE_VIOLATION = {Violation(11, None)} VERY_MANY_MEASURED = {2, 3, 5, 7, 
11, 13, 17, 23, 24, 25, 26, 26, 27} @pytest.fixture(autouse=True) def patch_git_patch(self, mocker): # Paths generated by git_path are always the given argument _git_path_mock = mocker.patch( "diff_cover.violationsreporters.violations_reporter.GitPathTool" ) _git_path_mock.relative_path = lambda path: path _git_path_mock.absolute_path = lambda path: path def test_violations(self): # Construct the XML report file_paths = ["file1.java", "subdir/file2.java"] violations = self.MANY_VIOLATIONS measured = self.FEW_MEASURED xml = self._coverage_xml(file_paths, violations, measured) # Parse the report coverage = XmlCoverageReporter([xml]) # Expect that the name is set assert coverage.name() == "XML" # By construction, each file has the same set # of covered/uncovered lines assert violations == coverage.violations("file1.java") assert measured == coverage.measured_lines("file1.java") # Try getting a smaller range result = coverage.violations("subdir/file2.java") assert result == violations # Once more on the first file (for caching) result = coverage.violations("file1.java") assert result == violations def test_two_inputs_first_violate(self): # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = XmlCoverageReporter([xml, xml2]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.java") assert measured1 | measured2 == coverage.measured_lines("file1.java") def test_two_inputs_second_violate(self): # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = 
self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = XmlCoverageReporter([xml2, xml]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.java") assert measured1 | measured2 == coverage.measured_lines("file1.java") def test_three_inputs(self): # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS violations3 = self.ONE_VIOLATION measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED measured3 = self.VERY_MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) xml3 = self._coverage_xml(file_paths, violations3, measured3) # Parse the report coverage = XmlCoverageReporter([xml2, xml, xml3]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 & violations3 == coverage.violations( "file1.java" ) assert measured1 | measured2 | measured3 == coverage.measured_lines( "file1.java" ) def test_different_files_in_inputs(self): # Construct the XML report xml_roots = [ self._coverage_xml(["file.java"], self.MANY_VIOLATIONS, self.FEW_MEASURED), self._coverage_xml( ["other_file.java"], self.FEW_VIOLATIONS, self.MANY_MEASURED ), ] # Parse the report coverage = XmlCoverageReporter(xml_roots) assert self.MANY_VIOLATIONS == coverage.violations("file.java") assert self.FEW_VIOLATIONS == coverage.violations("other_file.java") def test_empty_violations(self): """ Test that an empty violations report is handled properly """ # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = set() measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, 
measured2) # Parse the report coverage = XmlCoverageReporter([xml2, xml]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.java") assert measured1 | measured2 == coverage.measured_lines("file1.java") def test_no_such_file(self): # Construct the XML report with no source files xml = self._coverage_xml([], [], []) # Parse the report coverage = XmlCoverageReporter(xml) # Expect that we get no results result = coverage.violations("file.java") assert result == set() def _coverage_xml(self, file_paths, violations, measured): """ Build an XML tree with source files specified by `file_paths`. Each source fill will have the same set of covered and uncovered lines. `file_paths` is a list of path strings `line_dict` is a dictionary with keys that are line numbers and values that are True/False indicating whether the line is covered This leaves out some attributes of the Cobertura format, but includes all the elements. 
""" root = etree.Element("coverage") root.set("clover", "4.2.0") project = etree.SubElement(root, "project") package = etree.SubElement(project, "package") violation_lines = {violation.line for violation in violations} for path in file_paths: src_node = etree.SubElement(package, "file") src_node.set("path", path) # Create a node for each line in measured for line_num in measured: is_covered = line_num not in violation_lines line = etree.SubElement(src_node, "line") hits = 1 if is_covered else 0 line.set("count", str(hits)) line.set("num", str(line_num)) line.set("type", "stmt") return root class TestJacocoXmlCoverageReporterTest: MANY_VIOLATIONS = { Violation(3, None), Violation(7, None), Violation(11, None), Violation(13, None), } FEW_MEASURED = {2, 3, 5, 7, 11, 13} FEW_VIOLATIONS = {Violation(3, None), Violation(11, None)} MANY_MEASURED = {2, 3, 5, 7, 11, 13, 17} ONE_VIOLATION = {Violation(11, None)} VERY_MANY_MEASURED = {2, 3, 5, 7, 11, 13, 17, 23, 24, 25, 26, 26, 27} @pytest.fixture(autouse=True) def patch_git_patch(self, mocker): # Paths generated by git_path are always the given argument _git_path_mock = mocker.patch( "diff_cover.violationsreporters.violations_reporter.GitPathTool" ) _git_path_mock.relative_path = lambda path: path _git_path_mock.absolute_path = lambda path: path def test_violations(self): # Construct the XML report file_paths = ["file1.java", "subdir/file2.java"] violations = self.MANY_VIOLATIONS measured = self.FEW_MEASURED xml = self._coverage_xml(file_paths, violations, measured) # Parse the report coverage = XmlCoverageReporter([xml]) # Expect that the name is set assert coverage.name() == "XML" # By construction, each file has the same set # of covered/uncovered lines assert violations == coverage.violations("file1.java") assert measured == coverage.measured_lines("file1.java") # Try getting a smaller range result = coverage.violations("subdir/file2.java") assert result == violations # Once more on the first file (for caching) result = 
coverage.violations("file1.java") assert result == violations def test_two_inputs_first_violate(self): # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = XmlCoverageReporter([xml, xml2]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.java") assert measured1 | measured2 == coverage.measured_lines("file1.java") def test_two_inputs_second_violate(self): # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = XmlCoverageReporter([xml2, xml]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.java") assert measured1 | measured2 == coverage.measured_lines("file1.java") def test_three_inputs(self): # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = self.FEW_VIOLATIONS violations3 = self.ONE_VIOLATION measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED measured3 = self.VERY_MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) xml3 = self._coverage_xml(file_paths, violations3, measured3) # Parse the report coverage = XmlCoverageReporter([xml2, xml, xml3]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 & violations3 == coverage.violations( 
"file1.java" ) assert measured1 | measured2 | measured3 == coverage.measured_lines( "file1.java" ) def test_different_files_in_inputs(self): # Construct the XML report xml_roots = [ self._coverage_xml(["file.java"], self.MANY_VIOLATIONS, self.FEW_MEASURED), self._coverage_xml( ["other_file.java"], self.FEW_VIOLATIONS, self.MANY_MEASURED ), ] # Parse the report coverage = XmlCoverageReporter(xml_roots) assert self.MANY_VIOLATIONS == coverage.violations("file.java") assert self.FEW_VIOLATIONS == coverage.violations("other_file.java") def test_empty_violations(self): """ Test that an empty violations report is handled properly """ # Construct the XML report file_paths = ["file1.java"] violations1 = self.MANY_VIOLATIONS violations2 = set() measured1 = self.FEW_MEASURED measured2 = self.MANY_MEASURED xml = self._coverage_xml(file_paths, violations1, measured1) xml2 = self._coverage_xml(file_paths, violations2, measured2) # Parse the report coverage = XmlCoverageReporter([xml2, xml]) # By construction, each file has the same set # of covered/uncovered lines assert violations1 & violations2 == coverage.violations("file1.java") assert measured1 | measured2 == coverage.measured_lines("file1.java") def test_no_such_file(self): # Construct the XML report with no source files xml = self._coverage_xml([], [], []) # Parse the report coverage = XmlCoverageReporter(xml) # Expect that we get no results result = coverage.violations("file.java") assert result == set() def _coverage_xml(self, file_paths, violations, measured): """ Build an XML tree with source files specified by `file_paths`. Each source fill will have the same set of covered and uncovered lines. `file_paths` is a list of path strings `line_dict` is a dictionary with keys that are line numbers and values that are True/False indicating whether the line is covered This leaves out some attributes of the Cobertura format, but includes all the elements. 
""" root = etree.Element("report") root.set("name", "diff-cover") sessioninfo = etree.SubElement(root, "sessioninfo") sessioninfo.set("id", "C13WQ1WFHTEE-83e2bc9b") violation_lines = {violation.line for violation in violations} for path in file_paths: package = etree.SubElement(root, "package") package.set("name", os.path.dirname(path)) src_node = etree.SubElement(package, "sourcefile") src_node.set("name", os.path.basename(path)) # Create a node for each line in measured for line_num in measured: is_covered = line_num not in violation_lines line = etree.SubElement(src_node, "line") hits = 1 if is_covered else 0 line.set("ci", str(hits)) line.set("nr", str(line_num)) return root class TestPycodestyleQualityReporterTest: def test_quality(self, mocker, process_patcher): # Patch the output of `pycodestyle` mocker.patch.object(Popen, "communicate") return_string = ( "\n" + dedent( """ ../new_file.py:1:17: E231 whitespace ../new_file.py:3:13: E225 whitespace ../new_file.py:7:1: E302 blank lines """ ).strip() + "\n" ) process_patcher((return_string.encode("utf-8"), b"")) # Parse the report quality = QualityReporter(pycodestyle_driver) # Expect that the name is set assert quality.name() == "pycodestyle" # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("../new_file.py") is None # Expect that we get the right violations expected_violations = [ Violation(1, "E231 whitespace"), Violation(3, "E225 whitespace"), Violation(7, "E302 blank lines"), ] assert expected_violations == quality.violations("../new_file.py") def test_no_quality_issues_newline(self, process_patcher): # Patch the output of `pycodestyle` process_patcher((b"\n", b"")) # Parse the report quality = QualityReporter(pycodestyle_driver) assert [] == quality.violations("file1.py") def test_no_quality_issues_emptystring(self, process_patcher): # Patch the output of `pycodestyle` process_patcher((b"", b"")) # Parse the report quality = 
QualityReporter(pycodestyle_driver) assert [] == quality.violations("file1.py") def test_quality_error(self, mocker, process_patcher): # Patch the output of `pycodestyle` process_patcher((b"", "whoops Ƕئ".encode()), status_code=255) code = mocker.patch("diff_cover.violationsreporters.base.run_command_for_code") code.return_value = 0 # Parse the report quality = QualityReporter(pycodestyle_driver) # Expect that the name is set assert quality.name() == "pycodestyle" with pytest.raises(CommandError, match="whoops Ƕئ"): quality.violations("file1.py") def test_no_such_file(self): quality = QualityReporter(pycodestyle_driver) # Expect that we get no results result = quality.violations("") assert result == [] def test_no_python_file(self): quality = QualityReporter(pycodestyle_driver) file_paths = ["file1.coffee", "subdir/file2.js"] # Expect that we get no results because no Python files for path in file_paths: result = quality.violations(path) assert result == [] def test_quality_pregenerated_report(self): # When the user provides us with a pre-generated pycodestyle report # then use that instead of calling pycodestyle directly. pycodestyle_reports = [ BytesIO( ( "\n" + dedent( """ path/to/file.py:1:17: E231 whitespace path/to/file.py:3:13: E225 whitespace another/file.py:7:1: E302 blank lines """ ).strip() + "\n" ).encode("utf-8") ), BytesIO( ( "\n" + dedent( """ path/to/file.py:24:2: W123 \u9134\u1912 another/file.py:50:1: E302 blank lines """ ).strip() + "\n" ).encode("utf-8") ), ] # Parse the report quality = QualityReporter(pycodestyle_driver, reports=pycodestyle_reports) # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("path/to/file.py") is None # Expect that we get the right violations expected_violations = [ Violation(1, "E231 whitespace"), Violation(3, "E225 whitespace"), Violation(24, "W123 \u9134\u1912"), ] # We're not guaranteed that the violations are returned # in any particular order. 
actual_violations = quality.violations("path/to/file.py") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations class TestPyflakesQualityReporterTest: """ Tests for Pyflakes quality violations """ def test_quality(self, process_patcher): # Patch the output of `pyflakes` return_string = ( "\n" + dedent( """ ../new_file.py:328: undefined name '_thing' ../new_file.py:418: 'random' imported but unused """ ).strip() + "\n" ) process_patcher((return_string.encode("utf-8"), b"")) # Parse the report quality = QualityReporter(pyflakes_driver) # Expect that the name is set assert quality.name() == "pyflakes" # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("../new_file.py") is None # Expect that we get the right violations expected_violations = [ Violation(328, "undefined name '_thing'"), Violation(418, "'random' imported but unused"), ] assert expected_violations == quality.violations("../new_file.py") def test_no_quality_issues_newline(self, process_patcher): process_patcher((b"\n", b"")) # Parse the report quality = QualityReporter(pyflakes_driver) assert [] == quality.violations("file1.py") def test_no_quality_issues_emptystring(self, process_patcher): # Patch the output of `pyflakes` process_patcher((b"", b"")) # Parse the report quality = QualityReporter(pyflakes_driver) assert [] == quality.violations("file1.py") def test_quality_error(self, mocker, process_patcher): # Patch the output of `pyflakes` process_patcher((b"", b"whoops"), status_code=255) code = mocker.patch("diff_cover.violationsreporters.base.run_command_for_code") code.return_value = 0 quality = QualityReporter(pyflakes_driver) # Expect that the name is set assert quality.name() == "pyflakes" with pytest.raises(CommandError): quality.violations("file1.py") def test_no_such_file(self): quality = QualityReporter(pyflakes_driver) # Expect that we get no results 
result = quality.violations("") assert result == [] def test_no_python_file(self): quality = QualityReporter(pyflakes_driver) file_paths = ["file1.coffee", "subdir/file2.js"] # Expect that we get no results because no Python files for path in file_paths: result = quality.violations(path) assert result == [] def test_quality_pregenerated_report(self): # When the user provides us with a pre-generated pyflakes report # then use that instead of calling pyflakes directly. pyflakes_reports = [ BytesIO( ( "\n" + dedent( """ path/to/file.py:1: undefined name 'this' path/to/file.py:3: 'random' imported but unused another/file.py:7: 'os' imported but unused """ ).strip() + "\n" ).encode("utf-8") ), BytesIO( ( "\n" + dedent( """ path/to/file.py:24: undefined name 'that' another/file.py:50: undefined name 'another' """ ).strip() + "\n" ).encode("utf-8") ), ] # Parse the report quality = QualityReporter(pyflakes_driver, reports=pyflakes_reports) # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("path/to/file.py") is None # Expect that we get the right violations expected_violations = [ Violation(1, "undefined name 'this'"), Violation(3, "'random' imported but unused"), Violation(24, "undefined name 'that'"), ] # We're not guaranteed that the violations are returned # in any particular order. 
actual_violations = quality.violations("path/to/file.py") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations class TestFlake8QualityReporterTest: def test_quality(self, process_patcher): # Patch the output of `flake8` return_string = ( "\n" + dedent( """ ../new_file.py:1:17: E231 whitespace ../new_file.py:3:13: E225 whitespace ../new_file.py:7:1: E302 blank lines ../new_file.py:8:1: W191 indentation contains tabs ../new_file.py:10:1: F841 local variable name is assigned to but never used ../new_file.py:20:1: C901 'MyModel.mymethod' is too complex (14) ../new_file.py:50:1: N801 class names should use CapWords convention ../new_file.py:60:10: T000 Todo note found. ../new_file.py:70:0: I100 statements are in the wrong order. ../new_file.py:80:0: B901 blind except: statement ../new_file.py:90:0: D207 Docstring is under-indented ../new_file.py:100:0: S100 Snippet found ../new_file.py:110:0: Q000 Remove Single quotes ../new_file.py:120:0: ABCXYZ000 Dummy """ ).strip() + "\n" ) process_patcher((return_string.encode("utf-8"), b"")) # Parse the report quality = QualityReporter(flake8_driver) # Expect that the name is set assert quality.name() == "flake8" # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("../new_file.py") is None # Expect that we get the right violations expected_violations = [ Violation(1, "E231 whitespace"), Violation(3, "E225 whitespace"), Violation(7, "E302 blank lines"), Violation(8, "W191 indentation contains tabs"), Violation(10, "F841 local variable name is assigned to but never used"), Violation(20, "C901 'MyModel.mymethod' is too complex (14)"), Violation(50, "N801 class names should use CapWords convention"), Violation(60, "T000 Todo note found."), Violation(70, "I100 statements are in the wrong order."), Violation(80, "B901 blind except: statement"), Violation(90, "D207 Docstring is under-indented"), 
Violation(100, "S100 Snippet found"), Violation(110, "Q000 Remove Single quotes"), Violation(120, "ABCXYZ000 Dummy"), ] assert expected_violations == quality.violations("../new_file.py") def test_no_quality_issues_newline(self, process_patcher): process_patcher((b"\n", b""), 0) quality = QualityReporter(flake8_driver) assert [] == quality.violations("file1.py") def test_no_quality_issues_emptystring(self, process_patcher): # Patch the output of `flake8` process_patcher((b"", b""), 0) # Parse the report quality = QualityReporter(flake8_driver) assert [] == quality.violations("file1.py") def test_quality_error(self, mocker, process_patcher): # Patch the output of `flake8` process_patcher((b"", "whoops Ƕئ".encode()), status_code=255) # Parse the report code = mocker.patch("diff_cover.violationsreporters.base.run_command_for_code") code.return_value = 0 quality = QualityReporter(flake8_driver) # Expect that the name is set assert quality.name() == "flake8" with pytest.raises(CommandError, match="whoops Ƕئ"): quality.violations("file1.py") def test_no_such_file(self): quality = QualityReporter(flake8_driver) # Expect that we get no results result = quality.violations("") assert result == [] def test_no_python_file(self): quality = QualityReporter(flake8_driver) file_paths = ["file1.coffee", "subdir/file2.js"] # Expect that we get no results because no Python files for path in file_paths: result = quality.violations(path) assert result == [] @pytest.mark.disable_all_files_exist def test_file_does_not_exist(self): quality = QualityReporter(flake8_driver) file_paths = ["ajshdjlasdhajksdh.py"] # Expect that we get no results because that file does not exist for path in file_paths: result = quality.violations(path) assert result == [] def test_quality_pregenerated_report(self): # When the user provides us with a pre-generated flake8 report # then use that instead of calling flake8 directly. 
flake8_reports = [ BytesIO( ( "\n" + dedent( """ path/to/file.py:1:17: E231 whitespace path/to/file.py:3:13: E225 whitespace another/file.py:7:1: E302 blank lines """ ).strip() + "\n" ).encode("utf-8") ), BytesIO( ( "\n" + dedent( """ path/to/file.py:24:2: W123 \u9134\u1912 another/file.py:50:1: E302 blank lines """ ).strip() + "\n" ).encode("utf-8") ), ] # Parse the report quality = QualityReporter(flake8_driver, reports=flake8_reports) # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("path/to/file.py") is None # Expect that we get the right violations expected_violations = [ Violation(1, "E231 whitespace"), Violation(3, "E225 whitespace"), Violation(24, "W123 \u9134\u1912"), ] # We're not guaranteed that the violations are returned # in any particular order. actual_violations = quality.violations("path/to/file.py") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations class TestPydocstlyeQualityReporterTest: """Tests for pydocstyle quality violations.""" def test_no_such_file(self): """Expect that we get no results.""" quality = QualityReporter(pydocstyle_driver) result = quality.violations("") assert result == [] def test_no_python_file(self): """Expect that we get no results because no Python files.""" quality = QualityReporter(pydocstyle_driver) file_paths = ["file1.coffee", "subdir/file2.js"] for path in file_paths: result = quality.violations(path) assert result == [] def test_quality(self, process_patcher): """Integration test.""" # Patch the output of `pydocstye` process_patcher( ( dedent( """ ../new_file.py:1 at module level: D100: Missing docstring in public module ../new_file.py:13 in public function `gather`: D103: Missing docstring in public function """ ) .strip() .encode("ascii"), "", ) ) expected_violations = [ Violation(1, "D100: Missing docstring in public module"), Violation(13, "D103: Missing docstring 
in public function"), ] # Parse the report quality = QualityReporter(pydocstyle_driver) # Expect that the name is set assert quality.name() == "pydocstyle" # Measured_lines is undefined for a # quality reporter since all lines are measured assert quality.measured_lines("../new_file.py") is None # Expect that we get violations for file1.py only # We're not guaranteed that the violations are returned # in any particular order. actual_violations = quality.violations("../new_file.py") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations class TestPylintQualityReporterTest: def test_no_such_file(self): quality = QualityReporter(PylintDriver()) # Expect that we get no results result = quality.violations("") assert result == [] def test_no_python_file(self): quality = QualityReporter(PylintDriver()) file_paths = ["file1.coffee", "subdir/file2.js"] # Expect that we get no results because no Python files for path in file_paths: result = quality.violations(path) assert result == [] def test_quality(self, process_patcher): # Patch the output of `pylint` process_patcher( ( dedent( """ file1.py:1: [C0111] Missing docstring file1.py:1: [C0111, func_1] Missing docstring file1.py:2: [W0612, cls_name.func] Unused variable 'd' file1.py:2: [W0511] TODO: Not the real way we'll store usages! 
file1.py:579: [F0401] Unable to import 'rooted_paths' file1.py:113: [W0613, cache_relation.clear_pk] Unused argument 'cls' file1.py:150: [F0010] error while code parsing ([Errno 2] No such file or directory) file1.py:149: [C0324, Foo.__dict__] Comma not followed by a space self.peer_grading._find_corresponding_module_for_location(Location('i4x','a','b','c','d')) file1.py:162: [R0801] Similar lines in 2 files ==file1:162 ==student.views:4 import json import logging import random file2.py:170: [R0801] Similar lines in 2 files ==file1:[170:172] ==student.views:[4:6] import foo import bar path/to/file2.py:100: [W0212, openid_login_complete] Access to a protected member """ ) .strip() .encode("ascii"), "", ) ) expected_violations = [ Violation(1, "C0111: Missing docstring"), Violation(1, "C0111: func_1: Missing docstring"), Violation(2, "W0612: cls_name.func: Unused variable 'd'"), Violation(2, "W0511: TODO: Not the real way we'll store usages!"), Violation(579, "F0401: Unable to import 'rooted_paths'"), Violation( 150, "F0010: error while code parsing ([Errno 2] No such file or directory)", ), Violation(149, "C0324: Foo.__dict__: Comma not followed by a space"), Violation(162, "R0801: Similar lines in 2 files"), Violation(170, "R0801: Similar lines in 2 files"), Violation(113, "W0613: cache_relation.clear_pk: Unused argument 'cls'"), ] # Parse the report quality = QualityReporter(PylintDriver()) # Expect that the name is set assert quality.name() == "pylint" # Measured_lines is undefined for a # quality reporter since all lines are measured assert quality.measured_lines("file1.py") is None # Expect that we get violations for file1.py only # We're not guaranteed that the violations are returned # in any particular order. 
actual_violations = quality.violations("file1.py") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations def test_unicode(self, process_patcher): process_patcher( ( dedent( """ file_\u6729.py:616: [W1401] Anomalous backslash in string: '\u5922'. String constant might be missing an r prefix. file.py:2: [W0612, cls_name.func_\u9492] Unused variable '\u2920' """ ).encode("utf-8"), "", ), 0, ) quality = QualityReporter(PylintDriver()) violations = quality.violations("file_\u6729.py") assert violations == [ Violation( 616, "W1401: Anomalous backslash in string: '\u5922'. " "String constant might be missing an r prefix.", ), ] violations = quality.violations("file.py") assert violations == [ Violation(2, "W0612: cls_name.func_\u9492: Unused variable '\u2920'") ] def test_unicode_continuation_char(self, process_patcher): process_patcher((b"file.py:2: [W1401]" b" Invalid char '\xc3'", ""), 0) # Since we are replacing characters we can't interpet, this should # return a valid string with the char replaced with '?' 
quality = QualityReporter(PylintDriver()) violations = quality.violations("file.py") assert violations == [Violation(2, "W1401: Invalid char '\ufffd'")] def test_non_integer_line_num(self, process_patcher): process_patcher( ( dedent( """ file.py:not_a_number: C0111: Missing docstring file.py:\u8911: C0111: Missing docstring """ ).encode("utf-8"), "", ), 0, ) # None of the violations have a valid line number, so they should all be skipped violations = QualityReporter(PylintDriver()).violations("file.py") assert violations == [] def test_quality_deprecation_warning(self, process_patcher): # Patch the output stderr/stdout and returncode of `pylint` process_patcher( ( b"file1.py:1: [C0111] Missing docstring\n" b"file1.py:1: [C0111, func_1] Missing docstring", b"Foobar: pylintrc deprecation warning", ), 0, ) # Parse the report quality = QualityReporter(PylintDriver()) actual_violations = quality.violations("file1.py") # Assert that pylint successfully runs and finds 2 violations assert len(actual_violations) == 2 def test_quality_error(self, mocker, process_patcher): # Patch the output stderr/stdout and returncode of `pylint` process_patcher( (b"file1.py:1: [C0111] Missing docstring", b"oops"), status_code=1 ) # Parse the report code = mocker.patch( "diff_cover.violationsreporters.violations_reporter.run_command_for_code" ) code.return_value = 0 quality = QualityReporter(PylintDriver()) # Expect an error with pytest.raises(CommandError): quality.violations("file1.py") def test_no_quality_issues_newline(self, process_patcher): process_patcher((b"\n", b""), 0) # Parse the report quality = QualityReporter(PylintDriver()) assert [] == quality.violations("file1.py") def test_no_quality_issues_emptystring(self, process_patcher): # Patch the output of `pylint` process_patcher((b"", b""), 0) # Parse the report quality = QualityReporter(PylintDriver()) assert [] == quality.violations("file1.py") def test_quality_pregenerated_report(self): # When the user provides us with a 
pre-generated pylint report # then use that instead of calling pylint directly. pylint_reports = [ BytesIO( dedent( """ path/to/file.py:1: [C0111] Missing docstring path/to/file.py:57: [W0511] TODO the name of this method is a little bit confusing another/file.py:41: [W1201, assign_default_role] Specify string format arguments as logging function parameters another/file.py:175: [C0322, Foo.bar] Operator not preceded by a space x=2+3 ^ Unicode: \u9404 \u1239 another/file.py:259: [C0103, bar] Invalid name "\u4920" for type variable (should match [a-z_][a-z0-9_]{2,30}$) """ ) .strip() .encode("utf-8") ), BytesIO( dedent( """ path/to/file.py:183: [C0103, Foo.bar.gettag] Invalid name "\u3240" for type argument (should match [a-z_][a-z0-9_]{2,30}$) another/file.py:183: [C0111, Foo.bar.gettag] Missing docstring """ ) .strip() .encode("utf-8") ), ] # Generate the violation report quality = QualityReporter(PylintDriver(), reports=pylint_reports) # Expect that we get the right violations expected_violations = [ Violation(1, "C0111: Missing docstring"), Violation( 57, "W0511: TODO the name of this method is a little bit confusing" ), Violation( 183, 'C0103: Foo.bar.gettag: Invalid name "\u3240" for type argument (should match [a-z_][a-z0-9_]{2,30}$)', ), ] # We're not guaranteed that the violations are returned # in any particular order. 
actual_violations = quality.violations("path/to/file.py") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations def test_quality_pregenerated_report_continuation_char(self): # The report contains a non-ASCII continuation char pylint_reports = [BytesIO(b"file.py:2: [W1401] Invalid char '\xc3'")] # Generate the violation report quality = QualityReporter(PylintDriver(), reports=pylint_reports) violations = quality.violations("file.py") # Expect that the char is replaced assert violations == [Violation(2, "W1401: Invalid char '\ufffd'")] def test_windows_paths(self, process_patcher): process_patcher( ("this\\is\\win.py:42: [C0111] Missing docstring", ""), 0, ) quality = QualityReporter(PylintDriver()) violations = quality.violations("this/is/win.py") assert violations == [Violation(42, "C0111: Missing docstring")] class JsQualityBaseReporterMixin: """ Generic JS linter tests. Assumes the linter is not available as a python library, but is available on the commandline. """ @pytest.fixture(autouse=True) def patcher(self, mocker): # Mock patch the installation of the linter self._mock_command_simple = mocker.patch( "diff_cover.violationsreporters.violations_reporter.run_command_for_code" ) self._mock_command_simple.return_value = 0 # Mock patch the linter results self._mock_communicate = mocker.patch.object(subprocess, "Popen") self.subproc_mock = mocker.MagicMock() self.subproc_mock.returncode = 0 def _get_out(self): """ get Object Under Test """ return None # pragma: no cover def test_quality(self): """ Test basic scenarios, including special characters that would appear in JavaScript and mixed quotation marks """ # Patch the output of the linter cmd return_string = ( "\n" + dedent( """ ../test_file.js: line 3, col 9, Missing "use strict" statement. ../test_file.js: line 10, col 17, '$hi' is defined but never used. 
""" ).strip() + "\n" ) self.subproc_mock.communicate.return_value = ( return_string.encode("utf-8"), b"", ) self._mock_communicate.return_value = self.subproc_mock # Parse the report quality = QualityReporter(self._get_out()) # Expect that the name is set assert quality.name() == self.quality_name # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("../blah.js") is None # Expect that we get the right violations expected_violations = [ Violation(3, 'Missing "use strict" statement.'), Violation(10, "'$hi' is defined but never used."), ] assert expected_violations == quality.violations("../test_file.js") def test_no_quality_issues_newline(self): # Patch the output of the linter cmd self.subproc_mock.communicate.return_value = (b"\n", b"") self._mock_communicate.return_value = self.subproc_mock # Parse the report quality = QualityReporter(self._get_out()) assert [] == quality.violations("test-file.js") def test_no_quality_issues_emptystring(self): # Patch the output of the linter cmd self.subproc_mock.communicate.return_value = (b"", b"") self._mock_communicate.return_value = self.subproc_mock # Parse the report quality = QualityReporter(self._get_out()) assert [] == quality.violations("file1.js") def test_quality_error(self, mocker, process_patcher): process_patcher((b"", "whoops Ƕئ".encode()), status_code=1) code = mocker.patch("diff_cover.violationsreporters.base.run_command_for_code") code.return_value = 0 # Parse the report quality = QualityReporter(self._get_out()) # Expect that the name is set assert quality.name() == self.quality_name with pytest.raises(CommandError, match="whoops Ƕئ"): quality.violations("file1.js") def test_no_such_file(self): quality = QualityReporter(self._get_out()) # Expect that we get no results result = quality.violations("") assert result == [] def test_no_js_file(self): quality = QualityReporter(self._get_out()) file_paths = ["file1.py", "subdir/file2.java"] # Expect that 
we get no results because no JS files for path in file_paths: result = quality.violations(path) assert result == [] def test_quality_pregenerated_report(self): # When the user provides us with a pre-generated linter report # then use that instead of calling linter directly. reports = [ BytesIO( ( "\n" + dedent( """ path/to/file.js: line 3, col 9, Missing "use strict" statement. path/to/file.js: line 10, col 130, Line is too long. another/file.js: line 1, col 1, 'require' is not defined. """ ).strip() + "\n" ).encode("utf-8") ), BytesIO( ( "\n" + dedent( """ path/to/file.js: line 12, col 14, \u9134\u1912 path/to/file.js: line 10, col 17, '$hi' is defined but never used. """ ).strip() + "\n" ).encode("utf-8") ), ] # Parse the report quality = QualityReporter(self._get_out(), reports=reports) # Measured_lines is undefined for # a quality reporter since all lines are measured assert quality.measured_lines("path/to/file.js") is None # Expect that we get the right violations expected_violations = [ Violation(3, 'Missing "use strict" statement.'), Violation(10, "Line is too long."), Violation(10, "'$hi' is defined but never used."), Violation(12, "\u9134\u1912"), ] # We're not guaranteed that the violations are returned # in any particular order. actual_violations = quality.violations("path/to/file.js") assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations def test_not_installed(self, mocker): """ If linter is not available via commandline, it should raise an EnvironmentError """ self._mock_command_simple = mocker.patch( "diff_cover.violationsreporters.violations_reporter.run_command_for_code" ) self._mock_command_simple.return_value = 1 with pytest.raises(EnvironmentError): QualityReporter(self._get_out()).violations("test.js") class TestJsHintQualityReporterTest(JsQualityBaseReporterMixin): """ JsHintQualityReporter tests. 
Assumes JsHint is not available as a python library, but is available on the commandline. """ quality_name = "jshint" def _get_out(self): return jshint_driver class TestESLintQualityReporterTest(JsQualityBaseReporterMixin): """ ESLintQualityReporter tests. Assumes ESLint is not available as a python library, but is available on the commandline. """ quality_name = "eslint" def _get_out(self): return EslintDriver() def test_report_root_path(self): reports = [ BytesIO( "foo/bar/path/to/file.js: line 3, col 9, Found issue".encode("utf-8") ), ] driver = self._get_out() driver.add_driver_args(report_root_path="foo/bar") quality = QualityReporter(driver, reports=reports) expected_violation = Violation(3, "Found issue") actual_violations = quality.violations("path/to/file.js") assert actual_violations == [expected_violation] class TestSimpleCommandTestCase: """ Tests that the exit code detected by the method is passed as the return value of the method. """ @pytest.fixture(autouse=True) def patcher(self, mocker): self._mock_communicate = mocker.patch.object(subprocess, "Popen") self.subproc_mock = mocker.MagicMock() def test_run_simple_failure(self): # command_simple should fail self.subproc_mock.returncode = 127 self._mock_communicate.return_value = self.subproc_mock # Create an implementation of BaseQualityReporter and explicitly call _run_command_simple bad_command = run_command_for_code("foo") assert bad_command == 127 def test_run_simple_success(self): self.subproc_mock.returncode = 0 self._mock_communicate.return_value = self.subproc_mock # Create an implementation of BaseQualityReporter and explicitly call _run_command_simple good_command = run_command_for_code("foo") assert good_command == 0 class TestSubprocessErrorTestCase: """Error in subprocess call(s)""" @pytest.fixture(autouse=True) def patcher(self, mocker): # when you create a new subprocess.Popen() object and call .communicate() # on it, raise an OSError popen = mocker.Mock() 
popen.return_value.communicate.side_effect = OSError mocker.patch("diff_cover.command_runner.subprocess.Popen", popen) def test_quality_reporter(self, mocker): mock_stderr = mocker.patch("sys.stderr", new_callable=StringIO) code = mocker.patch("diff_cover.violationsreporters.base.run_command_for_code") code.return_value = 0 reporter = QualityReporter(pycodestyle_driver) with pytest.raises(OSError): reporter.violations("path/to/file.py") assert mock_stderr.getvalue() == "pycodestyle path/to/file.py" class TestCppcheckQualityDriverTest: """Tests for cppcheck quality driver.""" def test_parse_report(self): """Basic report test parse""" expected_violations = { "src/foo.c": Violation( 123, "(error) Array 'yolo[4]' accessed at index 4, which is out of bounds.", ), } report = "[src/foo.c:123]: (error) Array 'yolo[4]' accessed at index 4, which is out of bounds." driver = CppcheckDriver() actual_violations = driver.parse_reports([report]) assert len(actual_violations) == len(expected_violations) for expected in expected_violations: assert expected in actual_violations diff_cover-7.4.0/verify.sh000077500000000000000000000006471436411411700155040ustar00rootroot00000000000000#!/bin/bash set -euo pipefail IFS=$'\n\t' black diff_cover tests --check isort diff_cover tests --check python -m pytest --cov --cov-report=xml tests git fetch origin main:refs/remotes/origin/main diff-cover --version diff-quality --version diff-cover coverage.xml --include-untracked diff-quality --violations flake8 --include-untracked diff-quality --violations pylint --include-untracked doc8 README.rst --ignore D001