pytest-codspeed-2.2.1/.github/workflows/ci.yml
name: CI
on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
  workflow_dispatch:

concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true

jobs:
  static-analysis:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - uses: pre-commit/action@v3.0.0
        with:
          extra_args: --all-files

  tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        config:
          - headless
          - pytest-benchmark
          - valgrind
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
        pytest-version:
          - ">=8.1.1"
        include:
          - config: pytest-benchmark
            python-version: "3.7"
            pytest-version: "<8.1.1"
    steps:
      - uses: actions/checkout@v4
      - name: "Set up Python ${{ matrix.python-version }}"
        uses: actions/setup-python@v5
        with:
          python-version: "${{ matrix.python-version }}"
      - if: matrix.config == 'valgrind' || matrix.config == 'pytest-benchmark'
        name: Install valgrind
        run: sudo apt-get install valgrind -y
      - name: Install dependencies with pytest${{ matrix.pytest-version }}
        run: pip install .[dev,compat] "pytest${{ matrix.pytest-version }}"
      - if: matrix.config != 'pytest-benchmark'
        name: Uninstall pytest-benchmark
        run: pip uninstall -y pytest-benchmark
      - name: Run tests
        run: pytest -vs

  all-checks:
    runs-on: ubuntu-latest
    steps:
      - run: echo "All CI checks passed."
    needs:
      - static-analysis
      - tests

pytest-codspeed-2.2.1/.github/workflows/codspeed.yml
name: benchmarks
on:
  push:
    branches: [master]
  pull_request:
    branches: [master]
  workflow_dispatch:

jobs:
  benchmarks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.12
        uses: actions/setup-python@v2
        with:
          python-version: "3.12"
      - name: Install local version of pytest-codspeed
        run: pip install .
      - name: Run benchmarks
        uses: CodSpeedHQ/action@main
        with:
          run: pytest tests/benchmarks/ --codspeed

pytest-codspeed-2.2.1/.github/workflows/release.yml
name: Release on tag
on:
  push:
    tags:
      - "v*"

permissions:
  id-token: write
  contents: write

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2
        with:
          python-version: "3.11"
      - name: Install dependencies
        run: pip install hatch
      - name: Build the library
        run: hatch build
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
      - name: Create a draft release
        run: |
          NEW_VERSION=$(hatch project metadata | jq -r '.version')
          gh release create v$NEW_VERSION --title "v$NEW_VERSION" --generate-notes -d
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

pytest-codspeed-2.2.1/.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.venvubuntu
.python-version
*.o

pytest-codspeed-2.2.1/.pre-commit-config.yaml
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.4.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.3.0
    hooks:
      - id: mypy
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.3.3
    hooks:
      - id: ruff
      - id: ruff-format

pytest-codspeed-2.2.1/.vscode/settings.json
{
"python.testing.pytestArgs": ["tests"],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true
}

pytest-codspeed-2.2.1/LICENSE
The MIT License (MIT)
Copyright (c) 2022 CodSpeed and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

pytest-codspeed-2.2.1/README.md
# pytest-codspeed
[CI](https://github.com/CodSpeedHQ/pytest-codspeed/actions/workflows/ci.yml)
[PyPI](https://pypi.org/project/pytest-codspeed)
[Discord](https://discord.com/invite/MxpaCfKSqF)
[CodSpeed Dashboard](https://codspeed.io/CodSpeedHQ/pytest-codspeed)
Pytest plugin to create CodSpeed benchmarks
## Requirements
**Python**: 3.7 and later
**pytest**: any recent version
## Installation
```shell
pip install pytest-codspeed
```
## Usage
### Creating benchmarks
Creating benchmarks with `pytest-codspeed` is compatible with the standard `pytest-benchmark` API, so existing `pytest-benchmark` suites can start using `pytest-codspeed` right away.
#### Marking a whole test function as a benchmark with `pytest.mark.benchmark`
```python
import pytest
from statistics import median


@pytest.mark.benchmark
def test_median_performance():
    return median([1, 2, 3, 4, 5])
```
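
The plugin also registers a `codspeed_benchmark` marker equivalent to `benchmark` (both markers are declared by the plugin, see `pytest_codspeed/plugin.py`), which can help when the shorter name clashes with another plugin's marker. A minimal sketch:

```python
import pytest
from statistics import median


@pytest.mark.codspeed_benchmark
def test_median_performance_explicit():
    # Selected for benchmarking exactly like @pytest.mark.benchmark
    return median([1, 2, 3, 4, 5])
```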
#### Benchmarking selected lines of a test function with the `benchmark` fixture
```python
import pytest
from statistics import mean, median


def test_mean_performance(benchmark):
    # Precompute some data useful for the benchmark but that should not be
    # included in the benchmark time
    data = [1, 2, 3, 4, 5]

    # Benchmark the execution of the function
    benchmark(lambda: mean(data))


def test_mean_and_median_performance(benchmark):
    # Precompute some data useful for the benchmark but that should not be
    # included in the benchmark time
    data = [1, 2, 3, 4, 5]

    # Benchmark the execution of the function:
    # The `@benchmark` decorator will automatically call the function and
    # measure its execution
    @benchmark
    def bench():
        mean(data)
        median(data)
```
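
The `benchmark` fixture also forwards any extra positional and keyword arguments to the benchmarked callable and passes its return value back to the test, mirroring the `pytest-benchmark` calling convention. A minimal sketch (the `add` helper is illustrative):

```python
def add(a, b=0):
    return a + b


def test_addition_with_arguments(benchmark):
    # Arguments after the callable are forwarded to it, and the return
    # value of the benchmarked call is handed back to the test.
    result = benchmark(add, 25, b=15)
    assert result == 40
```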
### Running benchmarks
#### Testing the benchmarks locally
If you want to run only the benchmarks tests locally, you can use the `--codspeed` pytest flag:
```shell
pytest tests/ --codspeed
```
> **Note:** Running `pytest-codspeed` locally will not produce any performance reporting. It's only useful for making sure that your benchmarks are working as expected. If you want to get performance reporting, you should run the benchmarks in your CI.
#### In your CI
You can use the [CodSpeedHQ/action](https://github.com/CodSpeedHQ/action) to run the benchmarks in GitHub Actions and upload the results to CodSpeed.
Example workflow:
```yaml
name: benchmarks

on:
  push:
    branches:
      - "main" # or "master"
  pull_request:

jobs:
  benchmarks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
        with:
          python-version: "3.9"
      - name: Install dependencies
        run: pip install -r requirements.txt
      - name: Run benchmarks
        uses: CodSpeedHQ/action@v2
        with:
          token: ${{ secrets.CODSPEED_TOKEN }}
          run: pytest tests/ --codspeed
```

pytest-codspeed-2.2.1/pyproject.toml
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[project.urls]
Homepage = "https://codspeed.io/"
Documentation = "https://docs.codspeed.io/"
Source = "https://github.com/CodSpeedHQ/pytest-codspeed"
[project]
name = "pytest-codspeed"
dynamic = ["version"]
description = "Pytest plugin to create CodSpeed benchmarks"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.7"
authors = [{ name = "Arthur Pastel", email = "arthur@codspeed.io" }]
keywords = ["codspeed", "benchmark", "performance", "pytest"]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Framework :: Pytest",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Software Development :: Testing",
"Topic :: System :: Benchmark",
"Topic :: Utilities",
"Typing :: Typed",
]
dependencies = [
"cffi >= 1.15.1",
# cffi doesn't automatically install setuptools with python 3.12+
# cf https://github.com/python-cffi/cffi/releases/tag/v1.16.0
"setuptools; python_full_version >= '3.12.0'",
"pytest>=3.8",
"filelock >= 3.12.2",
]
[project.optional-dependencies]
lint = ["mypy ~= 1.3.0", "ruff ~= 0.3.3"]
compat = ["pytest-benchmark ~= 4.0.0", "pytest-xdist ~= 2.0.0"]
test = ["pytest ~= 7.0", "pytest-cov ~= 4.0.0"]
[project.entry-points]
pytest11 = { codspeed = "pytest_codspeed.plugin" }
[tool.hatch.envs.default]
python = "3.11"
features = ["lint", "test", "compat"]
[tool.hatch.envs.test]
features = ["test"]
[[tool.hatch.envs.test.matrix]]
python = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
features = ["compat", "test"]
[tool.hatch.version]
path = "src/pytest_codspeed/__init__.py"
[tool.hatch.build.targets.sdist]
include = ["/src"]
[tool.mypy]
python_version = "3.12"
[tool.ruff]
line-length = 88
[tool.ruff.lint]
select = ["E", "F", "I", "C"]
[tool.isort]
line_length = 88
multi_line_output = 3
include_trailing_comma = true
use_parentheses = true
force_grid_wrap = 0
float_to_top = true
[tool.pytest.ini_options]
addopts = "--ignore=tests/benchmarks --ignore=tests/examples"
filterwarnings = ["ignore::DeprecationWarning:pytest_benchmark.utils.*:"]
[tool.coverage.run]
branch = true
[tool.coverage.report]
include = ["src/*", "tests/*"]
omit = ["**/conftest.py"]
exclude_lines = [
"pragma: no cover",
"if TYPE_CHECKING:",
"@pytest.mark.skip",
"@abstractmethod",
]

pytest-codspeed-2.2.1/scripts/release.sh
#!/bin/bash
# Usage: ./scripts/release.sh
set -ex
if [ $# -ne 1 ]; then
echo "Usage: ./release.sh "
exit 1
fi
hatch version $1
NEW_VERSION=$(hatch version)
git add src/pytest_codspeed/__init__.py
# Fail if there are any unstaged changes left
git diff --exit-code
git commit -am "Release v$NEW_VERSION 🚀"
git tag v$NEW_VERSION -m "Release v$NEW_VERSION 🚀"
git push --follow-tags

pytest-codspeed-2.2.1/src/pytest_codspeed/__init__.py
__version__ = "2.2.1"
from .plugin import BenchmarkFixture
__all__ = ["BenchmarkFixture", "__version__"]

pytest-codspeed-2.2.1/src/pytest_codspeed/_wrapper/.gitignore
dist_callgrind_wrapper.*

pytest-codspeed-2.2.1/src/pytest_codspeed/_wrapper/__init__.py
import os
from typing import TYPE_CHECKING

from cffi import FFI  # type: ignore
from filelock import FileLock

if TYPE_CHECKING:
    from .wrapper import lib as LibType

_wrapper_dir = os.path.dirname(os.path.abspath(__file__))


def _get_ffi():
    ffi = FFI()
    with open(f"{_wrapper_dir}/wrapper.h") as f:
        ffi.cdef(f.read())
    ffi.set_source(
        "dist_callgrind_wrapper",
        '#include "wrapper.h"',
        sources=["wrapper.c"],
    )
    return ffi


def get_lib() -> "LibType":
    try:
        ffi = _get_ffi()
        build_lock = FileLock(f"{_wrapper_dir}/build.lock")
        with build_lock:
            ffi.compile(
                target="dist_callgrind_wrapper.*",
                tmpdir=_wrapper_dir,
            )

        from .dist_callgrind_wrapper import lib  # type: ignore

        return lib
    except Exception as e:
        raise Exception("Failed to compile the wrapper") from e

pytest-codspeed-2.2.1/src/pytest_codspeed/_wrapper/wrapper.c
#include <valgrind/callgrind.h>
void start_instrumentation() {
    CALLGRIND_START_INSTRUMENTATION;
}

void stop_instrumentation() {
    CALLGRIND_STOP_INSTRUMENTATION;
}

void dump_stats() {
    CALLGRIND_DUMP_STATS;
}

void dump_stats_at(char *s) {
    CALLGRIND_DUMP_STATS_AT(s);
}

void zero_stats() {
    CALLGRIND_ZERO_STATS;
}

void toggle_collect() {
    CALLGRIND_TOGGLE_COLLECT;
}

pytest-codspeed-2.2.1/src/pytest_codspeed/_wrapper/wrapper.h
void start_instrumentation();
void stop_instrumentation();
void dump_stats();
void dump_stats_at(char *s);
void zero_stats();
void toggle_collect();

pytest-codspeed-2.2.1/src/pytest_codspeed/_wrapper/wrapper.pyi
class lib:
    @staticmethod
    def start_instrumentation() -> None: ...
    @staticmethod
    def stop_instrumentation() -> None: ...
    @staticmethod
    def dump_stats() -> None: ...
    @staticmethod
    def dump_stats_at(trigger: bytes) -> None: ...
    @staticmethod
    def zero_stats() -> None: ...
    @staticmethod
    def toggle_collect() -> None: ...

pytest-codspeed-2.2.1/src/pytest_codspeed/plugin.py
import gc
import os
import pkgutil
import sys
from dataclasses import dataclass, field
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import pytest
from _pytest.fixtures import FixtureManager
from pytest_codspeed.utils import get_git_relative_uri
from . import __version__
from ._wrapper import get_lib
if TYPE_CHECKING:
from ._wrapper import LibType
IS_PYTEST_BENCHMARK_INSTALLED = pkgutil.find_loader("pytest_benchmark") is not None
SUPPORTS_PERF_TRAMPOLINE = sys.version_info >= (3, 12)
BEFORE_PYTEST_8_1_1 = pytest.version_tuple < (8, 1, 1)
@pytest.hookimpl(trylast=True)
def pytest_addoption(parser: "pytest.Parser"):
group = parser.getgroup("CodSpeed benchmarking")
group.addoption(
"--codspeed",
action="store_true",
default=False,
help="Enable codspeed (not required when using the CodSpeed action)",
)
@dataclass(unsafe_hash=True)
class CodSpeedPlugin:
is_codspeed_enabled: bool
should_measure: bool
lib: Optional["LibType"]
disabled_plugins: Tuple[str, ...]
benchmark_count: int = field(default=0, hash=False, compare=False)
PLUGIN_NAME = "codspeed_plugin"
def get_plugin(config: "pytest.Config") -> "CodSpeedPlugin":
return config.pluginmanager.get_plugin(PLUGIN_NAME)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: "pytest.Config"):
config.addinivalue_line(
"markers", "codspeed_benchmark: mark an entire test for codspeed benchmarking"
)
config.addinivalue_line(
"markers", "benchmark: mark an entire test for codspeed benchmarking"
)
is_codspeed_enabled = (
config.getoption("--codspeed") or os.environ.get("CODSPEED_ENV") is not None
)
should_measure = os.environ.get("CODSPEED_ENV") is not None
lib = get_lib() if should_measure else None
if lib is not None:
lib.dump_stats_at(f"Metadata: pytest-codspeed {__version__}".encode("ascii"))
disabled_plugins: List[str] = []
# Disable pytest-benchmark if codspeed is enabled
if is_codspeed_enabled and IS_PYTEST_BENCHMARK_INSTALLED:
object.__setattr__(config.option, "benchmark_disable", True)
config.pluginmanager.set_blocked("pytest-benchmark")
disabled_plugins.append("pytest-benchmark")
plugin = CodSpeedPlugin(
is_codspeed_enabled=is_codspeed_enabled,
should_measure=should_measure,
lib=lib,
disabled_plugins=tuple(disabled_plugins),
)
config.pluginmanager.register(plugin, PLUGIN_NAME)
def pytest_plugin_registered(plugin, manager: "pytest.PytestPluginManager"):
"""Patch the benchmark fixture to use the codspeed one if codspeed is enabled"""
if IS_PYTEST_BENCHMARK_INSTALLED and isinstance(plugin, FixtureManager):
fixture_manager = plugin
codspeed_plugin: CodSpeedPlugin = manager.get_plugin(PLUGIN_NAME)
if codspeed_plugin.is_codspeed_enabled:
codspeed_benchmark_fixtures = plugin.getfixturedefs(
"codspeed_benchmark",
fixture_manager.session.nodeid
if BEFORE_PYTEST_8_1_1
else fixture_manager.session,
)
assert codspeed_benchmark_fixtures is not None
# Archive the pytest-benchmark fixture
fixture_manager._arg2fixturedefs["__benchmark"] = (
fixture_manager._arg2fixturedefs["benchmark"]
)
# Replace the pytest-benchmark fixture with the codspeed one
fixture_manager._arg2fixturedefs["benchmark"] = codspeed_benchmark_fixtures
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: "pytest.Config"):
out = [
f"codspeed: {__version__} "
f"(callgraph: {'enabled' if SUPPORTS_PERF_TRAMPOLINE else 'not supported'})"
]
plugin = get_plugin(config)
if plugin.is_codspeed_enabled and not plugin.should_measure:
out.append(
"\033[1m"
"NOTICE: codspeed is enabled, but no performance measurement"
" will be made since it's running in an unknown environment."
"\033[0m"
)
if len(plugin.disabled_plugins) > 0:
out.append(
"\033[93mCodSpeed had to disable the following plugins: "
f"{', '.join(plugin.disabled_plugins)}\033[0m"
)
return "\n".join(out)
def has_benchmark_fixture(item: "pytest.Item") -> bool:
item_fixtures = getattr(item, "fixturenames", [])
return "benchmark" in item_fixtures or "codspeed_benchmark" in item_fixtures
def has_benchmark_marker(item: "pytest.Item") -> bool:
return (
item.get_closest_marker("codspeed_benchmark") is not None
or item.get_closest_marker("benchmark") is not None
)
def should_benchmark_item(item: "pytest.Item") -> bool:
return has_benchmark_fixture(item) or has_benchmark_marker(item)
@pytest.hookimpl()
def pytest_sessionstart(session: "pytest.Session"):
plugin = get_plugin(session.config)
if plugin.is_codspeed_enabled:
plugin.benchmark_count = 0
if plugin.should_measure and SUPPORTS_PERF_TRAMPOLINE:
sys.activate_stack_trampoline("perf") # type: ignore
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(
session: "pytest.Session", config: "pytest.Config", items: "List[pytest.Item]"
):
plugin = get_plugin(config)
if plugin.is_codspeed_enabled:
deselected = []
selected = []
for item in items:
if should_benchmark_item(item):
selected.append(item)
else:
deselected.append(item)
config.hook.pytest_deselected(items=deselected)
items[:] = selected
def _run_with_instrumentation(
lib: "LibType",
nodeId: str,
config: "pytest.Config",
fn: Callable[..., Any],
*args,
**kwargs,
):
is_gc_enabled = gc.isenabled()
if is_gc_enabled:
gc.collect()
gc.disable()
result = None
def __codspeed_root_frame__():
nonlocal result
result = fn(*args, **kwargs)
if SUPPORTS_PERF_TRAMPOLINE:
# Warmup CPython performance map cache
__codspeed_root_frame__()
lib.zero_stats()
lib.start_instrumentation()
__codspeed_root_frame__()
lib.stop_instrumentation()
uri = get_git_relative_uri(nodeId, config.rootpath)
lib.dump_stats_at(uri.encode("ascii"))
if is_gc_enabled:
gc.enable()
return result
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_protocol(item: "pytest.Item", nextitem: Union["pytest.Item", None]):
plugin = get_plugin(item.config)
if not plugin.is_codspeed_enabled or not should_benchmark_item(item):
return (
None # Defer to the default test protocol since no benchmarking is needed
)
if has_benchmark_fixture(item):
return None # Instrumentation is handled by the fixture
plugin.benchmark_count += 1
if not plugin.should_measure:
return None # Benchmark counted but will be run in the default protocol
# Setup phase
reports = []
ihook = item.ihook
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
setup_call = pytest.CallInfo.from_call(
lambda: ihook.pytest_runtest_setup(item=item, nextitem=nextitem), "setup"
)
setup_report = ihook.pytest_runtest_makereport(item=item, call=setup_call)
ihook.pytest_runtest_logreport(report=setup_report)
reports.append(setup_report)
# Run phase
if setup_report.passed and not item.config.getoption("setuponly"):
assert plugin.lib is not None
runtest_call = pytest.CallInfo.from_call(
lambda: _run_with_instrumentation(
plugin.lib, item.nodeid, item.config, item.runtest
),
"call",
)
runtest_report = ihook.pytest_runtest_makereport(item=item, call=runtest_call)
ihook.pytest_runtest_logreport(report=runtest_report)
reports.append(runtest_report)
# Teardown phase
teardown_call = pytest.CallInfo.from_call(
lambda: ihook.pytest_runtest_teardown(item=item, nextitem=nextitem), "teardown"
)
teardown_report = ihook.pytest_runtest_makereport(item=item, call=teardown_call)
ihook.pytest_runtest_logreport(report=teardown_report)
reports.append(teardown_report)
ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return reports # Deny further protocol hooks execution
T = TypeVar("T")
class BenchmarkFixture:
"""The fixture that can be used to benchmark a function."""
def __init__(self, request: "pytest.FixtureRequest"):
self.extra_info: Dict = {}
self._request = request
def __call__(self, func: Callable[..., T], *args: Any, **kwargs: Any) -> T:
config = self._request.config
plugin = get_plugin(config)
plugin.benchmark_count += 1
if plugin.is_codspeed_enabled and plugin.should_measure:
assert plugin.lib is not None
return _run_with_instrumentation(
plugin.lib, self._request.node.nodeid, config, func, *args, **kwargs
)
else:
return func(*args, **kwargs)
@pytest.fixture(scope="function")
def codspeed_benchmark(request: "pytest.FixtureRequest") -> Callable:
return BenchmarkFixture(request)
if not IS_PYTEST_BENCHMARK_INSTALLED:
@pytest.fixture(scope="function")
def benchmark(codspeed_benchmark, request: "pytest.FixtureRequest"):
"""
Compatibility with pytest-benchmark
"""
return codspeed_benchmark
@pytest.hookimpl()
def pytest_sessionfinish(session: "pytest.Session", exitstatus):
plugin = get_plugin(session.config)
if plugin.is_codspeed_enabled:
reporter = session.config.pluginmanager.get_plugin("terminalreporter")
count_suffix = "benchmarked" if plugin.should_measure else "benchmark tested"
reporter.write_sep(
"=",
f"{plugin.benchmark_count} {count_suffix}",
)

pytest-codspeed-2.2.1/src/pytest_codspeed/utils.py
from pathlib import Path

def get_git_relative_path(abs_path: Path) -> Path:
    """Get the path relative to the git root directory. If the path is not
    inside a git repository, the original path itself is returned.
    """
    git_path = Path(abs_path).resolve()
    while (
        git_path != git_path.parent
    ):  # stops at root since parent of root is root itself
        if (git_path / ".git").exists():
            return abs_path.resolve().relative_to(git_path)
        git_path = git_path.parent
    return abs_path


def get_git_relative_uri(uri: str, pytest_rootdir: Path) -> str:
    """Get the benchmark uri relative to the git root dir.

    Args:
        uri (str): the benchmark uri, for example:
            testing/test_excinfo.py::TestFormattedExcinfo::test_repr_source
        pytest_rootdir (str): the pytest root dir, for example:
            /home/user/gitrepo/folder

    Returns:
        str: the benchmark uri relative to the git root dir, for example:
            folder/testing/test_excinfo.py::TestFormattedExcinfo::test_repr_source
    """
    file_path, function_path = uri.split("::", 1)
    absolute_file_path = pytest_rootdir / Path(file_path)
    relative_git_path = get_git_relative_path(absolute_file_path)
    return f"{str(relative_git_path)}::{function_path}"

pytest-codspeed-2.2.1/tests/benchmarks/test_bench_fibo.py
def recursive_fibonacci(n: int) -> int:
    if n in [0, 1]:
        return n
    return recursive_fibonacci(n - 1) + recursive_fibonacci(n - 2)


def recursive_cached_fibonacci(n: int) -> int:
    cache = {0: 0, 1: 1}

    def fibo(n) -> int:
        if n in cache:
            return cache[n]
        cache[n] = fibo(n - 1) + fibo(n - 2)
        return cache[n]

    return fibo(n)


def iterative_fibonacci(n: int) -> int:
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a


def test_iterative_fibo_10(benchmark):
    @benchmark
    def _():
        iterative_fibonacci(10)


def test_recursive_fibo_10(benchmark):
    @benchmark
    def _():
        recursive_fibonacci(10)


def test_recursive_fibo_20(benchmark):
    @benchmark
    def _():
        recursive_fibonacci(20)


def test_recursive_cached_fibo_10(benchmark):
    @benchmark
    def _():
        recursive_cached_fibonacci(10)


def test_recursive_cached_fibo_100(benchmark):
    @benchmark
    def _():
        recursive_cached_fibonacci(100)

pytest-codspeed-2.2.1/tests/conftest.py
import importlib.util
import shutil
import sys

import pytest

pytest_plugins = ["pytester"]

IS_PYTEST_BENCHMARK_INSTALLED = importlib.util.find_spec("pytest_benchmark") is not None
skip_without_pytest_benchmark = pytest.mark.skipif(
    not IS_PYTEST_BENCHMARK_INSTALLED, reason="pytest_benchmark not installed"
)
skip_with_pytest_benchmark = pytest.mark.skipif(
    IS_PYTEST_BENCHMARK_INSTALLED, reason="pytest_benchmark installed"
)
if IS_PYTEST_BENCHMARK_INSTALLED:
    pytest_plugins.append("pytest_benchmark")
    print(
        "NOTICE: Testing with pytest-benchmark compatibility",
        file=sys.stderr,
        flush=True,
    )

IS_VALGRIND_INSTALLED = shutil.which("valgrind") is not None
skip_without_valgrind = pytest.mark.skipif(
    not IS_VALGRIND_INSTALLED, reason="valgrind not installed"
)
if IS_VALGRIND_INSTALLED:
    print("NOTICE: Testing with valgrind compatibility", file=sys.stderr, flush=True)

IS_PERF_TRAMPOLINE_SUPPORTED = sys.version_info >= (3, 12)
skip_without_perf_trampoline = pytest.mark.skipif(
    not IS_PERF_TRAMPOLINE_SUPPORTED, reason="perf trampoline is not supported"
)
skip_with_perf_trampoline = pytest.mark.skipif(
    IS_PERF_TRAMPOLINE_SUPPORTED, reason="perf trampoline is supported"
)

pytest-codspeed-2.2.1/tests/examples/test_addition_fixture.py
def test_some_addition_performance(benchmark):
    @benchmark
    def _():
        return 1 + 1

pytest-codspeed-2.2.1/tests/test_pytest_plugin.py
import os
from contextlib import contextmanager
import pytest
from conftest import (
IS_PERF_TRAMPOLINE_SUPPORTED,
skip_with_perf_trampoline,
skip_with_pytest_benchmark,
skip_without_perf_trampoline,
skip_without_pytest_benchmark,
skip_without_valgrind,
)
@pytest.fixture(scope="function")
def codspeed_env(monkeypatch):
@contextmanager
def ctx_manager():
monkeypatch.setenv("CODSPEED_ENV", "1")
try:
yield
finally:
monkeypatch.delenv("CODSPEED_ENV", raising=False)
return ctx_manager
def test_plugin_enabled_without_env(pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
def test_some_addition_performance(benchmark):
@benchmark
def _():
return 1 + 1
"""
)
result = pytester.runpytest("--codspeed")
result.stdout.fnmatch_lines(
[
(
"*NOTICE: codspeed is enabled, but no "
"performance measurement will be made*"
),
"*1 benchmark tested*",
"*1 passed*",
]
)
def test_plugin_enabled_with_kwargs(pytester: pytest.Pytester, codspeed_env) -> None:
pytester.makepyfile(
"""
def test_arg_kwarg_addition(benchmark):
def fn(arg, kwarg=None):
assert arg + kwarg == 40
benchmark(fn, 25, kwarg=15)
"""
)
result = pytester.runpytest("--codspeed")
result.stdout.fnmatch_lines(["*1 benchmark tested*"])
@skip_without_valgrind
@skip_without_perf_trampoline
def test_bench_enabled_header_with_perf(
pytester: pytest.Pytester, codspeed_env
) -> None:
pytester.copy_example("tests/examples/test_addition_fixture.py")
with codspeed_env():
result = pytester.runpytest()
result.stdout.fnmatch_lines(["codspeed: * (callgraph: enabled)"])
@skip_without_valgrind
@skip_with_perf_trampoline
def test_bench_enabled_header_without_perf(
pytester: pytest.Pytester, codspeed_env
) -> None:
pytester.copy_example("tests/examples/test_addition_fixture.py")
with codspeed_env():
result = pytester.runpytest()
result.stdout.fnmatch_lines(["codspeed: * (callgraph: not supported)"])
@skip_without_valgrind
def test_plugin_enabled_by_env(pytester: pytest.Pytester, codspeed_env) -> None:
pytester.copy_example("tests/examples/test_addition_fixture.py")
with codspeed_env():
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 benchmarked*", "*1 passed*"])
@skip_without_valgrind
def test_plugin_enabled_and_env(pytester: pytest.Pytester, codspeed_env) -> None:
pytester.copy_example("tests/examples/test_addition_fixture.py")
with codspeed_env():
result = pytester.runpytest("--codspeed")
result.stdout.fnmatch_lines(["*1 benchmarked*", "*1 passed*"])
@skip_without_valgrind
def test_plugin_enabled_and_env_bench_run_once(
pytester: pytest.Pytester, codspeed_env
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.benchmark
def test_noisy_bench_marked():
print() # make sure noise is on its own line
print("I'm noisy marked!!!")
print()
def test_noisy_bench_fxt(benchmark):
@benchmark
def _():
print() # make sure noise is on its own line
print("I'm noisy fixtured!!!")
print()
"""
)
EXPECTED_OUTPUT_COUNT = 2 if IS_PERF_TRAMPOLINE_SUPPORTED else 1
with codspeed_env():
run_result = pytester.runpytest("--codspeed", "-s")
print(run_result.stdout.str())
assert run_result.outlines.count("I'm noisy marked!!!") == EXPECTED_OUTPUT_COUNT
assert (
run_result.outlines.count("I'm noisy fixtured!!!") == EXPECTED_OUTPUT_COUNT
)
@skip_without_valgrind
def test_plugin_enabled_and_env_bench_hierachy_called(
pytester: pytest.Pytester, codspeed_env
) -> None:
pytester.makepyfile(
"""
import pytest
class TestGroup:
def setup_method(self):
print(); print("Setup called")
def teardown_method(self):
print(); print("Teardown called")
@pytest.mark.benchmark
def test_child(self):
print(); print("Test called")
"""
)
with codspeed_env():
result = pytester.runpytest("--codspeed", "-s")
result.stdout.fnmatch_lines(
[
"Setup called",
"Test called",
"Teardown called",
]
)
def test_plugin_disabled(pytester: pytest.Pytester) -> None:
pytester.copy_example("tests/examples/test_addition_fixture.py")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
@skip_without_valgrind
def test_plugin_enabled_nothing_to_benchmark(
pytester: pytest.Pytester, codspeed_env
) -> None:
pytester.makepyfile(
"""
def test_some_addition_performance():
return 1 + 1
"""
)
with codspeed_env():
result = pytester.runpytest("--codspeed")
result.stdout.fnmatch_lines(["*0 benchmarked*", "*1 deselected*"])
def test_plugin_only_benchmark_collection(pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.codspeed_benchmark
def test_some_addition_performance():
return 1 + 1
@pytest.mark.benchmark
def test_some_addition_performance_shorthand():
return 1 + 1
def test_some_wrapped_benchmark(benchmark):
@benchmark
def _():
hello = "hello"
def test_another_useless_thing():
assert True
"""
)
collection_result = pytester.runpytest("--codspeed", "--collect-only")
collection_result.stdout.fnmatch_lines_random(
[
"**",
"**",
"**",
"*3/4 tests collected (1 deselected)*",
],
)
collection_result = pytester.runpytest(
"--codspeed", "--collect-only", "-k", "test_some_wrapped_benchmark"
)
collection_result.stdout.fnmatch_lines_random(
[
"**",
"*1/4 tests collected (3 deselected)*",
],
)
@skip_without_pytest_benchmark
def test_pytest_benchmark_compatibility(pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
def test_some_wrapped_benchmark(benchmark):
@benchmark
def _():
hello = "hello"
"""
)
result = pytester.runpytest("--benchmark-only")
result.stdout.fnmatch_lines_random(
[
"*benchmark: 1 tests*",
"*Name*",
"*test_some_wrapped_benchmark*",
"*Legend:*",
"*Outliers:*",
"*OPS: Operations Per Second*",
"*Outliers:*",
"*1 passed*",
]
)
def test_pytest_benchmark_extra_info(pytester: pytest.Pytester) -> None:
"""https://pytest-benchmark.readthedocs.io/en/latest/usage.html#extra-info"""
pytester.makepyfile(
"""
import time
def test_my_stuff(benchmark):
benchmark.extra_info['foo'] = 'bar'
benchmark(time.sleep, 0.02)
"""
)
result = pytester.runpytest("--codspeed")
assert result.ret == 0, "the run should have succeeded"
def test_pytest_benchmark_return_value(pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
def calculate_something():
return 1 + 1
def test_my_stuff(benchmark):
value = benchmark(calculate_something)
assert value == 2
"""
)
result = pytester.runpytest("--codspeed")
assert result.ret == 0, "the run should have succeeded"
@skip_without_valgrind
@skip_without_perf_trampoline
def test_perf_maps_generation(pytester: pytest.Pytester, codspeed_env) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.benchmark
def test_some_addition_marked():
return 1 + 1
def test_some_addition_fixtured(benchmark):
@benchmark
def fixtured_child():
return 1 + 1
"""
)
with codspeed_env():
result = pytester.runpytest("--codspeed")
result.stdout.fnmatch_lines(["*2 benchmarked*", "*2 passed*"])
current_pid = os.getpid()
perf_filepath = f"/tmp/perf-{current_pid}.map"
print(perf_filepath)
with open(perf_filepath, "r") as perf_file:
lines = perf_file.readlines()
assert any(
"py::_run_with_instrumentation..__codspeed_root_frame__" in line
for line in lines
), "No root frame found in perf map"
assert any(
"py::test_some_addition_marked" in line for line in lines
), "No marked test frame found in perf map"
assert any(
"py::test_some_addition_fixtured" in line for line in lines
), "No fixtured test frame found in perf map"
assert any(
"py::test_some_addition_fixtured..fixtured_child" in line
for line in lines
), "No fixtured child test frame found in perf map"
@skip_without_valgrind
@skip_with_pytest_benchmark
def test_pytest_xdist_concurrency_compatibility(
pytester: pytest.Pytester, codspeed_env
) -> None:
pytester.makepyfile(
"""
import time, pytest
def do_something():
time.sleep(1)
@pytest.mark.parametrize("i", range(256))
def test_my_stuff(benchmark, i):
benchmark(do_something)
"""
)
# Run the test multiple times to reduce the chance of a false positive
ITERATIONS = 5
for i in range(ITERATIONS):
with codspeed_env():
result = pytester.runpytest("--codspeed", "-n", "128")
assert result.ret == 0, "the run should have succeeded"
result.stdout.fnmatch_lines(["*256 passed*"])

pytest-codspeed-2.2.1/tests/test_utils.py
from pathlib import Path
from unittest.mock import patch

from pytest_codspeed.utils import get_git_relative_path, get_git_relative_uri


def test_get_git_relative_path_found():
    with patch.object(
        Path, "exists", lambda self: str(self) == "/home/user/gitrepo/.git"
    ):
        path = Path("/home/user/gitrepo/folder/nested_folder")
        assert get_git_relative_path(path) == Path("folder/nested_folder")


def test_get_git_relative_path_not_found():
    with patch.object(Path, "exists", lambda self: False):
        path = Path("/home/user/gitrepo/folder")
        assert get_git_relative_path(path) == path


def test_get_git_relative_uri():
    with patch.object(
        Path, "exists", lambda self: str(self) == "/home/user/gitrepo/.git"
    ):
        pytest_rootdir = Path("/home/user/gitrepo/pytest_root")
        uri = "testing/test_excinfo.py::TestFormattedExcinfo::test_fn"
        assert (
            get_git_relative_uri(uri, pytest_rootdir)
            == "pytest_root/testing/test_excinfo.py::TestFormattedExcinfo::test_fn"
        )