ionelmc-pytest-benchmark-5edf251/.bumpversion.cfg
[bumpversion]
current_version = 5.1.0
commit = True
tag = True
[bumpversion:file:setup.py]
search = version='{current_version}'
replace = version='{new_version}'
[bumpversion:file (badge):README.rst]
search = /v{current_version}.svg
replace = /v{new_version}.svg
[bumpversion:file (link):README.rst]
search = /v{current_version}...main
replace = /v{new_version}...main
[bumpversion:file:docs/conf.py]
search = version = release = '{current_version}'
replace = version = release = '{new_version}'
[bumpversion:file:src/pytest_benchmark/__init__.py]
search = __version__ = '{current_version}'
replace = __version__ = '{new_version}'
[bumpversion:file:.cookiecutterrc]
search = version: {current_version}
replace = version: {new_version}
ionelmc-pytest-benchmark-5edf251/.cookiecutterrc
# Generated by cookiepatcher, a small shim around cookiecutter (pip install cookiepatcher)
default_context:
c_extension_optional: 'no'
c_extension_support: 'no'
codacy: 'no'
codacy_projectid: '[Get ID from https://app.codacy.com/app/ionelmc/pytest-benchmark/settings]'
codeclimate: 'no'
codecov: 'yes'
command_line_interface: argparse
command_line_interface_bin_name: py.test-benchmark
coveralls: 'yes'
distribution_name: pytest-benchmark
email: contact@ionelmc.ro
formatter_quote_style: single
full_name: Ionel Cristian Mărieș
function_name: compute
github_actions: 'yes'
github_actions_osx: 'yes'
github_actions_windows: 'yes'
license: BSD 2-Clause License
module_name: core
package_name: pytest_benchmark
pre_commit: 'yes'
project_name: pytest-benchmark
project_short_description: A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer.
pypi_badge: 'yes'
pypi_disable_upload: 'no'
release_date: '2022-10-26'
repo_hosting: github.com
repo_hosting_domain: github.com
repo_main_branch: main
repo_name: pytest-benchmark
repo_username: ionelmc
scrutinizer: 'no'
setup_py_uses_setuptools_scm: 'no'
sphinx_docs: 'yes'
sphinx_docs_hosting: https://pytest-benchmark.readthedocs.io/
sphinx_doctest: 'no'
sphinx_theme: furo
test_matrix_separate_coverage: 'yes'
tests_inside_package: 'no'
version: 5.1.0
version_manager: bump2version
website: http://blog.ionelmc.ro
year_from: '2014'
year_to: '2023'
ionelmc-pytest-benchmark-5edf251/.coveragerc
[paths]
source = src
[run]
branch = true
source =
src
tests
parallel = true
[report]
show_missing = true
precision = 2
omit =
*migrations*
*pep418*
*hookspec*
ionelmc-pytest-benchmark-5edf251/.editorconfig
# see https://editorconfig.org/
root = true
[*]
# Use Unix-style newlines for most files (except Windows files, see below).
end_of_line = lf
trim_trailing_whitespace = true
indent_style = space
insert_final_newline = true
indent_size = 4
charset = utf-8
[*.{bat,cmd,ps1}]
end_of_line = crlf
[*.{yml,yaml}]
indent_size = 2
[*.tsv]
indent_style = tab
ionelmc-pytest-benchmark-5edf251/.github/workflows/github-actions.yml
name: build
on: [push, pull_request, workflow_dispatch]
jobs:
test:
name: ${{ matrix.name }}
runs-on: ${{ matrix.os }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- name: 'check'
python: '3.11'
toxpython: 'python3.11'
tox_env: 'check'
os: 'ubuntu-latest'
- name: 'docs'
python: '3.11'
toxpython: 'python3.11'
tox_env: 'docs'
os: 'ubuntu-latest'
- name: 'py39-pytest83-nodist-cover (ubuntu)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-nodist-cover'
os: 'ubuntu-latest'
- name: 'py39-pytest83-nodist-cover (windows)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-nodist-cover'
os: 'windows-latest'
- name: 'py39-pytest83-nodist-cover (macos)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'arm64'
tox_env: 'py39-pytest83-nodist-cover'
os: 'macos-latest'
- name: 'py39-pytest83-nodist-nocov (ubuntu)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-nodist-nocov'
os: 'ubuntu-latest'
- name: 'py39-pytest83-nodist-nocov (windows)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-nodist-nocov'
os: 'windows-latest'
- name: 'py39-pytest83-nodist-nocov (macos)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'arm64'
tox_env: 'py39-pytest83-nodist-nocov'
os: 'macos-latest'
- name: 'py39-pytest83-xdist-cover (ubuntu)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-xdist-cover'
os: 'ubuntu-latest'
- name: 'py39-pytest83-xdist-cover (windows)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-xdist-cover'
os: 'windows-latest'
- name: 'py39-pytest83-xdist-cover (macos)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'arm64'
tox_env: 'py39-pytest83-xdist-cover'
os: 'macos-latest'
- name: 'py39-pytest83-xdist-nocov (ubuntu)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-xdist-nocov'
os: 'ubuntu-latest'
- name: 'py39-pytest83-xdist-nocov (windows)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'x64'
tox_env: 'py39-pytest83-xdist-nocov'
os: 'windows-latest'
- name: 'py39-pytest83-xdist-nocov (macos)'
python: '3.9'
toxpython: 'python3.9'
python_arch: 'arm64'
tox_env: 'py39-pytest83-xdist-nocov'
os: 'macos-latest'
- name: 'py310-pytest83-nodist-cover (ubuntu)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-nodist-cover'
os: 'ubuntu-latest'
- name: 'py310-pytest83-nodist-cover (windows)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-nodist-cover'
os: 'windows-latest'
- name: 'py310-pytest83-nodist-cover (macos)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'arm64'
tox_env: 'py310-pytest83-nodist-cover'
os: 'macos-latest'
- name: 'py310-pytest83-nodist-nocov (ubuntu)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-nodist-nocov'
os: 'ubuntu-latest'
- name: 'py310-pytest83-nodist-nocov (windows)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-nodist-nocov'
os: 'windows-latest'
- name: 'py310-pytest83-nodist-nocov (macos)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'arm64'
tox_env: 'py310-pytest83-nodist-nocov'
os: 'macos-latest'
- name: 'py310-pytest83-xdist-cover (ubuntu)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-xdist-cover'
os: 'ubuntu-latest'
- name: 'py310-pytest83-xdist-cover (windows)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-xdist-cover'
os: 'windows-latest'
- name: 'py310-pytest83-xdist-cover (macos)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'arm64'
tox_env: 'py310-pytest83-xdist-cover'
os: 'macos-latest'
- name: 'py310-pytest83-xdist-nocov (ubuntu)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-xdist-nocov'
os: 'ubuntu-latest'
- name: 'py310-pytest83-xdist-nocov (windows)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'x64'
tox_env: 'py310-pytest83-xdist-nocov'
os: 'windows-latest'
- name: 'py310-pytest83-xdist-nocov (macos)'
python: '3.10'
toxpython: 'python3.10'
python_arch: 'arm64'
tox_env: 'py310-pytest83-xdist-nocov'
os: 'macos-latest'
- name: 'py311-pytest83-nodist-cover (ubuntu)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-nodist-cover'
os: 'ubuntu-latest'
- name: 'py311-pytest83-nodist-cover (windows)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-nodist-cover'
os: 'windows-latest'
- name: 'py311-pytest83-nodist-cover (macos)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'arm64'
tox_env: 'py311-pytest83-nodist-cover'
os: 'macos-latest'
- name: 'py311-pytest83-nodist-nocov (ubuntu)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-nodist-nocov'
os: 'ubuntu-latest'
- name: 'py311-pytest83-nodist-nocov (windows)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-nodist-nocov'
os: 'windows-latest'
- name: 'py311-pytest83-nodist-nocov (macos)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'arm64'
tox_env: 'py311-pytest83-nodist-nocov'
os: 'macos-latest'
- name: 'py311-pytest83-xdist-cover (ubuntu)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-xdist-cover'
os: 'ubuntu-latest'
- name: 'py311-pytest83-xdist-cover (windows)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-xdist-cover'
os: 'windows-latest'
- name: 'py311-pytest83-xdist-cover (macos)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'arm64'
tox_env: 'py311-pytest83-xdist-cover'
os: 'macos-latest'
- name: 'py311-pytest83-xdist-nocov (ubuntu)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-xdist-nocov'
os: 'ubuntu-latest'
- name: 'py311-pytest83-xdist-nocov (windows)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'x64'
tox_env: 'py311-pytest83-xdist-nocov'
os: 'windows-latest'
- name: 'py311-pytest83-xdist-nocov (macos)'
python: '3.11'
toxpython: 'python3.11'
python_arch: 'arm64'
tox_env: 'py311-pytest83-xdist-nocov'
os: 'macos-latest'
- name: 'py312-pytest83-nodist-cover (ubuntu)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-nodist-cover'
os: 'ubuntu-latest'
- name: 'py312-pytest83-nodist-cover (windows)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-nodist-cover'
os: 'windows-latest'
- name: 'py312-pytest83-nodist-cover (macos)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'arm64'
tox_env: 'py312-pytest83-nodist-cover'
os: 'macos-latest'
- name: 'py312-pytest83-nodist-nocov (ubuntu)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-nodist-nocov'
os: 'ubuntu-latest'
- name: 'py312-pytest83-nodist-nocov (windows)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-nodist-nocov'
os: 'windows-latest'
- name: 'py312-pytest83-nodist-nocov (macos)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'arm64'
tox_env: 'py312-pytest83-nodist-nocov'
os: 'macos-latest'
- name: 'py312-pytest83-xdist-cover (ubuntu)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-xdist-cover'
os: 'ubuntu-latest'
- name: 'py312-pytest83-xdist-cover (windows)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-xdist-cover'
os: 'windows-latest'
- name: 'py312-pytest83-xdist-cover (macos)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'arm64'
tox_env: 'py312-pytest83-xdist-cover'
os: 'macos-latest'
- name: 'py312-pytest83-xdist-nocov (ubuntu)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-xdist-nocov'
os: 'ubuntu-latest'
- name: 'py312-pytest83-xdist-nocov (windows)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'x64'
tox_env: 'py312-pytest83-xdist-nocov'
os: 'windows-latest'
- name: 'py312-pytest83-xdist-nocov (macos)'
python: '3.12'
toxpython: 'python3.12'
python_arch: 'arm64'
tox_env: 'py312-pytest83-xdist-nocov'
os: 'macos-latest'
- name: 'pypy39-pytest83-nodist-cover (ubuntu)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-nodist-cover'
os: 'ubuntu-latest'
- name: 'pypy39-pytest83-nodist-cover (windows)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-nodist-cover'
os: 'windows-latest'
- name: 'pypy39-pytest83-nodist-cover (macos)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'arm64'
tox_env: 'pypy39-pytest83-nodist-cover'
os: 'macos-latest'
- name: 'pypy39-pytest83-nodist-nocov (ubuntu)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-nodist-nocov'
os: 'ubuntu-latest'
- name: 'pypy39-pytest83-nodist-nocov (windows)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-nodist-nocov'
os: 'windows-latest'
- name: 'pypy39-pytest83-nodist-nocov (macos)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'arm64'
tox_env: 'pypy39-pytest83-nodist-nocov'
os: 'macos-latest'
- name: 'pypy39-pytest83-xdist-cover (ubuntu)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-xdist-cover'
os: 'ubuntu-latest'
- name: 'pypy39-pytest83-xdist-cover (windows)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-xdist-cover'
os: 'windows-latest'
- name: 'pypy39-pytest83-xdist-cover (macos)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'arm64'
tox_env: 'pypy39-pytest83-xdist-cover'
os: 'macos-latest'
- name: 'pypy39-pytest83-xdist-nocov (ubuntu)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-xdist-nocov'
os: 'ubuntu-latest'
- name: 'pypy39-pytest83-xdist-nocov (windows)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'x64'
tox_env: 'pypy39-pytest83-xdist-nocov'
os: 'windows-latest'
- name: 'pypy39-pytest83-xdist-nocov (macos)'
python: 'pypy-3.9'
toxpython: 'pypy3.9'
python_arch: 'arm64'
tox_env: 'pypy39-pytest83-xdist-nocov'
os: 'macos-latest'
- name: 'pypy310-pytest83-nodist-cover (ubuntu)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-nodist-cover'
os: 'ubuntu-latest'
- name: 'pypy310-pytest83-nodist-cover (windows)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-nodist-cover'
os: 'windows-latest'
- name: 'pypy310-pytest83-nodist-cover (macos)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'arm64'
tox_env: 'pypy310-pytest83-nodist-cover'
os: 'macos-latest'
- name: 'pypy310-pytest83-nodist-nocov (ubuntu)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-nodist-nocov'
os: 'ubuntu-latest'
- name: 'pypy310-pytest83-nodist-nocov (windows)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-nodist-nocov'
os: 'windows-latest'
- name: 'pypy310-pytest83-nodist-nocov (macos)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'arm64'
tox_env: 'pypy310-pytest83-nodist-nocov'
os: 'macos-latest'
- name: 'pypy310-pytest83-xdist-cover (ubuntu)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-xdist-cover'
os: 'ubuntu-latest'
- name: 'pypy310-pytest83-xdist-cover (windows)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-xdist-cover'
os: 'windows-latest'
- name: 'pypy310-pytest83-xdist-cover (macos)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'arm64'
tox_env: 'pypy310-pytest83-xdist-cover'
os: 'macos-latest'
- name: 'pypy310-pytest83-xdist-nocov (ubuntu)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-xdist-nocov'
os: 'ubuntu-latest'
- name: 'pypy310-pytest83-xdist-nocov (windows)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'x64'
tox_env: 'pypy310-pytest83-xdist-nocov'
os: 'windows-latest'
- name: 'pypy310-pytest83-xdist-nocov (macos)'
python: 'pypy-3.10'
toxpython: 'pypy3.10'
python_arch: 'arm64'
tox_env: 'pypy310-pytest83-xdist-nocov'
os: 'macos-latest'
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python }}
architecture: ${{ matrix.python_arch }}
- name: install dependencies
run: |
python -mpip install --progress-bar=off -r ci/requirements.txt
virtualenv --version
pip --version
tox --version
pip list --format=freeze
- name: test
env:
TOXPYTHON: '${{ matrix.toxpython }}'
run: >
tox -e ${{ matrix.tox_env }} -v
finish:
needs: test
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- uses: coverallsapp/github-action@v2
with:
parallel-finished: true
- uses: codecov/codecov-action@v3
with:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
ionelmc-pytest-benchmark-5edf251/.gitignore
*.py[cod]
__pycache__
# Temp files
.*.sw[po]
*~
*.bak
.DS_Store
# C extensions
*.so
# Build and package files
*.egg
*.egg-info
.bootstrap
.build
.cache
.eggs
.env
.installed.cfg
.ve
bin
build
develop-eggs
dist
eggs
lib
lib64
parts
pip-wheel-metadata/
pyvenv*/
sdist
var
venv*/
wheelhouse
# Installer logs
pip-log.txt
# Unit test / coverage reports
.benchmarks
.coverage
.coverage.*
.pytest
.pytest_cache/
.tox
coverage.xml
htmlcov
nosetests.xml
# Translations
*.mo
# Buildout
.mr.developer.cfg
# IDE project files
*.iml
*.komodoproject
.idea
.project
.pydevproject
.vscode
# Complexity
output/*.html
output/*/index.html
# Sphinx
docs/_build
# Mypy Cache
.mypy_cache/
ionelmc-pytest-benchmark-5edf251/.pre-commit-config.yaml
# To install the git pre-commit hooks run:
# pre-commit install --install-hooks
# To update the versions:
# pre-commit autoupdate
exclude: '^(\.tox|ci/templates|\.bumpversion\.cfg)(/|$)'
# Note the order is intentional to avoid multiple passes of the hooks
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.1
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix, --show-fixes, --unsafe-fixes]
- id: ruff-format
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: debug-statements
ionelmc-pytest-benchmark-5edf251/.readthedocs.yml
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
sphinx:
configuration: docs/conf.py
formats: all
build:
os: ubuntu-22.04
tools:
python: "3"
python:
install:
- requirements: docs/requirements.txt
- method: pip
path: .
ionelmc-pytest-benchmark-5edf251/AUTHORS.rst
Authors
=======
* Ionel Cristian Mărieș - https://blog.ionelmc.ro
* Marc Abramowitz - http://marc-abramowitz.com
* Dave Collins - https://github.com/thedavecollins
* Stefan Krastanov - http://blog.krastanov.org
* Thomas Waldmann - https://github.com/ThomasWaldmann
* Antonio Cuni - http://antocuni.eu
* Petr Šebek - https://github.com/Artimi
* Swen Kooij - https://github.com/Photonios
* "varac" - https://github.com/varac
* Andre Bianchi - https://github.com/drebs
* Jeremy Dobbins-Bucklad - https://github.com/jad-b
* Alexey Popravka - https://github.com/popravich
* Ken Crowell - https://github.com/oeuftete
* Matthew Feickert - https://github.com/matthewfeickert
* Julien Nicoulaud - https://github.com/nicoulaj
* Pablo Aguiar - https://github.com/scorphus
* Alex Ford - https://github.com/asford
* Francesco Ballarin - https://github.com/francesco-ballarin
* Lincoln de Sousa - https://github.com/clarete
* Jose Eduardo - https://github.com/JoseKilo
* Ofek Lev - https://github.com/ofek
* Anton Lodder - https://github.com/AnjoMan
* Alexander Duryagin - https://github.com/daa
* Stanislav Levin - https://github.com/stanislavlevin
* Grygorii Iermolenko - https://github.com/gyermolenko
* Jonathan Simon Prates - https://github.com/jonathansp
* Miroslav Šedivý - https://github.com/eumiro
* Dimitris Rozakis - https://github.com/dimrozakis
* Friedrich Delgado - https://github.com/TauPan
* Sam James - https://github.com/thesamesam
* Florian Bruhin - https://github.com/The-Compiler
* Johnny Huang - https://github.com/jnhyperion
* Tony Kuo - https://github.com/tony92151
* Eugeniy - https://github.com/zcoder
ionelmc-pytest-benchmark-5edf251/CHANGELOG.rst
Changelog
=========
5.1.0 (2024-10-30)
------------------
* Fixed broken hooks handling on pytest 8.1 or later (the ``TypeError: import_path() missing 1 required keyword-only argument: 'consider_namespace_packages'`` issue).
Unfortunately this sets the minimum supported pytest version to 8.1.
5.0.1 (2024-10-30)
------------------
* Fixed bad fixture check that broke down when `nbmake <https://github.com/treebeard-tech/nbmake>`_ was enabled.
5.0.0 (2024-10-29)
------------------
* Dropped support for the now-EOL Python 3.8. Also moved the test suite to only test the latest pytest versions (8.3.x).
* Fixed errors when generating the CSV report for parametrized benchmark tests (issue `#268 <https://github.com/ionelmc/pytest-benchmark/issues/268>`_).
Contributed by Johnny Huang in `#269 <https://github.com/ionelmc/pytest-benchmark/pull/269>`_.
* Added the ``--benchmark-time-unit`` cli option for overriding the measurement unit used for display.
Contributed by Tony Kuo in `#257 <https://github.com/ionelmc/pytest-benchmark/pull/257>`_.
* Fixed spelling in some help texts.
Contributed by Eugeniy in `#267 <https://github.com/ionelmc/pytest-benchmark/pull/267>`_.
* Added new cprofile options:
- ``--benchmark-cprofile-loops=LOOPS`` - previously profiling only ran the function once; this allows customization.
- ``--benchmark-cprofile-top=COUNT`` - allows showing more rows.
- ``--benchmark-cprofile-dump=[FILENAME-PREFIX]`` - allows saving to a file (that you can load in `snakeviz <https://jiffyclub.github.io/snakeviz/>`_, `RunSnakeRun <http://www.vrplumber.com/programming/runsnakerun/>`_ or other tools).
* Removed the hidden dependency on `py.path <https://py.readthedocs.io/en/latest/path.html>`_ (replaced with pathlib).
4.0.0 (2022-10-26)
------------------
* Dropped support for legacy Pythons (2.7, 3.6 or older).
* Switched CI to GitHub Actions.
* Removed dependency on the ``py`` library (that was not properly specified as a dependency anyway).
* Fixed skipping of tests in ``test_utils.py`` when the appropriate VCS is not available. Also fixed a typo.
Contributed by Sam James in `#211 <https://github.com/ionelmc/pytest-benchmark/pull/211>`_.
* Added support for pytest 7.2.0 by using ``pytest.hookimpl`` and ``pytest.hookspec`` to configure hooks.
Contributed by Florian Bruhin in `#224 <https://github.com/ionelmc/pytest-benchmark/pull/224>`_.
* Now no save is attempted if ``--benchmark-disable`` is used.
Fixes `#205 <https://github.com/ionelmc/pytest-benchmark/issues/205>`_.
Contributed by Friedrich Delgado in `#207 <https://github.com/ionelmc/pytest-benchmark/pull/207>`_.
3.4.1 (2021-04-17)
------------------
* Republished with updated changelog.
I intended to publish a ``3.3.0`` release but I messed it up because bumpversion doesn't work well with pre-commit
apparently... thus ``3.4.0`` was set in by accident.
3.4.0 (2021-04-17)
------------------
* Disable progress indication unless ``--benchmark-verbose`` is used.
Contributed by Dimitris Rozakis in `#149 <https://github.com/ionelmc/pytest-benchmark/pull/149>`_.
* Added Python 3.9, dropped Python 3.5.
Contributed by Miroslav Šedivý in `#189 <https://github.com/ionelmc/pytest-benchmark/pull/189>`_.
* Changed the "cpu" data in the json output to include everything that cpuinfo outputs, for better or worse, as cpuinfo 6.0 changed some
fields. Users should now ensure they have an adequate cpuinfo package installed.
**MAY BE BACKWARDS INCOMPATIBLE**
* Changed behavior of ``--benchmark-skip`` and ``--benchmark-only`` to apply early in the collection phase.
This means skipped tests won't make pytest run fixtures for said tests unnecessarily, but unfortunately this also means
the skipping behavior will be applied to any tests that requires a "benchmark" fixture, regardless if it would come from pytest-benchmark
or not.
**MAY BE BACKWARDS INCOMPATIBLE**
* Added ``--benchmark-quiet`` - option to disable reporting and other information output.
* Squelched unnecessary warning when ``--benchmark-disable`` and save options are used.
Fixes `#199 <https://github.com/ionelmc/pytest-benchmark/issues/199>`_.
* ``PerformanceRegression`` exception no longer inherits ``pytest.UsageError`` (apparently a *final* class).
3.2.3 (2020-01-10)
------------------
* Fixed "already-imported" pytest warning. Contributed by Jonathan Simon Prates in
`#151 <https://github.com/ionelmc/pytest-benchmark/pull/151>`_.
* Fixed breakage that occurs when benchmark is disabled while using cprofile feature (by disabling cprofile too).
* Dropped Python 3.4 from the test suite and updated test deps.
* Fixed ``pytest_benchmark.utils.clonefunc`` to work on Python 3.8.
3.2.2 (2017-01-12)
------------------
* Added support for pytest items without funcargs. Fixes interoperability with other pytest plugins like pytest-flake8.
3.2.1 (2017-01-10)
------------------
* Updated changelog entries for 3.2.0. I made the release for pytest-cov on the same day and thought I updated the
changelogs for both plugins. Alas, I only updated pytest-cov.
* Added missing version constraint change. Now pytest >= 3.8 is required (due to pytest 4.1 support).
* Fixed couple CI/test issues.
* Fixed broken ``pytest_benchmark.__version__``.
3.2.0 (2017-01-07)
------------------
* Added support for a simple ``trial`` x-axis histogram label. Contributed by Ken Crowell in
`#95 <https://github.com/ionelmc/pytest-benchmark/pull/95>`_.
* Added support for Pytest 3.3+. Contributed by Julien Nicoulaud in
`#103 <https://github.com/ionelmc/pytest-benchmark/pull/103>`_.
* Added support for Pytest 4.0. Contributed by Pablo Aguiar in
`#129 <https://github.com/ionelmc/pytest-benchmark/pull/129>`_ and
`#130 <https://github.com/ionelmc/pytest-benchmark/pull/130>`_.
* Added support for Pytest 4.1.
* Various formatting, spelling and documentation fixes. Contributed by
Ken Crowell, Ofek Lev, Matthew Feickert, Jose Eduardo, Anton Lodder, Alexander Duryagin and Grygorii Iermolenko in
`#97 <https://github.com/ionelmc/pytest-benchmark/pull/97>`_,
`#105 <https://github.com/ionelmc/pytest-benchmark/pull/105>`_,
`#110 <https://github.com/ionelmc/pytest-benchmark/pull/110>`_,
`#111 <https://github.com/ionelmc/pytest-benchmark/pull/111>`_,
`#115 <https://github.com/ionelmc/pytest-benchmark/pull/115>`_,
`#123 <https://github.com/ionelmc/pytest-benchmark/pull/123>`_,
`#131 <https://github.com/ionelmc/pytest-benchmark/pull/131>`_ and
`#140 <https://github.com/ionelmc/pytest-benchmark/pull/140>`_.
* Fixed broken ``pytest_benchmark_update_machine_info`` hook. Contributed by Alex Ford in
`#109 <https://github.com/ionelmc/pytest-benchmark/pull/109>`_.
* Fixed bogus xdist warning when using ``--benchmark-disable``. Contributed by Francesco Ballarin in
`#113 <https://github.com/ionelmc/pytest-benchmark/pull/113>`_.
* Added support for pathlib2. Contributed by Lincoln de Sousa in
`#114 <https://github.com/ionelmc/pytest-benchmark/pull/114>`_.
* Changed handling so you can use ``--benchmark-skip`` and ``--benchmark-only``, with the latter having priority.
Contributed by Ofek Lev in
`#116 <https://github.com/ionelmc/pytest-benchmark/pull/116>`_.
* Fixed various CI/testing issues.
Contributed by Stanislav Levin in
`#134 <https://github.com/ionelmc/pytest-benchmark/pull/134>`_,
`#136 <https://github.com/ionelmc/pytest-benchmark/pull/136>`_ and
`#138 <https://github.com/ionelmc/pytest-benchmark/pull/138>`_.
3.1.1 (2017-07-26)
------------------
* Fixed loading data from old json files (missing ``ops`` field, see
`#81 <https://github.com/ionelmc/pytest-benchmark/issues/81>`_).
* Fixed regression on broken SCM (see
`#82 <https://github.com/ionelmc/pytest-benchmark/issues/82>`_).
3.1.0 (2017-07-21)
------------------
* Added "operations per second" (``ops`` field in ``Stats``) metric --
shows the call rate of the code being tested. Contributed by Alexey Popravka in
`#78 <https://github.com/ionelmc/pytest-benchmark/pull/78>`_.
* Added a ``time`` field in ``commit_info``. Contributed by "varac" in
`#71 <https://github.com/ionelmc/pytest-benchmark/pull/71>`_.
* Added an ``author_time`` field in ``commit_info``. Contributed by "varac" in
`#75 <https://github.com/ionelmc/pytest-benchmark/pull/75>`_.
* Fixed the leaking of credentials by masking the URL printed when storing
data to elasticsearch.
* Added a ``--benchmark-netrc`` option to use credentials from a netrc file when
storing data to elasticsearch. Both contributed by Andre Bianchi in
`#73 <https://github.com/ionelmc/pytest-benchmark/pull/73>`_.
* Fixed docs on hooks. Contributed by Andre Bianchi in `#74 <https://github.com/ionelmc/pytest-benchmark/pull/74>`_.
* Remove ``git`` and ``hg`` as system dependencies when guessing the project name.
3.1.0a2 (2017-03-27)
--------------------
* ``machine_info`` now contains more detailed information about the CPU, in
particular the exact model. Contributed by Antonio Cuni in `#61 <https://github.com/ionelmc/pytest-benchmark/pull/61>`_.
* Added ``benchmark.extra_info``, which you can use to save arbitrary stuff in
the JSON. Contributed by Antonio Cuni in the same PR as above.
* Fixed support for the latest PyGal version (histograms). Contributed by Swen Kooij in
`#68 <https://github.com/ionelmc/pytest-benchmark/pull/68>`_.
* Added support for getting ``commit_info`` when not running in the root of the repository. Contributed by Vara Canero in
`#69 <https://github.com/ionelmc/pytest-benchmark/pull/69>`_.
* Added short form for ``--storage``/``--verbose`` options in CLI.
* Added an alternate ``pytest-benchmark`` CLI bin (in addition to ``py.test-benchmark``) to match the madness in pytest.
* Fix some issues with ``--help`` in CLI.
* Improved git remote parsing (for ``commit_info`` in JSON outputs).
* Fixed default value for ``--benchmark-columns``.
* Fixed comparison mode (loading was done too late).
* Remove the project name from the autosave name. This will get the old brief naming from 3.0 back.
3.1.0a1 (2016-10-29)
--------------------
* Added the ``--benchmark-columns`` command line option. It selects which columns are displayed in the result table. Contributed by
Antonio Cuni in `#34 <https://github.com/ionelmc/pytest-benchmark/pull/34>`_.
* Added support for grouping by specific test parametrization (``--benchmark-group-by=param:NAME`` where ``NAME`` is your
param name). Contributed by Antonio Cuni in `#37 <https://github.com/ionelmc/pytest-benchmark/pull/37>`__.
* Added support for ``name`` or ``fullname`` in ``--benchmark-sort``.
Contributed by Antonio Cuni in `#37 <https://github.com/ionelmc/pytest-benchmark/pull/37>`_.
* Changed signature for ``pytest_benchmark_generate_json`` hook to take 2 new arguments: ``machine_info`` and ``commit_info``.
* Changed ``--benchmark-histogram`` to plot groups instead of name-matching runs.
* Changed ``--benchmark-histogram`` to plot exactly what you compared against. Now it's ``1:1`` with the compare feature.
* Changed ``--benchmark-compare`` to allow globs. You can compare against all the previous runs now.
* Changed ``--benchmark-group-by`` to allow multiple values separated by comma.
Example: ``--benchmark-group-by=param:foo,param:bar``
* Added a command line tool to compare previous data: ``py.test-benchmark``. It has two commands:
* ``list`` - Lists all the available files.
* ``compare`` - Displays result tables. Takes options:
* ``--sort=COL``
* ``--group-by=LABEL``
* ``--columns=LABELS``
* ``--histogram=[FILENAME-PREFIX]``
* Added ``--benchmark-cprofile``, which profiles the last run of the benchmarked function. Contributed by Petr Šebek.
* Changed ``--benchmark-storage`` so it now allows elasticsearch storage: data can be stored in Elasticsearch instead of
json files. Contributed by Petr Šebek in `#58 <https://github.com/ionelmc/pytest-benchmark/pull/58>`_.
3.0.0 (2015-11-08)
------------------
* Improved ``--help`` text for ``--benchmark-histogram``, ``--benchmark-save`` and ``--benchmark-autosave``.
* Benchmarks that raised exceptions during test now have special highlighting in result table (red background).
* Benchmarks that raised exceptions are not included in the saved data anymore (you can still get the old behavior back
by implementing ``pytest_benchmark_generate_json`` in your ``conftest.py``).
* The plugin will use pytest's warning system for warnings. There are 2 categories: ``WBENCHMARK-C`` (compare mode
issues) and ``WBENCHMARK-U`` (usage issues).
* The red warnings are only shown if ``--benchmark-verbose`` is used. They will still always be shown in the
pytest-warnings section.
* Using the benchmark fixture more than once is disallowed (it will raise an exception).
* Not using the benchmark fixture (but requiring it) will issue a warning (``WBENCHMARK-U1``).
3.0.0rc1 (2015-10-25)
---------------------
* Changed ``--benchmark-warmup`` to take an optional value and automatically activate on PyPy (default value is ``auto``).
**MAY BE BACKWARDS INCOMPATIBLE**
* Removed the version check in compare mode (previously there was a warning if current version is lower than what's in
the file).
3.0.0b3 (2015-10-22)
---------------------
* Changed how comparison is displayed in the result table. Now previous runs are shown as normal runs and names get a
special suffix indicating the origin, e.g.: "test_foobar (NOW)" or "test_foobar (0123)".
* Fixed sorting in the result table. Now rows are sorted by the sort column, and then by name.
* Show the plugin version in the header section.
* Moved the display of default options in the header section.
3.0.0b2 (2015-10-17)
---------------------
* Add a ``--benchmark-disable`` option. It's automatically activated when xdist is on.
* When xdist is on or ``statistics`` can't be imported then ``--benchmark-disable`` is automatically activated (instead
of ``--benchmark-skip``). **BACKWARDS INCOMPATIBLE**
* Replace the deprecated ``__multicall__`` with the new hookwrapper system.
* Improved description for ``--benchmark-max-time``.
3.0.0b1 (2015-10-13)
--------------------
* Tests are sorted alphabetically in the results table.
* Failing to import ``statistics`` doesn't create hard failures anymore. Benchmarks are automatically skipped if an import
failure occurs. This would happen on Python 3.2 (or earlier Python 3).
3.0.0a4 (2015-10-08)
--------------------
* Changed how failures to get commit info are handled: now they are soft failures. Previously it made the whole
test suite fail, just because you didn't have ``git/hg`` installed.
3.0.0a3 (2015-10-02)
--------------------
* Added progress indication when computing stats.
3.0.0a2 (2015-09-30)
--------------------
* Fixed accidental output capturing caused by capturemanager misuse.
3.0.0a1 (2015-09-13)
--------------------
* Added JSON report saving (the ``--benchmark-json`` command line argument). Based on initial work from Dave Collins in
`#8 <https://github.com/ionelmc/pytest-benchmark/pull/8>`_.
* Added benchmark data storage (the ``--benchmark-save`` and ``--benchmark-autosave`` command line arguments).
* Added comparison to previous runs (the ``--benchmark-compare`` command line argument).
* Added performance regression checks (the ``--benchmark-compare-fail`` command line argument).
* Added possibility to group by various parts of test name (the ``--benchmark-compare-group-by`` command line argument).
* Added historical plotting (the ``--benchmark-histogram`` command line argument).
* Added option to fine tune the calibration (the ``--benchmark-calibration-precision`` command line argument and
``calibration_precision`` marker option).
* Changed ``benchmark_weave`` to no longer be a context manager. Cleanup is performed automatically.
**BACKWARDS INCOMPATIBLE**
* Added ``benchmark.weave`` method (alternative to ``benchmark_weave`` fixture).
* Added new hooks to allow customization:
* ``pytest_benchmark_generate_machine_info(config)``
* ``pytest_benchmark_update_machine_info(config, info)``
* ``pytest_benchmark_generate_commit_info(config)``
* ``pytest_benchmark_update_commit_info(config, info)``
* ``pytest_benchmark_group_stats(config, benchmarks, group_by)``
* ``pytest_benchmark_generate_json(config, benchmarks, include_data)``
* ``pytest_benchmark_update_json(config, benchmarks, output_json)``
* ``pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark)``
* Changed the timing code to:
* Tracers are automatically disabled when running the test function (like coverage tracers).
* Fixed an issue with calibration code getting stuck.
* Added ``pedantic mode`` via ``benchmark.pedantic()``. This mode disables calibration and allows a setup function.
2.5.0 (2015-06-20)
------------------
* Improved test suite a bit (not using ``cram`` anymore).
* Improved help text on the ``--benchmark-warmup`` option.
* Made ``warmup_iterations`` available as a marker argument (eg: ``@pytest.mark.benchmark(warmup_iterations=1234)``).
* Fixed ``--benchmark-verbose``'s printouts to work properly with output capturing.
* Changed how warmup iterations are computed (now the total number of iterations is used, instead of just the rounds).
* Fixed a bug where calibration would run forever.
* Disabled red/green coloring (it was kinda random) when there's a single test in the results table.
2.4.1 (2015-03-16)
------------------
* Fix regression, plugin was raising ``ValueError: no option named 'dist'`` when xdist wasn't installed.
2.4.0 (2015-03-12)
------------------
* Add a ``benchmark_weave`` experimental fixture.
* Fix internal failures when ``xdist`` plugin is active.
* Automatically disable benchmarks if ``xdist`` is active.
2.3.0 (2014-12-27)
------------------
* Moved the warmup into the calibration phase. Solves issues with benchmarking on PyPy.
Added a ``--benchmark-warmup-iterations`` option to fine-tune that.
2.2.0 (2014-12-26)
------------------
* Make the default rounds smaller (so that variance is more accurate).
* Show the defaults in the ``--help`` section.
2.1.0 (2014-12-20)
------------------
* Simplify the calibration code so that the round is smaller.
* Add diagnostic output for calibration code (``--benchmark-verbose``).
2.0.0 (2014-12-19)
------------------
* Replace the context-manager based API with a simple callback interface. **BACKWARDS INCOMPATIBLE**
* Implement timer calibration for precise measurements.
1.0.0 (2014-12-15)
------------------
* Use a precise default timer for PyPy.
? (?)
-----
* README and styling fixes. Contributed by Marc Abramowitz in `#4 <https://github.com/ionelmc/pytest-benchmark/pull/4>`_.
* Lots of wild changes.
ionelmc-pytest-benchmark-5edf251/CONTRIBUTING.rst
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
Bug reports
===========
When `reporting a bug <https://github.com/ionelmc/pytest-benchmark/issues>`_ please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Documentation improvements
==========================
pytest-benchmark could always use more documentation, whether as part of the
official pytest-benchmark docs, in docstrings, or even on the web in blog posts,
articles, and such.
Feature requests and feedback
=============================
The best way to send feedback is to file an issue at https://github.com/ionelmc/pytest-benchmark/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that code contributions are welcome :)
Development
===========
To set up `pytest-benchmark` for local development:
1. Fork `pytest-benchmark <https://github.com/ionelmc/pytest-benchmark>`_
(look for the "Fork" button).
2. Clone your fork locally::
git clone git@github.com:YOURGITHUBNAME/pytest-benchmark.git
3. Create a branch for local development::
git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
4. When you're done making changes, run all the checks and the docs builder with one command::
tox
5. Commit your changes and push your branch to GitHub::
git add .
git commit -m "Your detailed description of your changes."
git push origin name-of-your-bugfix-or-feature
6. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
If you need some code review or feedback while you're developing the code, just make the pull request.
For merging, you should:
1. Include passing tests (run ``tox``).
2. Update documentation when there's new API, functionality etc.
3. Add a note to ``CHANGELOG.rst`` about the changes.
4. Add yourself to ``AUTHORS.rst``.
Tips
----
To run a subset of tests::
tox -e envname -- pytest -k test_myfeature
To run all the test environments in *parallel*::
tox -p auto
ionelmc-pytest-benchmark-5edf251/LICENSE
BSD 2-Clause License
Copyright (c) 2014-2023, Ionel Cristian Mărieș. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ionelmc-pytest-benchmark-5edf251/MANIFEST.in
graft docs
graft src
graft ci
graft tests
include .bumpversion.cfg
include .cookiecutterrc
include .coveragerc
include .editorconfig
include .github/workflows/github-actions.yml
include .pre-commit-config.yaml
include .readthedocs.yml
include pytest.ini
include tox.ini
include AUTHORS.rst
include CHANGELOG.rst
include CONTRIBUTING.rst
include LICENSE
include README.rst
global-exclude *.py[cod] __pycache__/* *.so *.dylib
ionelmc-pytest-benchmark-5edf251/README.rst
========
Overview
========
.. start-badges
.. list-table::
:stub-columns: 1
* - docs
- |docs| |gitter|
* - tests
- |github-actions| |coveralls| |codecov|
* - package
- |version| |wheel| |supported-versions| |supported-implementations| |commits-since|
.. |docs| image:: https://readthedocs.org/projects/pytest-benchmark/badge/?style=flat
:target: https://readthedocs.org/projects/pytest-benchmark/
:alt: Documentation Status
.. |github-actions| image:: https://github.com/ionelmc/pytest-benchmark/actions/workflows/github-actions.yml/badge.svg
:alt: GitHub Actions Build Status
:target: https://github.com/ionelmc/pytest-benchmark/actions
.. |gitter| image:: https://badges.gitter.im/ionelmc/pytest-benchmark.svg
:alt: Join the chat at https://gitter.im/ionelmc/pytest-benchmark
:target: https://gitter.im/ionelmc/pytest-benchmark
.. |coveralls| image:: https://coveralls.io/repos/github/ionelmc/pytest-benchmark/badge.svg?branch=main
:alt: Coverage Status
:target: https://coveralls.io/github/ionelmc/pytest-benchmark?branch=main
.. |codecov| image:: https://codecov.io/gh/ionelmc/pytest-benchmark/branch/main/graphs/badge.svg?branch=main
:alt: Coverage Status
:target: https://app.codecov.io/github/ionelmc/pytest-benchmark
.. |version| image:: https://img.shields.io/pypi/v/pytest-benchmark.svg
:alt: PyPI Package latest release
:target: https://pypi.org/project/pytest-benchmark
.. |wheel| image:: https://img.shields.io/pypi/wheel/pytest-benchmark.svg
:alt: PyPI Wheel
:target: https://pypi.org/project/pytest-benchmark
.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/pytest-benchmark.svg
:alt: Supported versions
:target: https://pypi.org/project/pytest-benchmark
.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/pytest-benchmark.svg
:alt: Supported implementations
:target: https://pypi.org/project/pytest-benchmark
.. |commits-since| image:: https://img.shields.io/github/commits-since/ionelmc/pytest-benchmark/v5.1.0.svg
:alt: Commits since latest release
:target: https://github.com/ionelmc/pytest-benchmark/compare/v5.1.0...main
.. end-badges
A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen
timer.
See calibration_ and FAQ_.
* Free software: BSD 2-Clause License
Installation
============
::
pip install pytest-benchmark
Documentation
=============
For the latest release: `pytest-benchmark.readthedocs.org/en/stable <https://pytest-benchmark.readthedocs.org/en/stable/>`_.
For the master branch (may include documentation fixes): `pytest-benchmark.readthedocs.io/en/latest <https://pytest-benchmark.readthedocs.io/en/latest/>`_.
Examples
========
But first, a prologue:
This plugin tightly integrates into pytest. To use this effectively you should know a thing or two about pytest first.
Take a look at the `introductory material <https://docs.pytest.org/en/latest/getting-started.html>`_
or watch `talks <https://docs.pytest.org/en/latest/talks.html>`_.
A few notes:
* This plugin benchmarks functions and only that. If you want to measure blocks of code
or whole programs you will need to write a wrapper function.
* In a test you can only benchmark one function. If you want to benchmark many functions, write more tests or
use `parametrization <https://docs.pytest.org/en/stable/how-to/parametrize.html>`_ (a sketch follows these notes).
* To run the benchmarks you simply use `pytest` to run your "tests". The plugin will automatically do the
benchmarking and generate a result table. Run ``pytest --help`` for more details.
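As promised in the notes above, here is a minimal parametrization sketch. The ``fast``/``slow`` helpers are made-up placeholders; only the ``benchmark`` fixture (introduced below) and ``pytest.mark.parametrize`` come from pytest and this plugin:

.. code-block:: python

    import time

    import pytest


    def fast():
        time.sleep(0.000001)


    def slow():
        time.sleep(0.001)


    # One benchmarked function per generated test item.
    @pytest.mark.parametrize('func', [fast, slow], ids=['fast', 'slow'])
    def test_variants(benchmark, func):
        benchmark(func)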
This plugin provides a `benchmark` fixture. This fixture is a callable object that will benchmark any function passed
to it.
Example:
.. code-block:: python
import time

def something(duration=0.000001):
"""
Function that needs some serious benchmarking.
"""
time.sleep(duration)
# You may return anything you want, like the result of a computation
return 123
def test_my_stuff(benchmark):
# benchmark something
result = benchmark(something)
# Extra code, to verify that the run completed correctly.
# Sometimes you may want to check the result, fast functions
# are no good if they return incorrect results :-)
assert result == 123
You can also pass extra arguments:
.. code-block:: python
def test_my_stuff(benchmark):
benchmark(time.sleep, 0.02)
Or even keyword arguments:
.. code-block:: python
def test_my_stuff(benchmark):
benchmark(time.sleep, duration=0.02)
Another pattern seen in the wild, which is not recommended for micro-benchmarks (very fast code) but may be convenient:
.. code-block:: python
def test_my_stuff(benchmark):
@benchmark
def something(): # unnecessary function call
time.sleep(0.000001)
A better way is to just benchmark the final function:
.. code-block:: python
def test_my_stuff(benchmark):
benchmark(time.sleep, 0.000001) # way more accurate results!
If you need fine-grained control over how the benchmark is run (like a `setup` function, or exact control of `iterations` and
`rounds`) there's a special mode - pedantic_:
.. code-block:: python
def my_special_setup():
...
def test_with_setup(benchmark):
benchmark.pedantic(something, setup=my_special_setup, args=(1, 2, 3), kwargs={'foo': 'bar'}, iterations=10, rounds=100)
Screenshots
-----------
Normal run:
.. image:: https://github.com/ionelmc/pytest-benchmark/raw/master/docs/screenshot.png
:alt: Screenshot of pytest summary
Compare mode (``--benchmark-compare``):
.. image:: https://github.com/ionelmc/pytest-benchmark/raw/master/docs/screenshot-compare.png
:alt: Screenshot of pytest summary in compare mode
Histogram (``--benchmark-histogram``):
.. image:: https://cdn.rawgit.com/ionelmc/pytest-benchmark/94860cc8f47aed7ba4f9c7e1380c2195342613f6/docs/sample-tests_test_normal.py_test_xfast_parametrized%5B0%5D.svg
:alt: Histogram sample
..
Also, it has `nice tooltips <https://cdn.rawgit.com/ionelmc/pytest-benchmark/94860cc8f47aed7ba4f9c7e1380c2195342613f6/docs/sample-tests_test_normal.py_test_xfast_parametrized%5B0%5D.svg>`_.
Development
===========
To run the all tests run::
tox
Credits
=======
* Timing code and ideas taken from: https://github.com/vstinner/misc/blob/34d3128468e450dad15b6581af96a790f8bd58ce/python/benchmark.py
.. _FAQ: http://pytest-benchmark.readthedocs.org/en/latest/faq.html
.. _calibration: http://pytest-benchmark.readthedocs.org/en/latest/calibration.html
.. _pedantic: http://pytest-benchmark.readthedocs.org/en/latest/pedantic.html
ionelmc-pytest-benchmark-5edf251/ci/bootstrap.py
#!/usr/bin/env python
import os
import pathlib
import subprocess
import sys
base_path: pathlib.Path = pathlib.Path(__file__).resolve().parent.parent
templates_path = base_path / 'ci' / 'templates'
def check_call(args):
print('+', *args)
subprocess.check_call(args)
def exec_in_env():
env_path = base_path / '.tox' / 'bootstrap'
if sys.platform == 'win32':
bin_path = env_path / 'Scripts'
else:
bin_path = env_path / 'bin'
if not env_path.exists():
print(f'Making bootstrap env in: {env_path} ...')
try:
check_call([sys.executable, '-m', 'venv', env_path])
except subprocess.CalledProcessError:
try:
check_call([sys.executable, '-m', 'virtualenv', env_path])
except subprocess.CalledProcessError:
check_call(['virtualenv', env_path])
print('Installing `jinja2` into bootstrap environment...')
check_call([bin_path / 'pip', 'install', 'jinja2', 'tox'])
python_executable = bin_path / 'python'
if not python_executable.exists():
python_executable = python_executable.with_suffix('.exe')
print(f'Re-executing with: {python_executable}')
print('+ exec', python_executable, __file__, '--no-env')
os.execv(python_executable, [python_executable, __file__, '--no-env'])
def main():
import jinja2
print(f'Project path: {base_path}')
jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(str(templates_path)),
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
)
tox_environments = [
line.strip()
# 'tox' need not be installed globally, but must be importable
# by the Python that is running this script.
# This uses sys.executable the same way that the call in
# cookiecutter-pylibrary/hooks/post_gen_project.py
# invokes this bootstrap.py itself.
for line in subprocess.check_output([sys.executable, '-m', 'tox', '--listenvs'], universal_newlines=True).splitlines()
]
tox_environments = [line for line in tox_environments if line.startswith('py')]
for template in templates_path.rglob('*'):
if template.is_file():
template_path = template.relative_to(templates_path).as_posix()
destination = base_path / template_path
destination.parent.mkdir(parents=True, exist_ok=True)
destination.write_text(jinja.get_template(template_path).render(tox_environments=tox_environments))
print(f'Wrote {template_path}')
print('DONE.')
if __name__ == '__main__':
args = sys.argv[1:]
if args == ['--no-env']:
main()
elif not args:
exec_in_env()
else:
print(f'Unexpected arguments: {args}', file=sys.stderr)
sys.exit(1)
ionelmc-pytest-benchmark-5edf251/ci/requirements.txt
virtualenv>=16.6.0
pip>=19.1.1
setuptools>=18.0.1
tox
twine
ionelmc-pytest-benchmark-5edf251/ci/templates/.github/workflows/github-actions.yml
name: build
on: [push, pull_request, workflow_dispatch]
jobs:
test:
name: {{ '${{ matrix.name }}' }}
runs-on: {{ '${{ matrix.os }}' }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- name: 'check'
python: '3.11'
toxpython: 'python3.11'
tox_env: 'check'
os: 'ubuntu-latest'
- name: 'docs'
python: '3.11'
toxpython: 'python3.11'
tox_env: 'docs'
os: 'ubuntu-latest'
{% for env in tox_environments %}
{% set prefix = env.split('-')[0] -%}
{% if prefix.startswith('pypy') %}
{% set python %}pypy-{{ prefix[4] }}.{{ prefix[5:] }}{% endset %}
{% set cpython %}pp{{ prefix[4:5] }}{% endset %}
{% set toxpython %}pypy{{ prefix[4] }}.{{ prefix[5:] }}{% endset %}
{% else %}
{% set python %}{{ prefix[2] }}.{{ prefix[3:] }}{% endset %}
{% set cpython %}cp{{ prefix[2:] }}{% endset %}
{% set toxpython %}python{{ prefix[2] }}.{{ prefix[3:] }}{% endset %}
{% endif %}
{% for os, python_arch in [
['ubuntu', 'x64'],
['windows', 'x64'],
['macos', 'arm64'],
] %}
- name: '{{ env }} ({{ os }})'
python: '{{ python }}'
toxpython: '{{ toxpython }}'
python_arch: '{{ python_arch }}'
tox_env: '{{ env }}'
os: '{{ os }}-latest'
{% endfor %}
{% endfor %}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-python@v5
with:
python-version: {{ '${{ matrix.python }}' }}
architecture: {{ '${{ matrix.python_arch }}' }}
- name: install dependencies
run: |
python -mpip install --progress-bar=off -r ci/requirements.txt
virtualenv --version
pip --version
tox --version
pip list --format=freeze
- name: test
env:
TOXPYTHON: '{{ '${{ matrix.toxpython }}' }}'
run: >
tox -e {{ '${{ matrix.tox_env }}' }} -v
finish:
needs: test
if: {{ '${{ always() }}' }}
runs-on: ubuntu-latest
steps:
- uses: coverallsapp/github-action@v2
with:
parallel-finished: true
- uses: codecov/codecov-action@v3
with:
CODECOV_TOKEN: {% raw %}${{ secrets.CODECOV_TOKEN }}{% endraw %}
ionelmc-pytest-benchmark-5edf251/docs/authors.rst
.. include:: ../AUTHORS.rst
ionelmc-pytest-benchmark-5edf251/docs/calibration.rst
Calibration
===========
``pytest-benchmark`` will run your function multiple times between measurements. A `round` is that set of runs done between
measurements. This is quite similar to the builtin ``timeit`` module but it's more robust.
The problem with measuring single runs appears when you have very fast code. To illustrate:
.. image:: https://github.com/ionelmc/pytest-benchmark/raw/master/docs/measurement-issues.png
:alt: Diagram illustrating issues with measuring very fast code
In other words, a `round` is a set of runs that are averaged together, those resulting numbers are then used to compute the
result tables. The default settings will try to keep the round small enough (so that you get to see variance), but not too
small, because then you have the timer calibration issues illustrated above (your test function is faster than or as fast
as the resolution of the timer).
By default ``pytest-benchmark`` will try to run your function as many times as needed to fit a `10 x TIMER_RESOLUTION`
period. You can fine-tune this with the ``--benchmark-min-time`` and ``--benchmark-calibration-precision`` options.
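For illustration, here is a hedged sketch of both ways to tune the calibration; the numbers are arbitrary examples, not
recommendations. Globally, on the command line::

    pytest --benchmark-min-time=0.000100 --benchmark-calibration-precision=50

Or per test, via the marker:

.. code-block:: python

    import time

    import pytest


    @pytest.mark.benchmark(min_time=0.000100, calibration_precision=50)
    def test_tuned(benchmark):
        benchmark(time.sleep, 0.000001)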
ionelmc-pytest-benchmark-5edf251/docs/changelog.rst
.. include:: ../CHANGELOG.rst
ionelmc-pytest-benchmark-5edf251/docs/comparing.rst
Comparing past runs
===================
Before comparing different runs it's ideal to make your tests as consistent as possible, see :doc:`faq` for more details.
`pytest-benchmark` has support for storing stats and data for the previous runs.
To store a run just add ``--benchmark-autosave`` or ``--benchmark-save=some-name`` to your pytest arguments. All the files are
saved in a path like ``.benchmarks/Linux-CPython-3.4-64bit``.
* ``--benchmark-autosave`` saves a file like ``0001_c9cca5de6a4c7eb2_20150815_215724.json`` where:
* ``0001`` is an automatically incremented id, much like how Django migrations have a number.
* ``c9cca5de6a4c7eb2`` is the commit id (if you use Git or Mercurial).
* ``20150815_215724`` is the current time.
You should add ``--benchmark-autosave`` to ``addopts`` in your pytest configuration so you don't have to specify it all
the time.
* ``--benchmark-save=foobar`` works similarly, but saves a file like ``0001_foobar.json``. It's there in case you want to
give a specific name to the run.
After you have saved your first run you can compare against it with ``--benchmark-compare=0001``. You will get an additional
row for each test in the result table, showing the differences.
You can also make the suite fail with ``--benchmark-compare-fail=<stat>:<num>%`` or ``--benchmark-compare-fail=<stat>:<num>``.
Examples:
* ``--benchmark-compare-fail=min:5%`` will make the suite fail if ``Min`` is 5% slower for any test.
* ``--benchmark-compare-fail=mean:0.001`` will make the suite fail if ``Mean`` is 0.001 seconds slower for any test.
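Putting the above together, a typical sequence might look like this (``baseline`` and the ``5%`` threshold are arbitrary
example values)::

    pytest tests/ --benchmark-save=baseline
    # ... change the code under test ...
    pytest tests/ --benchmark-compare=0001 --benchmark-compare-fail=mean:5%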
Comparing outside of pytest
---------------------------
There is a convenience CLI for listing/comparing past runs: ``pytest-benchmark`` (:ref:`comparison-cli`).
Example::
pytest-benchmark compare 0001 0002
Plotting
--------
.. note::
To use plotting you need to ``pip install pygal pygaljs`` or ``pip install pytest-benchmark[histogram]``.
You can also get a nice plot with ``--benchmark-histogram``. The result is a modified Tukey box and whisker plot where the
outliers (the small bullets) are ``Min`` and ``Max``. Note that if you do not supply a name for the plot it is recommended
that ``--benchmark-histogram`` is the last option passed.
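For instance, a hedged invocation (the ``histograms/benchmark`` prefix is just an example name)::

    pytest tests/ --benchmark-autosave --benchmark-histogram=histograms/benchmark

This should produce one SVG per benchmark group, using the given filename prefix.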
Example output:
.. image:: screenshot-histogram.png
ionelmc-pytest-benchmark-5edf251/docs/conf.py
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'
project = 'pytest-benchmark'
year = '2014-2023'
author = 'Ionel Cristian Mărieș'
copyright = f'{year}, {author}'
version = release = '5.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/ionelmc/pytest-benchmark/issues/%s', '#%s'),
'pr': ('https://github.com/ionelmc/pytest-benchmark/pull/%s', 'PR #%s'),
}
html_theme = 'furo'
html_theme_options = {
'githuburl': 'https://github.com/ionelmc/pytest-benchmark/',
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_short_title = f'{project}-{version}'
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
ionelmc-pytest-benchmark-5edf251/docs/contributing.rst 0000664 0000000 0000000 00000000041 14710416711 0023221 0 ustar 00root root 0000000 0000000 .. include:: ../CONTRIBUTING.rst
ionelmc-pytest-benchmark-5edf251/docs/faq.rst 0000664 0000000 0000000 00000005217 14710416711 0021273 0 ustar 00root root 0000000 0000000 Frequently Asked Questions
==========================
Why is my ``StdDev`` so high?
There can be a few causes for this:
* Bad isolation. You run other services on your machine that eat up your CPU, or you run in a VM, which makes
machine performance inconsistent. Ideally you'd avoid such setups, stop all services and applications, and use bare
metal machines.
* Bad tests or too much complexity. The function you're testing is doing I/O, using external resources, has
side effects, or does other non-deterministic things. Ideally you'd avoid testing huge chunks of code.
One special situation is PyPy: its GC and JIT can add unpredictable overhead - you'll see it as huge spikes all
over the place. You should make sure that you have a good amount of warmup (using ``--benchmark-warmup`` and
``--benchmark-warmup-iterations``) to prime the JIT as much as possible. Unfortunately not much can be done about
GC overhead.
If you cannot make your tests more predictable and remove overhead, you should look at different stats, like IQR and
Median. IQR is often better than StdDev.
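For example, a generous warmup on PyPy could look like this (the iteration count is illustrative)::

    pytest tests/ --benchmark-warmup=on --benchmark-warmup-iterations=200000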
Why is my ``Min`` way lower than ``Q1-1.5IQR``?
You may see this issue in the histogram plot. This is another instance of *bad isolation*.
For example, Intel CPUs have a feature called `Turbo Boost <https://en.wikipedia.org/wiki/Intel_Turbo_Boost>`_ which
overclocks your CPU depending on how many cores are in use at the time and how hot the CPU is. If your CPU is too hot you get
no Turbo Boost. If Turbo Boost is active then the CPU quickly gets hot. You can see how this won't work for sustained
workloads.
When Turbo Boost kicks in you may see "speed spikes" - and you'd get this strange outlier ``Min``.
When you have other programs running on your machine you may also see the "speed spikes" - the other programs idle for a
brief moment and that allows your function to run way faster in that brief moment.
I can't avoid using VMs or running other programs. What can I do?
As a last-ditch effort, pytest-benchmark allows you to plug in custom timers (``--benchmark-timer``). You could use
something like ``time.process_time`` as the timer. Process time `doesn't include sleeping or waiting
for I/O <https://docs.python.org/3/library/time.html#time.process_time>`_.
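For example, to benchmark against CPU time rather than wall time::

    pytest tests/ --benchmark-timer=time.process_time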
The histogram doesn't show ``Max`` time. What gives?!
The height of the plot is limited to ``Q3+1.5IQR`` because ``Max`` has the nasty tendency to be way higher, making
everything else look small and indiscernible. For this reason ``Max`` is *plotted outside*.
Most people don't care about ``Max`` at all, so this is fine.
ionelmc-pytest-benchmark-5edf251/docs/glossary.rst 0000664 0000000 0000000 00000001022 14710416711 0022355 0 ustar 00root root 0000000 0000000 Glossary
========
Iteration
A single run of your benchmarked function.
Round
A set of iterations. The size of a `round` is computed in the calibration phase.
Stats are computed with rounds, not with iterations. The duration for a round is an average of all the iterations in that round.
See: :doc:`calibration` for an explanation of why it's like this.
Mean
The arithmetic average of all the round durations.
Median
The middle value of the sorted round durations. It is less affected by outliers than the Mean.
IQR
Interquartile Range (Q3 - Q1). This is a different way to measure variance.
StdDev
Standard Deviation. A measure of how spread out the round durations are around the Mean.
Outliers
Rounds that fall unusually far from the rest, e.g. more than 1 standard deviation from the Mean or more than
1.5 IQR beyond the first or third quartile.
ionelmc-pytest-benchmark-5edf251/docs/hooks.rst 0000664 0000000 0000000 00000000236 14710416711 0021643 0 ustar 00root root 0000000 0000000 Hooks
=====
Hooks for customizing various parts of ``pytest-benchmark``.
-----
.. automodule:: pytest_benchmark.hookspec
:members:
:undoc-members:
ionelmc-pytest-benchmark-5edf251/docs/index.rst 0000664 0000000 0000000 00000004213 14710416711 0021626 0 ustar 00root root 0000000 0000000 Welcome to pytest-benchmark's documentation!
============================================
This plugin provides a `benchmark` fixture. This fixture is a callable object that will benchmark any function passed
to it.
Notable features and goals:
* Sensible defaults and automatic calibration for microbenchmarks
* Good integration with pytest
* Comparison and regression tracking
* Exhaustive statistics
* JSON export
Examples:
.. code-block:: python
import time

def something(duration=0.000001):
    """
    Function that needs some serious benchmarking.
    """
    time.sleep(duration)
    # You may return anything you want, like the result of a computation
    return 123

def test_my_stuff(benchmark):
    # benchmark something
    result = benchmark(something)
    # Extra code, to verify that the run completed correctly.
    # Sometimes you may want to check the result, fast functions
    # are no good if they return incorrect results :-)
    assert result == 123

def test_my_stuff_different_arg(benchmark):
    # benchmark something, but add some arguments
    result = benchmark(something, 0.001)
    assert result == 123
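Assuming the examples above are saved in a file named ``test_stuff.py`` (a hypothetical name), they run like any other
pytest tests::

    pytest test_stuff.py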
Screenshots
-----------
Normal run:
.. image:: https://github.com/ionelmc/pytest-benchmark/raw/master/docs/screenshot.png
:alt: Screenshot of py.test summary
Compare mode (``--benchmark-compare``):
.. image:: https://github.com/ionelmc/pytest-benchmark/raw/master/docs/screenshot-compare.png
:alt: Screenshot of py.test summary in compare mode
Histogram (``--benchmark-histogram``):
.. image:: https://cdn.rawgit.com/ionelmc/pytest-benchmark/94860cc8f47aed7ba4f9c7e1380c2195342613f6/docs/sample-tests_test_normal.py_test_xfast_parametrized%5B0%5D.svg
:alt: Histogram sample
..
Also, it has nice tooltips.
User guide
==========
.. toctree::
:maxdepth: 2
installation
usage
calibration
pedantic
comparing
hooks
faq
glossary
contributing
authors
changelog
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
ionelmc-pytest-benchmark-5edf251/docs/installation.rst 0000664 0000000 0000000 00000000140 14710416711 0023213 0 ustar 00root root 0000000 0000000 ============
Installation
============
At the command line::
pip install pytest-benchmark
ionelmc-pytest-benchmark-5edf251/docs/measurement-issues.png 0000664 0000000 0000000 00000140411 14710416711 0024332 0 ustar 00root root 0000000 0000000 [binary PNG image data omitted]