pax_global_header 0000666 0000000 0000000 00000000064 14562240250 0014512 g ustar 00root root 0000000 0000000 52 comment=9b7a26f8d24d91a0cf040b367d1a37f680f01bbd
asv_runner-0.2.1/ 0000775 0000000 0000000 00000000000 14562240250 0013674 5 ustar 00root root 0000000 0000000 asv_runner-0.2.1/.github/ 0000775 0000000 0000000 00000000000 14562240250 0015234 5 ustar 00root root 0000000 0000000 asv_runner-0.2.1/.github/workflows/ 0000775 0000000 0000000 00000000000 14562240250 0017271 5 ustar 00root root 0000000 0000000 asv_runner-0.2.1/.github/workflows/build_wheels.yml 0000664 0000000 0000000 00000003254 14562240250 0022466 0 ustar 00root root 0000000 0000000 # Build on every branch push, tag push, and pull request change:
# From: https://github.com/pypa/cibuildwheel/blob/main/examples/github-deploy.yml
name: Build wheels
on: [push, pull_request]
jobs:
build_wheels:
name: Build wheel for ${{ matrix.python }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.8'
- run: pip wheel -w ./wheelhouse/ .
- uses: actions/upload-artifact@v3
with:
path: ./wheelhouse/*.whl
build_sdist:
name: Build source distribution
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Build sdist
shell: bash -l {0}
run: pipx run build --sdist
- uses: actions/upload-artifact@v3
with:
path: dist/*.tar.gz
upload_pypi:
needs: [build_wheels, build_sdist]
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/asv_runner
permissions:
id-token: write # for trusted publishing
# upload to PyPI on every tag starting with 'v'
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
# alternatively, to publish when a GitHub Release is created, use the following rule:
# if: github.event_name == 'release' && github.event.action == 'published'
steps:
- uses: actions/download-artifact@v3
with:
# unpacks default artifact into dist/
# if `name: artifact` is omitted, the action will create extra parent dir
name: artifact
path: dist
- uses: pypa/gh-action-pypi-publish@release/v1
asv_runner-0.2.1/.github/workflows/pre_commit.yml 0000664 0000000 0000000 00000000421 14562240250 0022147 0 ustar 00root root 0000000 0000000 name: pre-commit
on:
pull_request:
push:
branches: [main]
jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v3
with:
python-version: '3.9'
- uses: pre-commit/action@v2.0.3
asv_runner-0.2.1/.github/workflows/slashdot_trigger.yml 0000664 0000000 0000000 00000000630 14562240250 0023357 0 ustar 00root root 0000000 0000000 name: Slash Command Dispatch
on:
issue_comment:
types: [created]
jobs:
slashCommandDispatch:
runs-on: ubuntu-latest
steps:
- name: Slash Command Dispatch
uses: peter-evans/slash-command-dispatch@v3
with:
token: ${{ secrets.ASV_TOK }}
commands: |
trigger-asv
static-args: |
pr_number=${{ github.event.issue.number }}
asv_runner-0.2.1/.github/workflows/trigger_asv.yml 0000664 0000000 0000000 00000001541 14562240250 0022331 0 ustar 00root root 0000000 0000000 name: Trigger asv
on:
repository_dispatch:
types: [trigger-asv-command]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
triggerasv:
runs-on: ${{ matrix.config.os }}
name: ${{ matrix.config.os }}
strategy:
fail-fast: false
matrix:
config:
- {os: ubuntu-latest}
steps:
- uses: actions/checkout@v3
with:
submodules: "recursive"
fetch-depth: 0
- uses: convictional/trigger-workflow-and-wait@v1.6.5
with:
owner: airspeed-velocity
repo: asv
github_token: ${{ secrets.ASV_TOK }}
workflow_file_name: triggered.yml
ref: master
wait_workflow: true
client_payload: '{"pr_number": "${{ github.event.client_payload.slash_command.args.named.pr_number }}"}'
asv_runner-0.2.1/.gitignore 0000664 0000000 0000000 00000006217 14562240250 0015672 0 ustar 00root root 0000000 0000000 # Additionally
docs/source/apidocs/*
docs/build/*
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.pdm-python
/docs/source/CHANGELOG.md
/docs/html/
/asv_runner/_version.py
/.pdm-build/
asv_runner-0.2.1/.pre-commit-config.yaml 0000664 0000000 0000000 00000001203 14562240250 0020151 0 ustar 00root root 0000000 0000000 repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: trailing-whitespace
exclude: ^(test/example_results)
- id: end-of-file-fixer
exclude: ^(test/example_results/cheetah)
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
name: isort (python)
- repo: https://github.com/psf/black
rev: 23.3.0
hooks:
- id: black
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.272
hooks:
- id: ruff
args: ["--fix", "--show-source"]
asv_runner-0.2.1/.readthedocs.yaml 0000664 0000000 0000000 00000000416 14562240250 0017124 0 ustar 00root root 0000000 0000000 version: 2
build:
os: ubuntu-22.04
tools:
python: "3.10"
sphinx:
configuration: docs/source/conf.py
formats:
- pdf
- epub
python:
install:
- requirements: docs/requirements.txt
- method: pip
path: .
extra_requirements:
- docs
asv_runner-0.2.1/CHANGELOG.md 0000664 0000000 0000000 00000013510 14562240250 0015505 0 ustar 00root root 0000000 0000000 # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
This project uses [*towncrier*](https://towncrier.readthedocs.io/) and the changes for the upcoming release can be found in .
## [0.2.1](https://github.com/airspeed-velocity/asv_runner/tree/0.2.1) - 11-02-2024
No significant changes.
## [0.2.0](https://github.com/airspeed-velocity/asv_runner/tree/0.2.0) - 11-02-2024
### Other Changes and Additions
- `asv_runner` now uses `towncrier` to manage the changelog, also adds the
changeglog to the generated documentation.
([#38](https://github.com/airspeed-velocity/asv_runner/issues/38))
- The lowest supported version of `python` for building the `asv_runner`
documentation is now `3.8`, since `3.7` has been EOL for [many months
now](https://endoflife.date/python).
([#39](https://github.com/airspeed-velocity/asv_runner/issues/39))
## [0.1.0](https://github.com/airspeed-velocity/asv_runner/tree/0.1.0) - 11-09-2023
### Bug Fixes
- Default `max_time` is set to `60.0` seconds to fix `--quick`.
([#29](https://github.com/airspeed-velocity/asv_runner/issues/29))
- `asv` will not try to access a missing `colorama` attribute.
([#32](https://github.com/airspeed-velocity/asv_runner/issues/32))
### Other Changes and Additions
- `pip-tools` and `pip-compile` are used to pin transitive dependencies for
read the docs.
([#31](https://github.com/airspeed-velocity/asv_runner/issues/31))
## [0.0.9](https://github.com/airspeed-velocity/asv_runner/tree/0.0.9) - 20-08-2023
### New Features
- Adds a `skip_benchmark` decorator.
```python
from asv_runner.benchmarks.helpers import skip_benchmark
@skip_benchmark
class TimeSuite:
"""
An example benchmark that times the performance of various kinds
of iterating over dictionaries in Python.
"""
def setup(self):
self.d = {}
for x in range(500):
self.d[x] = None
def time_keys(self):
for key in self.d.keys():
pass
def time_values(self):
for value in self.d.values():
pass
def time_range(self):
d = self.d
for key in range(500):
d[key]
```
Usage requires `asv 0.6.0`.
([#13](https://github.com/airspeed-velocity/asv_runner/issues/13))
- Finely grained `skip_benchmark_if` and `skip_params_if` have been added.
```python
from asv_runner.benchmarks.mark import skip_benchmark_if, skip_params_if
import datetime
class TimeSuite:
"""
An example benchmark that times the performance of various kinds
of iterating over dictionaries in Python.
"""
params = [100, 200, 300, 400, 500]
param_names = ["size"]
def setup(self, size):
self.d = {}
for x in range(size):
self.d[x] = None
@skip_benchmark_if(datetime.datetime.now().hour >= 12)
def time_keys(self, size):
for key in self.d.keys():
pass
@skip_benchmark_if(datetime.datetime.now().hour >= 12)
def time_values(self, size):
for value in self.d.values():
pass
@skip_benchmark_if(datetime.datetime.now().hour >= 12)
def time_range(self, size):
d = self.d
for key in range(size):
d[key]
# Skip benchmarking when size is either 100 or 200 and the current hour is
12 or later.
@skip_params_if([(100,), (200,)],
datetime.datetime.now().hour >= 12)
def time_dict_update(self, size):
d = self.d
for i in range(size):
d[i] = i
```
Usage requires `asv 0.6.0`.
([#17](https://github.com/airspeed-velocity/asv_runner/issues/17))
- Benchmarks can now be parameterized using decorators.
```python
import numpy as np
from asv_runner.benchmarks.mark import parameterize
@parameterize({"n":[10, 100]})
def time_sort(n):
np.sort(np.random.rand(n))
@parameterize({'n': [10, 100], 'func_name': ['range', 'arange']})
def time_ranges_multi(n, func_name):
f = {'range': range, 'arange': np.arange}[func_name]
for i in f(n):
pass
@parameterize({"size": [10, 100, 200]})
class TimeSuiteDecoratorSingle:
def setup(self, size):
self.d = {}
for x in range(size):
self.d[x] = None
def time_keys(self, size):
for key in self.d.keys():
pass
def time_values(self, size):
for value in self.d.values():
pass
@parameterize({'n': [10, 100], 'func_name': ['range', 'arange']})
class TimeSuiteMultiDecorator:
def time_ranges(self, n, func_name):
f = {'range': range, 'arange': np.arange}[func_name]
for i in f(n):
pass
```
Usage requires `asv 0.6.0`.
([#18](https://github.com/airspeed-velocity/asv_runner/issues/18))
- Benchmarks can now be skipped during execution.
```python
from asv_runner.benchmarks.mark import skip_for_params, parameterize,
SkipNotImplemented
# Fast because no setup is called
class SimpleFast:
params = ([False, True])
param_names = ["ok"]
@skip_for_params([(False, )])
def time_failure(self, ok):
if ok:
x = 34.2**4.2
@parameterize({"ok": [False, True]})
class SimpleSlow:
def time_failure(self, ok):
if ok:
x = 34.2**4.2
else:
raise SkipNotImplemented(f"{ok} is skipped")
```
Usage requires `asv 0.6.0`.
([#20](https://github.com/airspeed-velocity/asv_runner/issues/20))
### Bug Fixes
- It is possible to set a default timeout from `asv`.
([#19](https://github.com/airspeed-velocity/asv_runner/issues/19))
### Other Changes and Additions
- Documentation, both long-form and API level has been added.
([#6](https://github.com/airspeed-velocity/asv_runner/issues/6))
asv_runner-0.2.1/CODEOWNERS 0000664 0000000 0000000 00000001043 14562240250 0015265 0 ustar 00root root 0000000 0000000 # This is a comment.
# Each line is a file pattern followed by one or more owners.
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence,
# @global-owner1 and @global-owner2 will be requested for
# review when someone opens a pull request.
* @HaoZeke
# Order is important; the last matching pattern takes the most
# precedence. When someone opens a pull request that only
# modifies JS files, for example, only @js-owner and not the global
# owner(s) will be requested for a review.
asv_runner-0.2.1/CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000012560 14562240250 0016477 0 ustar 00root root 0000000 0000000 # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[INSERT CONTACT METHOD].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
asv_runner-0.2.1/LICENSE.md 0000664 0000000 0000000 00000003064 14562240250 0015303 0 ustar 00root root 0000000 0000000 Copyright (c) 2018-2023, asv Developers.
Copyright (c) 2011-2018, Michael Droettboom, Space Telescope Science Institute, Pauli Virtanen
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the Astropy Team nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
asv_runner-0.2.1/README.md 0000664 0000000 0000000 00000002057 14562240250 0015157 0 ustar 00root root 0000000 0000000 # About [](https://asv.readthedocs.io/projects/asv-runner/en/latest/)
Core Python benchmark code for `asv`.
**This package shall not have any dependencies on external packages and must be
compatible with all Python versions greater than or equal to `3.7`.**
For other functionality, refer to the `asv` package or consider writing an extension.
# Contributions
All contributions are welcome, this includes code and documentation
contributions but also questions or other clarifications. Note that we expect
all contributors to follow our [Code of
Conduct](https://github.com/airspeed-velocity/asv_runner/blob/main/CODE_OF_CONDUCT.md).
## Developing locally
A `pre-commit` job is setup on CI to enforce consistent styles, so it is best to
set it up locally as well (using [pipx](https://pypa.github.io/pipx/) for isolation):
```sh
# Run before commiting
pipx run pre-commit run --all-files
# Or install the git hook to enforce this
pipx run pre-commit install
```
asv_runner-0.2.1/asv_runner/ 0000775 0000000 0000000 00000000000 14562240250 0016056 5 ustar 00root root 0000000 0000000 asv_runner-0.2.1/asv_runner/__init__.py 0000664 0000000 0000000 00000000000 14562240250 0020155 0 ustar 00root root 0000000 0000000 asv_runner-0.2.1/asv_runner/_aux.py 0000664 0000000 0000000 00000015044 14562240250 0017370 0 ustar 00root root 0000000 0000000 import contextlib
import importlib
import os
import sys
import tempfile
from .benchmarks._maxrss import set_cpu_affinity
class SpecificImporter:
"""
Module importer that only allows loading a given module from the
given path.
#### Notes
Using this enables importing the asv benchmark suite without
adding its parent directory to sys.path. The parent directory can
in principle contain anything, including some version of the
project module (common situation if asv.conf.json is on project
repository top level).
"""
def __init__(self, name, root):
"""
Initialize a new instance of `SpecificImporter`.
#### Parameters
**name** (`str`)
: The name of the module to load.
**root** (`str`)
: The path to the directory containing the module.
"""
self._name = name
self._root = root
def find_spec(self, fullname, path, target):
"""
Find the module specification for the given module.
#### Parameters
**fullname** (`str`)
: The fully qualified name of the module.
**path** (list or None)
: The path for module search, or None if unavailable.
**target** (object)
: The target object to import.
#### Returns
**spec** (`ModuleSpec` or None)
: The module specification if the module is found, or None otherwise.
#### Notes
This method is called by the import system to find the module
specification for the requested module. If the requested module matches
the name of the SpecificImporter instance, it returns the module
specification using the `importlib.machinery.PathFinder`.
"""
if fullname == self._name:
if path is not None:
raise ValueError()
finder = importlib.machinery.PathFinder()
return finder.find_spec(fullname, [self._root], target)
return None
def update_sys_path(root):
"""
Update sys.meta_path to include the SpecificImporter.
##### Parameters
`root` (`str`): The path to the root directory.
##### Notes
This function inserts the SpecificImporter into the `sys.meta_path` at the
beginning, allowing the module to be imported using the SpecificImporter
when it is encountered during the import process.
"""
sys.meta_path.insert(
0, SpecificImporter(os.path.basename(root), os.path.dirname(root))
)
@contextlib.contextmanager
def posix_redirect_output(filename=None, permanent=True):
"""
Redirect stdout/stderr to a file, using posix `dup2`.
#### Parameters
**filename** (`str` or None, optional)
: The name of the file to redirect the output to. If None, a temporary
file will be created.
**permanent** (`bool`, optional)
: Indicates whether the redirection is permanent or temporary. If False,
the original stdout/stderr will be restored after the context is exited.
#### Yields
**filename** (`str`)
: The name of the file where the output is redirected.
#### Notes
The function redirects the `stdout` and `stderr` streams to a file using
the posix `dup2` function. It is typically used within a `with` statement to
encapsulate the code block where the redirection is desired.
If `filename` is not provided, a temporary file will be created and used for
redirection.
If `permanent` is `True`, the redirection will persist after the context is
exited. If `False`, the original `stdout`/`stderr` will be restored.
"""
sys.stdout.flush()
sys.stderr.flush()
stdout_fd = sys.stdout.fileno()
stderr_fd = sys.stderr.fileno()
if not permanent:
stdout_fd_copy = os.dup(stdout_fd)
stderr_fd_copy = os.dup(stderr_fd)
if filename is None:
out_fd, filename = tempfile.mkstemp()
else:
out_fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
try:
# Redirect stdout and stderr to file
os.dup2(out_fd, stdout_fd)
os.dup2(out_fd, stderr_fd)
yield filename
finally:
sys.stdout.flush()
sys.stderr.flush()
os.close(out_fd)
if not permanent:
os.dup2(stdout_fd_copy, stdout_fd)
os.dup2(stderr_fd_copy, stderr_fd)
os.close(stdout_fd_copy)
os.close(stderr_fd_copy)
def recvall(sock, size):
"""
Receive data of given size from a socket connection.
#### Parameters
**sock** (socket object)
: The socket connection to receive data from.
**size** (`int`)
: The size of the data to receive, in bytes.
#### Returns
**data** (`bytes`)
: The received data.
#### Raises
**RuntimeError**
: If the data received from the socket is less than the specified size.
#### Notes
The function receives data from a socket connection in multiple chunks until
the specified size is reached. It ensures that all the required data is received
before returning.
If the received data size is less than the specified size, a `RuntimeError`
is raised indicating the failure to receive the complete data.
"""
data = b""
while len(data) < size:
s = sock.recv(size - len(data))
data += s
if not s:
raise RuntimeError(
"did not receive data from socket " f"(size {size}, got only {data!r})"
)
return data
def set_cpu_affinity_from_params(extra_params):
"""
Set CPU affinity based on the provided parameters.
#### Parameters
**extra_params** (`dict` or `None`)
: Additional parameters containing CPU affinity information.
#### Notes
This function attempts to set the CPU affinity for the current process
based on the provided parameters. It uses the `set_cpu_affinity` function
internally to perform the actual affinity setting.
If the `extra_params` dictionary contains a key "cpu_affinity" with a
valid affinity list, the CPU affinity will be set accordingly.
#### Raises
**BaseException**
: If setting the CPU affinity fails, an exception is raised and an error
message is printed.
#### Example
```{code-block} python
extra_params = {"cpu_affinity": [0, 1]}
set_cpu_affinity_from_params(extra_params)
```
"""
affinity_list = extra_params.get("cpu_affinity", None)
if affinity_list is not None:
try:
set_cpu_affinity(affinity_list)
except BaseException as exc:
print(f"asv: setting cpu affinity {affinity_list !r} failed: {exc !r}")
asv_runner-0.2.1/asv_runner/benchmarks/ 0000775 0000000 0000000 00000000000 14562240250 0020173 5 ustar 00root root 0000000 0000000 asv_runner-0.2.1/asv_runner/benchmarks/__init__.py 0000664 0000000 0000000 00000005302 14562240250 0022304 0 ustar 00root root 0000000 0000000 """
Automatically discovers and imports benchmark classes from all submodules in
the current package.
#### Variables
**pkgname** (`str`)
: The name of the current package.
**pkgpath** (`_frozen_importlib_external._NamespacePath`)
: The path of the current package.
**module_names** (`List[str]`)
: The names of all submodules in the current package that don't contain an underscore.
**benchmark_types** (`List[Type]`)
: A list to hold all benchmark classes from the submodules.
#### Raises
**NotRequired** (`Exception`)
: If a submodule raises a `NotRequired` exception during import, it is ignored.
#### Notes
This module first identifies all submodules in the current package that don't contain
an underscore in their names. It then iterates over these submodules, imports each one,
and checks if it contains an attribute named "export_as_benchmark".
If such an attribute exists, its contents (which should be a list of benchmark classes)
are added to the `benchmark_types` list. If a submodule raises a `NotRequired` exception
during the import, it is ignored, and the loop continues with the next submodule.
This code is useful in a benchmarking suite where new benchmarks can be added simply by
adding a new submodule with an "export_as_benchmark" attribute.
"""
import importlib
import pkgutil
from pathlib import Path
# py37 doesn't have importlib.metadata
from importlib_metadata import distributions
from ._exceptions import NotRequired
pkgname = __name__
pkgpath = __path__
submodule_names = [
name for _, name, _ in pkgutil.iter_modules(pkgpath) if "_" not in name
]
asv_modules = [
dist.metadata["Name"]
for dist in distributions()
if dist.metadata["Name"].startswith("asv_bench")
]
benchmark_types = []
# Builtin modules
for module_name in submodule_names:
try:
module = importlib.import_module(f"{pkgname}.{module_name}")
if "export_as_benchmark" in dir(module):
benchmark_types.extend(iter(getattr(module, "export_as_benchmark")))
except NotRequired:
# Ignored.
pass
# External asv_bench modules
for module_name in asv_modules:
try:
module = importlib.import_module(module_name)
benchmarks_path = Path(module.__file__).parent / "benchmarks"
benchmark_submodules = [
name for _, name, _ in pkgutil.iter_modules([str(benchmarks_path)])
]
for submodule_name in benchmark_submodules:
submodule = importlib.import_module(
f"{module_name}.benchmarks.{submodule_name}"
)
if "export_as_benchmark" in dir(submodule):
benchmark_types.extend(iter(getattr(submodule, "export_as_benchmark")))
except (ImportError, NotRequired):
pass
asv_runner-0.2.1/asv_runner/benchmarks/_base.py 0000664 0000000 0000000 00000055664 14562240250 0021636 0 ustar 00root root 0000000 0000000 import cProfile as profile
import inspect
import itertools
import math
import os
import re
import textwrap
from collections import Counter
from hashlib import sha256
def _get_attr(source, name, ignore_case=False):
"""
Retrieves an attribute from a source by its name.
#### Parameters
**source** (`object`)
: The source from which to get the attribute.
**name** (`str`)
: The name of the attribute.
**ignore_case** (`bool`, optional)
: Whether to ignore case when comparing attribute names. Defaults to `False`.
#### Returns
**attr** (`object` or `None`)
: The attribute if it is found, else `None`.
#### Raises
**ValueError**
: If more than one attribute with the given name exists and `ignore_case` is `True`.
"""
if not ignore_case:
return getattr(source, name, None)
attrs = [getattr(source, key) for key in dir(source) if key.lower() == name.lower()]
if len(attrs) > 1:
raise ValueError(f"{source.__name__} contains multiple {name} functions.")
elif len(attrs) == 1:
return attrs[0]
else:
return None
def _get_all_attrs(sources, name, ignore_case=False):
"""
Yields attributes from a list of sources by their name.
#### Parameters
**sources** (`List[object]`)
: The list of sources from which to get the attribute.
**name** (`str`)
: The name of the attribute.
**ignore_case** (`bool`, optional)
: Whether to ignore case when comparing attribute names. Defaults to `False`.
#### Yields
**val** (`object`)
: The attribute if it is found in the source.
"""
for source in sources:
val = _get_attr(source, name, ignore_case=ignore_case)
if val is not None:
yield val
def _get_first_attr(sources, name, default, ignore_case=False):
"""
Retrieves the first attribute from a list of sources by its name.
#### Parameters
**sources** (`List[object]`)
: The list of sources from which to get the attribute.
**name** (`str`)
: The name of the attribute.
**default** (`object`)
: The default value to return if no attribute is found.
**ignore_case** (`bool`, optional)
: Whether to ignore case when comparing attribute names. Defaults to `False`.
#### Returns
**attr** (`object`)
: The first attribute found or the default value if no attribute is found.
"""
for val in _get_all_attrs(sources, name, ignore_case=ignore_case):
return val
return default
def get_setup_cache_key(func):
"""
Retrieves the cache key for a function's setup.
#### Parameters
**func** (`function`)
: The function for which to get the cache key.
#### Returns
**cache_key** (`str` or `None`)
: The cache key if the function is not `None`, else `None`.
#### Notes
The cache key is a string composed of the function's module name and the line
number where the function's source code starts.
"""
if func is None:
return None
module = inspect.getmodule(func)
mname = ".".join(module.__name__.split(".", 1)[1:])
if not mname:
mname = inspect.getsourcefile(func)
return f"{mname}:{inspect.getsourcelines(func)[1]}"
def get_source_code(items):
"""
Extracts, concatenates, and dedents the source code of the given items.
#### Parameters
**items** (`Iterable[object]`)
: An iterable of items, typically functions or methods, for which to extract the
source code.
#### Returns
**source_code** (`str`)
: The concatenated and dedented source code of the items.
#### Notes
The function retrieves the source code of each item. If the item has a
`pretty_source` attribute, it uses that as the source code. Otherwise, it
attempts to use the `inspect` module's `getsourcelines` function to extract
the source code.
The function also adds class names to methods and properly indents the
source code. If the source code belongs to a method, the function retrieves
the class name and prepends it to the source code, properly indenting it to
reflect its position within the class. If the source code belongs to the
same class as the previous item, only the indentation is adjusted.
"""
sources = []
prev_class_name = None
for func in items:
# custom source
if hasattr(func, "pretty_source"):
src = textwrap.dedent(func.pretty_source).lstrip()
# original source
else:
try:
lines, _ = inspect.getsourcelines(func)
except TypeError:
continue
if not lines:
continue
src = "\n".join(line.rstrip() for line in lines)
src = textwrap.dedent(src)
class_name = None
if inspect.ismethod(func):
# Add class name
if hasattr(func, "im_class"):
class_name = func.im_class.__name__
elif hasattr(func, "__qualname__"):
names = func.__qualname__.split(".")
if len(names) > 1:
class_name = names[-2]
if class_name and prev_class_name != class_name:
src = "class {}:\n {}".format(class_name, src.replace("\n", "\n "))
elif class_name:
src = " " + src.replace("\n", "\n ")
sources.append(src)
prev_class_name = class_name
return "\n\n".join(sources).rstrip()
def _get_sourceline_info(obj, basedir):
"""
Retrieves the source file and line number information of the given object.
#### Parameters
**obj** (`object`)
: The object for which to retrieve source file and line number information. This is
typically a function or a method.
**basedir** (`str`)
: The base directory relative to which the source file path should be expressed.
#### Returns
**sourceline_info** (`str`)
: A string containing the relative path of the source file and the line number where
the object is defined, in the format `' in {filename}:{lineno}'`. If the source file
or line number cannot be determined, an empty string is returned.
#### Notes
The function uses the `inspect` module's `getsourcefile` and
`getsourcelines` functions to determine the source file and line number of
the object, respectively. The source file path is converted to a path
relative to `basedir` using `os.path.relpath`.
"""
try:
fn = inspect.getsourcefile(obj)
fn = os.path.relpath(fn, basedir)
_, lineno = inspect.getsourcelines(obj)
return f" in {fn !s}:{lineno !s}"
except Exception:
return ""
def _check_num_args(root, benchmark_name, func, min_num_args, max_num_args=None):
"""
Verifies if the function under benchmarking accepts a correct number of arguments.
#### Parameters
**root** (`str`)
: The root directory for the function's source file (used to print detailed error
messages).
**benchmark_name** (`str`)
: The name of the benchmark for which the function is being checked (used in error
messages).
**func** (`function`)
: The function to check for correct number of arguments.
**min_num_args** (`int`)
: The minimum number of arguments the function should accept.
**max_num_args** (`int`, optional)
: The maximum number of arguments the function should accept. If not provided,
`max_num_args` is assumed to be the same as `min_num_args`.
#### Returns
**validity** (`bool`)
: True if the function accepts a correct number of arguments, False otherwise.
#### Notes
The function uses the `inspect` module's `getfullargspec` function to determine the
number of arguments the function accepts. It correctly handles functions, methods,
variable argument lists, and functions with default argument values. In case of any
error or if the function does not accept a correct number of arguments, an error
message is printed to standard output.
"""
if max_num_args is None:
max_num_args = min_num_args
try:
info = inspect.getfullargspec(func)
except Exception as exc:
print(
f"{benchmark_name !s}: failed to check "
f"({func !r}{_get_sourceline_info(func, root) !s}): {exc !s}"
)
return True
max_args = len(info.args)
if inspect.ismethod(func):
max_args -= 1
min_args = max_args if info.defaults is None else max_args - len(info.defaults)
if info.varargs is not None:
max_args = math.inf
ok = (min_args <= max_num_args) and (min_num_args <= max_args)
if not ok:
args_str = min_args if min_args == max_args else f"{min_args}-{max_args}"
if min_num_args == max_num_args:
num_args_str = min_num_args
else:
num_args_str = f"{min_num_args}-{max_num_args}"
print(
f"{benchmark_name !s}: wrong number of arguments "
f"(for {func !r}{_get_sourceline_info(func, root) !s}):",
f"expected {num_args_str}, " f"has {args_str}",
)
return ok
def _repr_no_address(obj):
"""
Returns a string representing the object, but without its memory address.
#### Parameters
**obj** (`object`)
: The object to represent.
#### Returns
**representation** (`str`)
: A string representation of the object without its memory address.
#### Notes
When Python's built-in `repr` function is used on an object, it often includes
the memory address of the object. In some cases, this might not be desirable
(for example, when comparing object representations in unit tests, where the
memory address is not relevant). This function provides a way to get a string
representation of an object without its memory address.
The function works by first getting the `repr` of the object, then using a
regular expression to detect and remove the memory address if it's present.
To avoid false positives, the function also gets the `repr` of the object
using the `object` class's `__repr__` method (which always includes the
address), and only removes the address from the original `repr` if it matches
the address in the `object.__repr__`.
Please note, this function is not guaranteed to remove the memory address for
all objects. It is primarily intended to work for objects that have a `repr`
similar to the default one provided by the `object` class.
"""
result = repr(obj)
address_regex = re.compile(r"^(<.*) at (0x[\da-fA-F]*)(>)$")
match = address_regex.match(result)
if match:
suspected_address = match[2]
# Double check this is the actual address
default_result = object.__repr__(obj)
match2 = address_regex.match(default_result)
if match2:
known_address = match2[2]
if known_address == suspected_address:
result = match[1] + match[3]
return result
def _validate_params(params, param_names, name):
"""
Validates the params and param_names attributes and returns validated lists.
#### Parameters
**params** (`list`)
: List of parameters for the function to be benchmarked.
**param_names** (`list`)
: List of names for the parameters.
**name** (`str`)
: The name of the benchmark.
#### Returns
**params**, **param_names** (`list`, `list`)
: The validated parameter and parameter name lists.
"""
try:
param_names = [str(x) for x in list(param_names)]
except ValueError:
raise ValueError(f"{name}.param_names is not a list of strings")
try:
params = list(params)
except ValueError:
raise ValueError(f"{name}.params is not a list")
if params and not isinstance(params[0], (tuple, list)):
params = [params]
else:
params = [list(entry) for entry in params]
if len(param_names) != len(params):
param_names = param_names[: len(params)]
param_names += [
"param%d" % (k + 1,) for k in range(len(param_names), len(params))
]
return params, param_names
def _unique_param_ids(params):
"""
Processes the params list to handle duplicate names within parameter sets,
ensuring unique IDs.
#### Parameters
**params** (`list`)
: List of parameters. Each entry is a list representing a set of parameters.
#### Returns
**params** (`list`)
: List of parameters with duplicate names within each set handled.
If there are duplicate names, they are renamed with a numerical suffix to
ensure unique IDs.
"""
params = [[_repr_no_address(item) for item in entry] for entry in params]
for i, param in enumerate(params):
if len(param) != len(set(param)):
counter = Counter(param)
dupe_dict = {name: 0 for name, count in counter.items() if count > 1}
for j in range(len(param)):
name = param[j]
if name in dupe_dict:
param[j] = f"{name} ({dupe_dict[name]})"
dupe_dict[name] += 1
params[i] = param
return params
class Benchmark:
"""
Class representing a single benchmark. The class encapsulates
functions and methods that can be marked as benchmarks, along with
setup and teardown methods, timing and other configuration.
#### Notes
The class uses regex to match method names that will be considered
as benchmarks. The matched functions are then processed for
benchmarking using various helper methods.
By default, a benchmark's timeout is set to 60 seconds.
"""
# The regex of the name of function or method to be considered as
# this type of benchmark. The default in the base class, will
# match nothing.
name_regex = re.compile("^$")
def __init__(self, name, func, attr_sources):
"""
Initialize a new instance of `Benchmark`.
#### Parameters
**name** (`str`)
: The name of the benchmark.
**func** (`function`)
: The function to benchmark.
**attr_sources** (`list`)
: List of sources from which attributes of the benchmark will be drawn.
These attributes include setup, teardown, timeout, etc.
#### Attributes
**pretty_name** (`str`)
: A user-friendly name for the function being benchmarked, if available.
**_setups** (`list`)
: List of setup methods to be executed before the benchmark.
**_teardowns** (`list`)
: List of teardown methods to be executed after the benchmark.
**_setup_cache** (`function`)
: A special setup method that is only run once per parameter set.
**setup_cache_key** (`str`)
: A unique key for the setup cache.
**setup_cache_timeout** (`float`)
: The time after which the setup cache should be invalidated.
**timeout** (`float`)
: The maximum time the benchmark is allowed to run before it is aborted.
**code** (`str`)
: The source code of the function to be benchmarked and its setup methods.
**version** (`str`)
: A version string derived from a hash of the code.
**_params** (`list`)
: List of parameters for the function to be benchmarked.
**param_names** (`list`)
: List of names for the parameters.
**_current_params** (`tuple`)
: The current set of parameters to be passed to the function during the
benchmark.
**params** (`list`)
: The list of parameters with unique representations for exporting.
**_skip_tuples** (`list`)
: List of tuples representing parameter combinations to be skipped
before calling the setup method.
#### Raises
**ValueError**
: If `param_names` or `_params` is not a list or if the number of
parameters does not match the number of parameter names.
"""
self.name = name
self.func = func
self.pretty_name = getattr(func, "pretty_name", None)
self._attr_sources = attr_sources
self._setups = list(_get_all_attrs(attr_sources, "setup", True))[::-1]
self._teardowns = list(_get_all_attrs(attr_sources, "teardown", True))
self._setup_cache = _get_first_attr(attr_sources, "setup_cache", None)
self.setup_cache_key = get_setup_cache_key(self._setup_cache)
self.setup_cache_timeout = _get_first_attr([self._setup_cache], "timeout", None)
self.timeout = _get_first_attr(attr_sources, "timeout", None)
self.code = get_source_code([self.func] + self._setups + [self._setup_cache])
code_text = self.code.encode("utf-8")
code_hash = sha256(code_text).hexdigest()
self.version = str(_get_first_attr(attr_sources, "version", code_hash))
self.type = "base"
self.unit = "unit"
self._redo_setup_next = False
self._params = _get_first_attr(attr_sources, "params", [])
self.param_names = _get_first_attr(attr_sources, "param_names", [])
self._current_params = ()
self._params, self.param_names = _validate_params(
self._params, self.param_names, self.name
)
# Fetch skip parameters
self._skip_tuples = _get_first_attr(attr_sources, "skip_params", [])
# Exported parameter representations
self.params = _unique_param_ids(self._params)
def __repr__(self):
return f"<{self.__class__.__name__} {self.name}>"
def set_param_idx(self, param_idx):
"""
Set the current parameter values for the benchmark based on a parameter
index.
This method updates the `_current_params` attribute with the set of
parameter values that correspond to the provided parameter index.
#### Parameters
**param_idx** (`int`)
: The index of the desired parameter set in the Cartesian product of
`_params` attribute list.
#### Raises
**ValueError**
: If the provided parameter index is not valid. This could occur if the
index does not correspond to any element in the Cartesian product of the
`_params` list.
"""
try:
(self._current_params,) = itertools.islice(
itertools.product(*self._params), param_idx, param_idx + 1
)
except ValueError:
raise ValueError(
f"Invalid benchmark parameter permutation index: {param_idx!r}"
)
def insert_param(self, param):
"""
Inserts a parameter at the beginning of the current parameter list.
This method modifies the `_current_params` attribute, inserting the provided
parameter value at the front of the parameter tuple.
#### Parameters
**param** (`Any`)
: The parameter value to insert at the front of `_current_params`.
"""
self._current_params = tuple([param] + list(self._current_params))
def check(self, root):
"""
Checks call syntax (argument count) for benchmark's setup, call, and teardown.
#### Parameters
**root** (`Any`)
: The root context for checking argument count in setup, call and teardown.
#### Returns
**result** (`bool`)
: `True` if correct argument count is used in all methods, `False` otherwise.
#### Notes
The call syntax is checked only based on the number of arguments. It
also sets the current parameters for the benchmark if they exist. The
number of arguments required by setup, call, and teardown methods may
increase if a setup cache is defined.
"""
# Check call syntax (number of arguments only...)
ok = True
if self._params:
self.set_param_idx(0)
min_num_args = len(self._current_params)
max_num_args = min_num_args
if self.setup_cache_key is not None:
ok = ok and _check_num_args(
root, f"{self.name}: setup_cache", self._setup_cache, 0
)
max_num_args += 1
for setup in self._setups:
ok = ok and _check_num_args(
root, f"{self.name}: setup", setup, min_num_args, max_num_args
)
ok = ok and _check_num_args(
root, f"{self.name}: call", self.func, min_num_args, max_num_args
)
for teardown in self._teardowns:
ok = ok and _check_num_args(
root,
f"{self.name}: teardown",
teardown,
min_num_args,
max_num_args,
)
return ok
def do_setup(self):
if tuple(self._current_params) in self._skip_tuples:
# Skip
return True
try:
for setup in self._setups:
setup(*self._current_params)
except NotImplementedError as e:
# allow skipping test
print(f"asv: skipped: {e !r} ")
return True
return False
def redo_setup(self):
if not self._redo_setup_next:
self._redo_setup_next = True
return
self.do_teardown()
self.do_setup()
def do_teardown(self):
if tuple(self._current_params) in self._skip_tuples:
# Skip
return
for teardown in self._teardowns:
teardown(*self._current_params)
def do_setup_cache(self):
if self._setup_cache is not None:
return self._setup_cache()
def do_run(self):
if tuple(self._current_params) in self._skip_tuples:
# Skip
return
return self.run(*self._current_params)
def do_profile(self, filename=None):
"""
Executes the benchmark's function with profiling using `cProfile`.
#### Parameters
**filename** (`str`, optional)
: The name of the file where the profiling data should be saved. If not
provided, the profiling data will not be saved.
#### Raises
**RuntimeError**
: If the `cProfile` module couldn't be imported.
#### Notes
The method uses an inner function `method_caller` to call the function
to be profiled. The function and its parameters should be available in
the scope where `method_caller` is called.
The `cProfile` module should be available, or else a `RuntimeError` is
raised. If a `filename` is provided, the profiling results will be saved
to that file.
"""
if tuple(self._current_params) in self._skip_tuples:
# Skip
return
def method_caller():
run(*params) # noqa:F821 undefined name
if profile is None:
raise RuntimeError("cProfile could not be imported")
if filename is not None:
if hasattr(method_caller, "func_code"):
code = method_caller.func_code
else:
code = method_caller.__code__
self.redo_setup()
profile.runctx(
code, {"run": self.func, "params": self._current_params}, {}, filename
)
asv_runner-0.2.1/asv_runner/benchmarks/_exceptions.py 0000664 0000000 0000000 00000002107 14562240250 0023065 0 ustar 00root root 0000000 0000000 class NotRequired(ImportError):
"""
Exception raised when a requirement is not met.
This exception inherits from `ImportError`. It's typically used when a particular
package, module or other dependency that is not essential for the overall function
of the program is not found or doesn't meet specific requirements.
#### Attributes
**message** (`str`)
: A string that provides a more detailed explanation of the error.
#### Example
This exception might be used in a scenario where an optional feature of a program
relies on a specific package that is not installed:
```{code-block} python
try:
import optional_package
except ImportError:
raise NotRequired("optional_package is not installed.")
```
"""
def __init__(self, message):
"""
Initialize a new instance of `NotRequired`.
#### Parameters
**message** (`str`)
: A string that provides a more detailed explanation of the error.
"""
self.message = message
super().__init__(self.message)
asv_runner-0.2.1/asv_runner/benchmarks/_maxrss.py 0000664 0000000 0000000 00000013063 14562240250 0022224 0 ustar 00root root 0000000 0000000 import os
import sys
ON_PYPY = hasattr(sys, "pypy_version_info")
if sys.platform.startswith("win"):
import ctypes.wintypes
SIZE_T = ctypes.c_size_t
class PROCESS_MEMORY_COUNTERS(ctypes.Structure):
"""
The PROCESS_MEMORY_COUNTERS structure is used by the
GetProcessMemoryInfo function to store performance information. It's
used here to retrieve the peak working set size, which is the maximum
amount of memory in the working set of the process at any point in time.
"""
_fields_ = [
("cb", ctypes.wintypes.DWORD),
("PageFaultCount", ctypes.wintypes.DWORD),
("PeakWorkingSetSize", SIZE_T),
("WorkingSetSize", SIZE_T),
("QuotaPeakPagedPoolUsage", SIZE_T),
("QuotaPagedPoolUsage", SIZE_T),
("QuotaPeakNonPagedPoolUsage", SIZE_T),
("QuotaNonPagedPoolUsage", SIZE_T),
("PagefileUsage", SIZE_T),
("PeakPagefileUsage", SIZE_T),
]
GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
GetCurrentProcess.argtypes = []
GetCurrentProcess.restype = ctypes.wintypes.HANDLE
GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo
GetProcessMemoryInfo.argtypes = (
ctypes.wintypes.HANDLE,
ctypes.POINTER(PROCESS_MEMORY_COUNTERS),
ctypes.wintypes.DWORD,
)
GetProcessMemoryInfo.restype = ctypes.wintypes.BOOL
def get_maxrss():
"""
Returns the peak working set size for the current process. On Windows,
the peak working set size is the maximum amount of physical memory
used by the process.
#### Returns
**peak_working_set_size** (`int`)
: The peak working set size for the current process.
"""
proc_hnd = GetCurrentProcess()
counters = PROCESS_MEMORY_COUNTERS()
info = GetProcessMemoryInfo(
proc_hnd, ctypes.byref(counters), ctypes.sizeof(counters)
)
if info == 0:
raise ctypes.WinError()
return counters.PeakWorkingSetSize
# Determine correct DWORD_PTR type for current Python version (32 or 64 bit)
if ctypes.sizeof(ctypes.c_void_p) == ctypes.sizeof(ctypes.c_uint64):
DWORD_PTR = ctypes.c_uint64
elif ctypes.sizeof(ctypes.c_void_p) == ctypes.sizeof(ctypes.c_uint32):
DWORD_PTR = ctypes.c_uint32
SetProcessAffinityMask = ctypes.windll.kernel32.SetProcessAffinityMask
SetProcessAffinityMask.argtypes = [ctypes.wintypes.HANDLE, DWORD_PTR]
SetProcessAffinityMask.restype = bool
GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
GetCurrentProcess.argtypes = []
GetCurrentProcess.restype = ctypes.wintypes.HANDLE
def set_cpu_affinity(affinity_list):
"""
Set CPU affinity to CPUs listed (numbered 0...n-1). CPU affinity
is about binding and unbinding a process to a physical CPU or a
range of CPUs, so that the process in question uses only a subset
of the available CPUs.
#### Parameters
**affinity_list** (`list`)
: A list of CPU cores to which the current process will be bound.
"""
mask = 0
for num in affinity_list:
mask |= 2**num
# Pseudohandle, doesn't need to be closed
handle = GetCurrentProcess()
ok = SetProcessAffinityMask(handle, mask)
if not ok:
raise RuntimeError("SetProcessAffinityMask failed")
else:
try:
import resource
# POSIX
if sys.platform == "darwin":
def get_maxrss():
"""
Returns the peak resident set size for the current process. On macOS,
the peak resident set size is the maximum amount of memory occupied by
the process's resident set at any point in time.
#### Returns
**peak_resident_set_size** (`int`)
: The peak resident set size for the current process.
"""
# OSX getrusage returns maxrss in bytes
# https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getrusage.2.html
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
else:
def get_maxrss():
"""
Returns the peak resident set size for the current process. On Linux,
the peak resident set size is the maximum amount of memory occupied by
the process's resident set at any point in time.
#### Returns
**peak_resident_set_size** (`int`)
: The peak resident set size for the current process.
"""
# Linux, *BSD return maxrss in kilobytes
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024
except ImportError:
pass
def set_cpu_affinity(affinity_list):
"""
Set CPU affinity to CPUs listed (numbered 0...n-1). CPU affinity
is about binding and unbinding a process to a physical CPU or a
range of CPUs, so that the process in question uses only a subset
of the available CPUs.
#### Parameters
**affinity_list** (`list`)
: A list of CPU cores to which the current process will be bound.
"""
if hasattr(os, "sched_setaffinity"):
os.sched_setaffinity(0, affinity_list)
else:
import psutil
p = psutil.Process()
if hasattr(p, "cpu_affinity"):
p.cpu_affinity(affinity_list)
asv_runner-0.2.1/asv_runner/benchmarks/mark.py 0000664 0000000 0000000 00000033075 14562240250 0021507 0 ustar 00root root 0000000 0000000 import functools
import inspect
class SkipNotImplemented(NotImplementedError):
"""
Exception raised to indicate a skipped benchmark.
This exception inherits from `NotImplementedError`. It's used within an ASV
benchmark to skip the current benchmark for certain parameters or conditions
that are not implemented or do not apply.
#### Attributes
**message** (`str`)
: A string that provides a more detailed explanation of the skip reason.
#### Warning
Use of `SkipNotImplemented` is less efficient than the `@skip_for_params`
decorator as the setup for the benchmarks and the benchmarks themselves are
run before the error is raised, thus consuming unnecessary resources. Use
`@skip_for_params` where possible to avoid running the benchmarks that
should be skipped.
#### Notes
This is mainly provided for backwards compatibility with the behavior of asv
before 0.5 wherein individual benchmarks could raise and be skipped. From
0.5 onwards, only the setup function is meant to raise `NotImplemented` for
skipping parameter sets.
#### Example
This exception might be used in a scenario where a benchmark should be
skipped for certain conditions or parameters:
```{code-block} python
class Simple:
params = ([False, True])
param_names = ["ok"]
def time_failure(self, ok):
if ok:
x = 34.2**4.2
else:
raise SkipNotImplemented
```
"""
def __init__(self, message=""):
"""
Initialize a new instance of `SkipNotImplemented`.
#### Parameters
**message** (`str`)
: A string that provides a more detailed explanation of the skip reason.
Optional; if not provided, defaults to an empty string.
"""
self.message = message
super().__init__(self.message)
def skip_for_params(skip_params_list):
"""
Decorator to set skip parameters for a benchmark function.
#### Parameters
**skip_params_list** (`list`):
A list of tuples, each specifying a combination of parameter values that
should cause the benchmark function to be skipped.
#### Returns
**decorator** (`function`):
A decorator function that sets the skip parameters for the benchmark
function.
#### Notes
The `skip_for_params` decorator can be used to specify conditions under
which a benchmark function should be skipped. Each tuple in the list
represents a combination of parameter values which, if received by the
benchmark function, will cause that function to be skipped during the
benchmarking process.
The decorated function's `skip_params` attribute will be set with the
provided skip parameters, which will be used during the benchmarking
process.
Using this decorator is always more efficient than raising a
`SkipNotImplemented` exception within the benchmark function, as the
function setup and execution can be avoided entirely for skipped parameters.
#### Example
```{code-block} python
class Simple:
params = ([False, True])
param_names = ["ok"]
@skip_for_params([(False, )])
def time_failure(self, ok):
if ok:
x = 34.2**4.2
```
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
setattr(wrapper, "skip_params", skip_params_list)
return wrapper
return decorator
def skip_benchmark(func):
"""
Decorator to mark a function as skipped for benchmarking.
#### Parameters
**func** (function)
: The function to be marked as skipped.
#### Returns
**wrapper** (function)
: A wrapped function that is marked to be skipped for benchmarking.
#### Notes
The `skip_benchmark` decorator can be used to mark a specific function as
skipped for benchmarking. When the decorated function is encountered during
benchmarking, it will be skipped and not included in the benchmarking
process.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
setattr(wrapper, "skip_benchmark", True)
return wrapper
def skip_benchmark_if(condition):
"""
Decorator to skip benchmarking of a function if a condition is met.
#### Parameters
**condition** (`bool`)
: A boolean that indicates whether to skip benchmarking. If `True`,
the decorated function will be skipped for benchmarking. If `False`,
the decorated function will be benchmarked as usual.
#### Returns
**decorator** (function)
: A decorator function that sets the condition under which the decorated function
will be skipped for benchmarking.
#### Notes
The `skip_if` decorator can be used to skip the benchmarking of a specific
function if a condition is met. It is faster than raising
`SkipNotImplemented` as it skips the `setup()` as well.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
if condition:
setattr(wrapper, "skip_benchmark", True)
return wrapper
return decorator
def skip_params_if(skip_params_list, condition):
"""
Decorator to set skip parameters for a benchmark function if a condition is met.
#### Parameters
**skip_params_list** (`list`):
A list specifying the skip parameters for the benchmark function.
**condition** (`bool`)
: A boolean that indicates whether to set the skip parameters. If `True`,
the skip parameters will be set for the decorated function. If `False`,
no parameters will be skipped.
#### Returns
**decorator** (function):
A decorator function that sets the skip parameters for the benchmark function
if the condition is met.
#### Notes
The `skip_params_if` decorator can be used to specify skip parameters for a
benchmark function if a condition is met.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
if condition:
setattr(wrapper, "skip_params", skip_params_list)
return wrapper
return decorator
def parameterize_class_with(param_dict):
"""
Class Decorator to set benchmark parameters for a class.
#### Parameters
**param_dict** (`dict`):
A dictionary specifying the parameters for the benchmark class.
The keys represent the parameter names, and the values are lists
of values for those parameters.
#### Returns
**decorator** (function):
A class decorator that sets the parameters for the benchmark functions.
#### Notes
The `parameterize_class_with` decorator can be used to specify parameters for a
benchmark class. The parameters are defined as a dictionary, where keys are
the parameter names and values are lists of respective values. The decorated
class's `params` and `param_names` attributes will be set with the provided
parameters and names, which will be used during the benchmarking process.
This decorator will overwrite any existing `params` and `param_names`
attributes in the class.
"""
def decorator(cls):
if not inspect.isclass(cls):
raise TypeError(
"The parameterize_class_with decorator can only be used with classes"
)
# Handle the single parameter case separately.
if len(param_dict) > 1:
cls.params = list(param_dict.values())
else:
cls.params = list(param_dict.values())[0]
cls.param_names = list(param_dict.keys())
return cls
return decorator
def parameterize_func_with(param_dict):
"""
Function Decorator to set benchmark parameters for a function.
#### Parameters
**param_dict** (`dict`):
A dictionary specifying the parameters for the benchmark function.
The keys represent the parameter names, and the values are lists
of values for those parameters.
#### Returns
**decorator** (function):
A function decorator that sets the parameters for the benchmark function.
#### Notes
The `parameterize_func_with` decorator can be used to specify parameters for a
benchmark function. The parameters are defined as a dictionary, where keys are
the parameter names and values are lists of respective values. The decorated
function's `params` and `param_names` attributes will be set with the provided
parameters and names, which will be used during the benchmarking process.
This decorator will overwrite any existing `params` and `param_names`
attributes in the function, and it should not be used with methods of a class.
"""
def decorator(func):
if inspect.isclass(func) or inspect.ismethod(func):
raise TypeError(
"The parameterize_func_with decorator can only be used with functions"
)
if len(param_dict) > 1:
func.params = list(param_dict.values())
else:
func.params = list(param_dict.values())[0]
func.param_names = list(param_dict.keys())
return func
return decorator
def parameterize(param_dict):
"""
Decorator to set benchmark parameters for a function or a class.
#### Parameters
**param_dict** (`dict`):
A dictionary specifying the parameters for the benchmark.
The keys represent the parameter names, and the values are lists
of values for those parameters.
#### Returns
**decorator** (function):
A function or class decorator that sets the parameters for the benchmark.
#### Notes
The `parameterize` decorator can be used to specify parameters for a
benchmark function or class. The parameters are defined as a dictionary,
where keys are the parameter names and values are lists of respective values.
The decorated function or class's `params` and `param_names` attributes
will be set with the provided parameters and names, which will be used
during the benchmarking process.
"""
def decorator(obj):
if inspect.isclass(obj):
return parameterize_class_with(param_dict)(obj)
elif callable(obj):
return parameterize_func_with(param_dict)(obj)
else:
raise TypeError(
"The parameterize decorator can only be used with functions or classes"
)
return decorator
def timeout_class_at(seconds):
"""
Class Decorator to set timeout for a class.
#### Parameters
**seconds** (`float`)
: The number of seconds after which the class methods should be timed out.
#### Returns
**decorator** (function)
: A class decorator that sets the timeout for the class.
#### Notes
The `timeout_class_at` decorator can be used to specify a timeout for all
methods in a class. The timeout is stored as an attribute on the class and
applies to all its methods. Individual methods can override this timeout by
using the `timeout_func_at` or `timeout_at` decorators.
"""
def decorator(cls):
if not inspect.isclass(cls):
raise TypeError(
"The timeout_class_with decorator can only be used with classes"
)
cls.timeout = seconds
return cls
return decorator
def timeout_func_at(seconds):
"""
Function Decorator to set timeout for a function.
#### Parameters
**seconds** (`float`)
: The number of seconds after which the function should be timed out.
#### Returns
**decorator** (function)
: A function decorator that sets the timeout for the function.
#### Notes
The `timeout_func_at` decorator can be used to specify a timeout for a
specific function. This is particularly useful for benchmarking, where you
might want to stop execution of functions that take too long. The timeout is
stored as an attribute on the function.
"""
def decorator(func):
if inspect.isclass(func):
raise TypeError(
"The timeout_func_with decorator can only be used with functions"
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
setattr(wrapper, "timeout", seconds)
return wrapper
return decorator
def timeout_at(seconds):
"""
Decorator to set a timeout for a function or a class.
#### Parameters
**seconds** (`float`)
: The number of seconds after which the function or the class methods should
be timed out.
#### Returns
**decorator** (function)
: A decorator that sets the timeout for the function or the class.
#### Notes
The `timeout_at` decorator can be used to set a specific timeout for a
function or all methods in a class. If applied to a class, the timeout is
stored as an attribute on the class and applies to all its methods.
Individual methods can override this timeout by using the `timeout_func_at`
or `timeout_at` decorators. If applied to a function, the timeout is stored
directly on the function.
"""
def decorator(obj):
if inspect.isclass(obj):
return timeout_class_at(seconds)(obj)
elif callable(obj):
return timeout_func_at(seconds)(obj)
else:
raise TypeError(
"The parameterize decorator can only be used with functions or classes"
)
return decorator
__all__ = [
"parameterize",
"skip_benchmark",
"skip_benchmark_if",
"skip_for_params",
"skip_params_if",
"timeout_at",
]
asv_runner-0.2.1/asv_runner/benchmarks/mem.py 0000664 0000000 0000000 00000004310 14562240250 0021321 0 ustar 00root root 0000000 0000000 import copy
import re
from ._base import Benchmark
from ._exceptions import NotRequired
try:
from pympler.asizeof import asizeof
except ImportError:
raise NotRequired("MemBenchmarks not requested or pympler not found")
class MemBenchmark(Benchmark):
"""
Represents a single benchmark for tracking the memory consumption of an object.
The MemBenchmark class provides a benchmark type for tracking the memory
consumption of the object returned by the benchmark function.
#### Attributes
**name_regex** (`re.Pattern`)
: The regular expression used to match the names of functions that should be
considered as memory benchmarks.
**type** (`str`)
: The type of the benchmark. The default type is "memory".
**unit** (`str`)
: The unit of the value that's being tracked. By default, this is "bytes".
#### Methods
**run(*param)**
: Runs the benchmark function and returns the memory consumption of the object
returned by the function.
"""
name_regex = re.compile("^(Mem[A-Z_].+)|(mem_.+)$")
def __init__(self, name, func, attr_sources):
"""
Initializes a new instance of the MemBenchmark class.
#### Parameters
**name** (`str`)
: The name of the benchmark.
**func** (`callable`)
: The function to benchmark.
**attr_sources** (`list`)
: A list of objects to search for attributes that might be used by the
benchmark.
"""
Benchmark.__init__(self, name, func, attr_sources)
self.type = "memory"
self.unit = "bytes"
def run(self, *param):
"""
Runs the benchmark function and measures the memory consumption of the object
returned by the function.
#### Parameters
**param** (`tuple`)
: The parameters to pass to the benchmark function.
#### Returns
**result** (`int`)
: The memory consumption in bytes of the object returned by the
benchmark function.
"""
obj = self.func(*param)
sizeof2 = asizeof([obj, obj])
sizeofcopy = asizeof([obj, copy.copy(obj)])
return sizeofcopy - sizeof2
export_as_benchmark = [MemBenchmark]
asv_runner-0.2.1/asv_runner/benchmarks/peakmem.py 0000664 0000000 0000000 00000003655 14562240250 0022175 0 ustar 00root root 0000000 0000000 import re
from ._base import Benchmark
from ._maxrss import get_maxrss
class PeakMemBenchmark(Benchmark):
"""
Represents a single benchmark for tracking the peak memory consumption
of the whole program.
The PeakMemBenchmark class provides a benchmark type for tracking the peak
memory consumption of the program while the benchmark function is running.
#### Attributes
**name_regex** (`re.Pattern`)
: The regular expression used to match the names of functions that should be
considered as peak memory benchmarks.
**type** (`str`)
: The type of the benchmark. The default type is "peakmemory".
**unit** (`str`)
: The unit of the value that's being tracked. By default, this is "bytes".
#### Methods
**run(*param)**
: Runs the benchmark function and returns its result.
"""
name_regex = re.compile("^(PeakMem[A-Z_].+)|(peakmem_.+)$")
def __init__(self, name, func, attr_sources):
"""
Initializes a new instance of the PeakMemBenchmark class.
#### Parameters
**name** (`str`)
: The name of the benchmark.
**func** (`callable`)
: The function to benchmark.
**attr_sources** (`list`)
: A list of objects to search for attributes that might be used by the
benchmark.
"""
Benchmark.__init__(self, name, func, attr_sources)
self.type = "peakmemory"
self.unit = "bytes"
def run(self, *param):
"""
Runs the benchmark function and measures its peak memory consumption.
#### Parameters
**param** (`tuple`)
: The parameters to pass to the benchmark function.
#### Returns
**result** (`int`)
: The peak memory consumption in bytes of the program while the
benchmark function was running.
"""
self.func(*param)
return get_maxrss()
export_as_benchmark = [PeakMemBenchmark]
asv_runner-0.2.1/asv_runner/benchmarks/time.py 0000664 0000000 0000000 00000023512 14562240250 0021506 0 ustar 00root root 0000000 0000000 import re
import sys
import timeit
from ._base import Benchmark, _get_first_attr
wall_timer = timeit.default_timer
class TimeBenchmark(Benchmark):
"""
Represents a single benchmark for timing.
This class inherits from `Benchmark` and is specialized for timing benchmarks.
#### Attributes
**name_regex** (`re.Pattern`)
: Regular expression that matches the name of the timing benchmarks.
**rounds** (`int`)
: Number of rounds to execute the benchmark.
**repeat** (`int`)
: Number of times the code will be repeated during each round.
**min_run_count** (`int`)
: Minimum number of runs required for the benchmark.
**number** (`int`)
: The argument to `timeit.timeit`, specifying the number of executions of
the setup statement.
**sample_time** (`float`)
: The target time for each sample.
**warmup_time** (`float`)
: The time spent warming up the benchmark.
**timer** (`callable`)
: The timer to use, by default it uses `timeit.default_timer`.
"""
name_regex = re.compile("^(Time[A-Z_].+)|(time_.+)$")
def __init__(self, name, func, attr_sources):
"""
Initialize a new instance of `TimeBenchmark`.
#### Parameters
**name** (`str`)
: The name of the benchmark.
**func** (`callable`)
: The function to benchmark.
**attr_sources** (`list`)
: A list of objects from which to draw attributes.
"""
Benchmark.__init__(self, name, func, attr_sources)
self.type = "time"
self.unit = "seconds"
self._attr_sources = attr_sources
old = int(
_get_first_attr(self._attr_sources, "processes", 2)
) # backward compat.
self.rounds = int(_get_first_attr(self._attr_sources, "rounds", old))
self._load_vars()
def _load_vars(self):
"""Loads benchmark variables from attribute sources."""
self.repeat = _get_first_attr(self._attr_sources, "repeat", 0)
self.min_run_count = _get_first_attr(self._attr_sources, "min_run_count", 2)
self.number = int(_get_first_attr(self._attr_sources, "number", 0))
self.sample_time = _get_first_attr(self._attr_sources, "sample_time", 0.01)
self.warmup_time = _get_first_attr(self._attr_sources, "warmup_time", -1)
self.timer = _get_first_attr(self._attr_sources, "timer", wall_timer)
def do_setup(self):
"""Execute the setup method and load variables."""
result = Benchmark.do_setup(self)
# For parameterized tests, setup() is allowed to change these
self._load_vars()
return result
def _get_timer(self, *param):
"""Get a `timeit.Timer` for the current benchmark."""
if param:
def func():
self.func(*param)
else:
func = self.func
timer = timeit.Timer(stmt=func, setup=self.redo_setup, timer=self.timer)
return timer
def run(self, *param):
"""
Run the benchmark with the given parameters.
#### Parameters
**param** (`tuple`)
: The parameters to pass to the benchmark function.
#### Returns
**result** (`dict`)
: A dictionary with the benchmark results. It contains the samples taken
and the number of times the function was called in each sample.
#### Notes
The benchmark timing method is designed to adaptively find an optimal
`number` of function executions to time based on the estimated
performance. This number is then used for the final timings.
The warmup time is determined based on the Python interpreter in use.
PyPy and GraalPython need longer warmup times due to their JIT
compilers. For CPython, a short warmup time is used to account for
transient effects such as OS scheduling.
The `repeat` attribute specifies how many times to run the function for
timing. It can be an integer, meaning the function is run that many
times, or a tuple of three values, specifying the minimum number of
runs, the maximum number of runs, and the maximum total time to spend on
runs.
After obtaining the timing samples, each sample is divided by the
`number` of function executions to get the average time per function
call, and these values are returned as the "samples" in the result.
"""
warmup_time = self.warmup_time
if warmup_time < 0:
if "__pypy__" in sys.modules:
warmup_time = 1.0
elif "__graalpython__" in sys.modules:
warmup_time = 5.0
else:
# Transient effects exist also on CPython, e.g. from
# OS scheduling
warmup_time = 0.1
timer = self._get_timer(*param)
try:
min_repeat, max_repeat, max_time = self.repeat
except (ValueError, TypeError):
if self.repeat == 0:
min_repeat = 1
max_repeat = 10
max_time = 20.0
if self.rounds > 1:
max_repeat //= 2
max_time /= 2.0
else:
min_repeat = self.repeat
max_repeat = self.repeat
max_time = self.timeout
# XXX: This is a bug, needed for --quick
# gh-1308 in asv
if max_time is None:
max_time = 60.0
min_repeat = int(min_repeat)
max_repeat = int(max_repeat)
max_time = float(max_time)
samples, number = self.benchmark_timing(
timer,
min_repeat,
max_repeat,
max_time=max_time,
warmup_time=warmup_time,
number=self.number,
min_run_count=self.min_run_count,
)
samples = [s / number for s in samples]
return {"samples": samples, "number": number}
def benchmark_timing(
self,
timer,
min_repeat,
max_repeat,
max_time,
warmup_time,
number,
min_run_count,
):
"""
Benchmark the timing of the function execution.
#### Parameters
**timer** (`timeit.Timer`)
: The timer to use for the benchmarking.
**min_repeat** (`int`)
: The minimum number of times to repeat the function execution.
**max_repeat** (`int`)
: The maximum number of times to repeat the function execution.
**max_time** (`float`)
: The maximum total time to spend on the benchmarking.
**warmup_time** (`float`)
: The time spent warming up the benchmark.
**number** (`int`)
: The number of executions of the setup statement.
**min_run_count** (`int`)
: The minimum number of runs required for the benchmark.
#### Returns
**result** (`tuple`)
: A tuple with the samples taken and the number of times the function
was called in each sample.
#### Notes
The `too_slow` internal function is used to stop taking samples when
certain limits are exceeded. These limits are the minimum run count, the
minimum repeat count, and the maximum time.
If `number` is zero, a suitable number of function executions is
estimated, and the system is warmed up at the same time.
If the warmup time is greater than zero, a warmup phase is initiated
where the function is called repeatedly until the warmup time has
passed.
After these initial steps, the function execution times are sampled and
added to the `samples` list, stopping when reaching the maximum repeat
count or when the `too_slow` function indicates to stop.
"""
sample_time = self.sample_time
start_time = wall_timer()
run_count = 0
samples = []
def too_slow(num_samples):
# stop taking samples if limits exceeded
if run_count < min_run_count:
return False
if num_samples < min_repeat:
return False
return wall_timer() > start_time + warmup_time + max_time
if number == 0:
# Select number & warmup.
#
# This needs to be done at the same time, because the
# benchmark timings at the beginning can be larger, and
# lead to too small number being selected.
number = 1
while True:
self._redo_setup_next = False
start = wall_timer()
timing = timer.timeit(number)
wall_time = wall_timer() - start
actual_timing = max(wall_time, timing)
run_count += number
if actual_timing >= sample_time:
if wall_timer() > start_time + warmup_time:
break
else:
try:
p = min(10.0, max(1.1, sample_time / actual_timing))
except ZeroDivisionError:
p = 10.0
number = max(number + 1, int(p * number))
if too_slow(1):
return [timing], number
elif warmup_time > 0:
# Warmup
while True:
self._redo_setup_next = False
timing = timer.timeit(number)
run_count += number
if wall_timer() >= start_time + warmup_time:
break
if too_slow(1):
return [timing], number
# Collect samples
while len(samples) < max_repeat:
timing = timer.timeit(number)
run_count += number
samples.append(timing)
if too_slow(len(samples)):
break
return samples, number
export_as_benchmark = [TimeBenchmark]
asv_runner-0.2.1/asv_runner/benchmarks/timeraw.py 0000664 0000000 0000000 00000011717 14562240250 0022224 0 ustar 00root root 0000000 0000000 import re
import subprocess
import sys
import textwrap
from ._base import _get_first_attr
from .time import TimeBenchmark
class _SeparateProcessTimer:
"""
This class provides a timer that runs a given function in a separate Python
process.
The function should return the statement to be timed. This statement is
executed using the Python timeit module in a new Python process. The
execution time is then returned.
#### Attributes
**subprocess_tmpl** (`str`)
: The template Python code to be run in the subprocess. It imports necessary
modules and prints the execution time of the statement.
**func** (`callable`)
: The function to be timed. This function should return a string of Python
code to be executed, or a tuple of two strings: the code to be executed and
the setup code to be run before timing.
#### Methods
**timeit(number)**
: Run the function's code `number` times in a separate Python process, and
return the execution time.
"""
subprocess_tmpl = textwrap.dedent(
'''
from __future__ import print_function
from timeit import timeit, default_timer as timer
print(repr(timeit(stmt="""{stmt}""", setup="""{setup}""",
number={number}, timer=timer)))
'''
).strip()
def __init__(self, func):
self.func = func
def timeit(self, number):
"""
Run the function's code `number` times in a separate Python process, and
return the execution time.
#### Parameters
**number** (`int`)
: The number of times to execute the function's code.
#### Returns
**time** (`float`)
: The time it took to execute the function's code `number` times.
#### Notes
The function's code is executed in a separate Python process to avoid
interference from the parent process. The function can return either a
single string of code to be executed, or a tuple of two strings: the
code to be executed and the setup code to be run before timing.
"""
stmt = self.func()
if isinstance(stmt, tuple):
stmt, setup = stmt
else:
setup = ""
stmt = textwrap.dedent(stmt)
setup = textwrap.dedent(setup)
stmt = stmt.replace(r'"""', r"\"\"\"")
setup = setup.replace(r'"""', r"\"\"\"")
code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)
res = subprocess.check_output([sys.executable, "-c", code])
return float(res.strip())
class TimerawBenchmark(TimeBenchmark):
"""
Represents a benchmark for tracking timing benchmarks run once in
a separate process.
This class inherits from `TimeBenchmark` and modifies it to run the
benchmark function in a separate process. This is useful for isolating the
benchmark from any potential side effects caused by other Python code
running in the same process.
#### Attributes
**name_regex** (`re.Pattern`)
: The regular expression used to match the names of functions that should be
considered as raw timing benchmarks.
**number** (`int`)
: The number of times to execute the function's code. By default, the
function's code is executed once.
#### Methods
**_load_vars()**
: Loads variables for the benchmark from the function's attributes or from
default values.
**_get_timer(*param)**
: Returns a timer that runs the benchmark function in a separate process.
**do_profile(filename=None)**
: Raises a ValueError. Raw timing benchmarks cannot be profiled.
"""
name_regex = re.compile("^(Timeraw[A-Z_].+)|(timeraw_.+)$")
def _load_vars(self):
"""
Loads variables for the benchmark from the function's attributes or from
default values.
"""
TimeBenchmark._load_vars(self)
self.number = int(_get_first_attr(self._attr_sources, "number", 1))
del self.timer
def _get_timer(self, *param):
"""
Returns a timer that runs the benchmark function in a separate process.
#### Parameters
**param** (`tuple`)
: The parameters to pass to the benchmark function.
#### Returns
**timer** (`_SeparateProcessTimer`)
: A timer that runs the function in a separate process.
"""
if param:
def func():
self.func(*param)
else:
func = self.func
return _SeparateProcessTimer(func)
def do_profile(self, filename=None):
"""
Raises a ValueError. Raw timing benchmarks cannot be profiled.
#### Parameters
**filename** (`str`, optional)
: The name of the file to which to save the profile. Default is None.
#### Raises
**ValueError**
: Always. Raw timing benchmarks cannot be profiled.
"""
raise ValueError("Raw timing benchmarks cannot be profiled")
export_as_benchmark = [TimerawBenchmark]
asv_runner-0.2.1/asv_runner/benchmarks/track.py 0000664 0000000 0000000 00000003570 14562240250 0021656 0 ustar 00root root 0000000 0000000 import re
from ._base import Benchmark, _get_first_attr
class TrackBenchmark(Benchmark):
"""
Represents a single benchmark for tracking an arbitrary value.
The TrackBenchmark class provides a benchmark type for tracking any arbitrary
value that your code produces. This can be useful when you need to track a value
that isn't related to time or memory usage.
#### Attributes
**name_regex** (`re.Pattern`)
: The regular expression used to match the names of functions that should be
considered as track benchmarks.
**type** (`str`)
: The type of the benchmark. The default type is "track".
**unit** (`str`)
: The unit of the value that's being tracked. By default, this is "unit".
#### Methods
**run(*param)**
: Runs the benchmark function and returns its result.
"""
name_regex = re.compile("^(Track[A-Z_].+)|(track_.+)$")
def __init__(self, name, func, attr_sources):
"""
Initializes a new instance of the TrackBenchmark class.
#### Parameters
**name** (`str`)
: The name of the benchmark.
**func** (`callable`)
: The function to benchmark.
**attr_sources** (`list`)
: A list of objects to search for attributes that might be used by the
benchmark.
"""
Benchmark.__init__(self, name, func, attr_sources)
self.type = _get_first_attr(attr_sources, "type", "track")
self.unit = _get_first_attr(attr_sources, "unit", "unit")
def run(self, *param):
"""
Runs the benchmark function and returns its result.
#### Parameters
**param** (`tuple`)
: The parameters to pass to the benchmark function.
#### Returns
**result**
: The result of the benchmark function.
"""
return self.func(*param)
export_as_benchmark = [TrackBenchmark]
asv_runner-0.2.1/asv_runner/check.py 0000664 0000000 0000000 00000001623 14562240250 0017507 0 ustar 00root root 0000000 0000000 import sys
from ._aux import update_sys_path
from .discovery import disc_benchmarks
def _check(args):
"""
Checks all the discovered benchmarks in the provided benchmark directory.
#### Parameters
**args** (`tuple`)
: A tuple containing the benchmark directory.
#### Notes
This function updates the system path with the root directory of the
benchmark suite. Then, it iterates over all benchmarks discovered in the
root directory. For each benchmark, it calls the check method of the
benchmark and updates the 'ok' flag.
If all benchmarks pass the check, it exits with a status code 0. If any
benchmark fails, it exits with a status code 1.
"""
(benchmark_dir,) = args
update_sys_path(benchmark_dir)
ok = True
for benchmark in disc_benchmarks(benchmark_dir):
ok = ok and benchmark.check(benchmark_dir)
sys.exit(0 if ok else 1)
asv_runner-0.2.1/asv_runner/console.py 0000664 0000000 0000000 00000033734 14562240250 0020104 0 ustar 00root root 0000000 0000000 # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A set of utilities for writing output to the console.
"""
import contextlib
import locale
import logging
import os
import sys
import textwrap
import time
from asv_runner import util
WIN = os.name == "nt"
def isatty(file):
"""
Determines if a file is a tty.
#### Parameters
**file** (`file-like object`)
: The file-like object to check.
#### Returns
**isatty** (`bool`)
: Returns `True` if the file is a tty, `False` otherwise.
#### Notes
Most built-in Python file-like objects have an `isatty` member,
but some user-defined types may not. In such cases, this function
assumes those are not ttys.
"""
return file.isatty() if hasattr(file, "isatty") else False
def _color_text(text, color):
"""
Returns a string wrapped in ANSI color codes for coloring the text in a terminal.
#### Parameters
**text** (`str`)
: The string to colorize.
**color** (`str`)
: An ANSI terminal color name. Must be one of the following:
'black', 'red', 'green', 'brown', 'blue', 'magenta', 'cyan', 'lightgrey',
'default', 'darkgrey', 'lightred', 'lightgreen', 'yellow', 'lightblue',
'lightmagenta', 'lightcyan', 'white', or '' (the empty string).
#### Returns
**colored_text** (`str`)
: The input string, bounded by the appropriate ANSI color codes.
#### Notes
This function wraps the input text with ANSI color codes based on the given color.
It won't actually affect the text until it is printed to the terminal.
"""
color_mapping = {
"black": "0;30",
"red": "0;31",
"green": "0;32",
"brown": "0;33",
"blue": "0;34",
"magenta": "0;35",
"cyan": "0;36",
"lightgrey": "0;37",
"default": "0;39",
"darkgrey": "1;30",
"lightred": "1;31",
"lightgreen": "1;32",
"yellow": "1;33",
"lightblue": "1;34",
"lightmagenta": "1;35",
"lightcyan": "1;36",
"white": "1;37",
}
color_code = color_mapping.get(color, "0;39")
return f"\033[{color_code}m{text}\033[0m"
# A dictionary of Unicode characters that have reasonable representations in ASCII.
# This dictionary contains Unicode characters as keys and their corresponding ASCII
# representations as values. This allows for convenient replacement of these specific
# Unicode characters with ASCII ones to prevent them from being replaced by '?'.
#
# The mapping currently includes:
# - 'μ' maps to 'u'
# - '·' maps to '-'
# - '±' maps to '~'
#
# You can find additional characters that might need an entry using:
# `grep -P -n '[^\x00-\x7F]' -r *`
# in the `asv` source directory.
_unicode_translations = {ord("μ"): "u", ord("·"): "-", ord("±"): "~"}
def _write_with_fallback(s, fileobj):
"""
Writes the supplied string to the given file-like object, handling potential
UnicodeEncodeErrors by falling back to the locale's preferred encoding.
#### Parameters
`s` (`str`):
The Unicode string to be written to the file-like object. Raises a `ValueError`
if `s` is not a Unicode string.
`fileobj` (file-like object):
The file-like object to which the string `s` is to be written. On Python 3,
this must be a text stream. On Python 2, this must be a `file` byte stream.
#### Notes
This function first tries to write the input string `s` to the file object
`fileobj`. If a `UnicodeError` occurs during this process (indicating that the
string contains characters not representable in the file's encoding), the function
falls back to encoding the string in the locale's preferred encoding before writing.
If the string `s` still cannot be encoded in the locale's preferred encoding, the
function translates the string to replace problematic Unicode characters with
ASCII ones using the `_unicode_translations` dictionary, and then encodes and
writes the resulting string to `fileobj` using the "replace" error handling scheme
(which replaces any non-encodable characters with a suitable replacement marker).
After the write operation, the function flushes the file object's output buffer to
ensure that the written data is actually saved to the file.
"""
if not isinstance(s, str):
raise ValueError("Input string is not a Unicode string")
with contextlib.suppress(UnicodeError):
fileobj.write(s)
return
# Fall back to writing bytes
enc = locale.getpreferredencoding()
try:
b = s.encode(enc)
except UnicodeError:
s = s.translate(_unicode_translations)
b = s.encode(enc, errors="replace")
fileobj.flush()
fileobj.buffer.write(b)
def color_print(*args, **kwargs):
"""
Prints colored and styled text to the terminal using ANSI escape sequences.
#### Parameters
*args (`tuple` of `str`):
The positional arguments should come in pairs (`msg`, `color`), where `msg`
is the string to display and `color` is the color to display it in. `color`
is an ANSI terminal color name. Must be one of: black, red, green, brown,
blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen,
yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string).
`file` (writable file-like object, optional):
Where to write to. Defaults to `sys.stdout`. If `file` is not a tty (as determined
by calling its `isatty` member, if one exists), no coloring will be included. It's
passed as a keyword argument.
`end` (`str`, optional):
The ending of the message. Defaults to "\n". The `end` will be printed after
resetting any color or font state. It's passed as a keyword argument.
#### Notes
This function allows you to print text in various colors to the console, which can
be helpful for distinguishing different kinds of output or for drawing attention to
particular messages.
It works by applying ANSI escape sequences to the input strings according to the
specified colors. These escape sequences are interpreted by the terminal emulator
to apply the specified colors and styles.
#### Example
```{code-block} python
color_print('This is the color ', 'default', 'GREEN', 'green')
```
"""
file = kwargs.get("file", sys.stdout)
end = kwargs.get("end", "\n")
if isatty(file) and not WIN:
for i in range(0, len(args), 2):
msg = args[i]
color = "" if i + 1 == len(args) else args[i + 1]
if color:
msg = _color_text(msg, color)
_write_with_fallback(msg, file)
else:
for i in range(0, len(args), 2):
msg = args[i]
_write_with_fallback(msg, file)
_write_with_fallback(end, file)
def get_answer_default(prompt, default, use_defaults=False):
"""
Prompts the user for input and returns the entered value or a default.
#### Parameters
`prompt` (`str`):
The string that is presented to the user.
`default` (any):
The value returned if the user doesn't enter anything and just hits Enter. This
value is also shown in the prompt to indicate to the user what the default is.
`use_defaults` (`bool`, optional):
If True, the function will immediately return the default value without prompting
the user for input. Defaults to False.
#### Returns
The user's input, or the provided default value if the user didn't enter anything.
#### Notes
This function enhances the built-in `input` function by allowing a default value
to be specified, which is returned if the user doesn't enter anything.
"""
color_print(f"{prompt} [{default}]: ", end="")
if use_defaults:
return default
x = input()
return default if x.strip() == "" else x
def truncate_left(s, l):
return f"...{s[-(l - 3):]}" if len(s) > l else s
class Log:
def __init__(self):
self._indent = 1
self._total = 0
self._count = 0
self._logger = logging.getLogger()
self._needs_newline = False
self._last_dot = time.time()
self._colorama = False
if sys.platform in {"win32", "cli"}:
try:
import colorama
colorama.init()
self._colorama = True
except Exception as exc:
print(f"On Windows or cli, colorama is suggested, but got {exc}")
def _stream_formatter(self, record):
"""
The formatter for standard output
"""
if self._needs_newline:
color_print("")
parts = record.msg.split("\n", 1)
first_line = parts[0]
rest = None if len(parts) == 1 else parts[1]
indent = self._indent + 1
continued = getattr(record, "continued", False)
if self._total:
progress_msg = f"[{self._count / self._total:6.02%}] "
if not continued:
color_print(progress_msg, end="")
indent += len(progress_msg)
if not continued:
color_print("·" * self._indent, end="")
color_print(" ", end="")
else:
color_print(" " * indent, end="")
if hasattr(record, "color"):
color = record.color
elif record.levelno < logging.DEBUG:
color = "default"
elif record.levelno < logging.INFO:
color = "default"
elif record.levelno < logging.WARN:
if self._indent == 1:
color = "green"
elif self._indent == 2:
color = "blue"
else:
color = "default"
elif record.levelno < logging.ERROR:
color = "brown"
else:
color = "red"
color_print(first_line, color, end="")
if rest is not None:
color_print("")
detail = textwrap.dedent(rest)
spaces = " " * indent
for line in detail.split("\n"):
color_print(spaces, end="")
color_print(line)
self._needs_newline = True
sys.stdout.flush()
@contextlib.contextmanager
def indent(self):
"""
A context manager to increase the indentation level.
"""
self._indent += 1
yield
self._indent -= 1
def dot(self):
if isatty(sys.stdout):
if time.time() > self._last_dot + 1.0:
color_print(".", "darkgrey", end="")
sys.stdout.flush()
self._last_dot = time.time()
def set_nitems(self, n):
"""
Set the number of remaining items to process. Each of these
steps should be incremented through using `step`.
Can be called multiple times. The progress percentage is ensured
to be non-decreasing, except if 100% was already reached in which
case it is restarted from 0%.
"""
try:
# Ensure count/total is nondecreasing
self._total = util.ceildiv(n * self._total, self._total - self._count)
self._count = self._total - n
except ZeroDivisionError:
# Reset counting from start
self._total = n
self._count = 0
def step(self):
"""
Write that a step has been completed. A percentage is
displayed along with it.
If we are stepping beyond the number of items, stop counting.
"""
self._count = min(self._total, self._count + 1)
def enable(self, verbose=False):
sh = logging.StreamHandler()
sh.emit = self._stream_formatter
self._logger.addHandler(sh)
if verbose:
self._logger.setLevel(logging.DEBUG)
else:
self._logger.setLevel(logging.INFO)
@contextlib.contextmanager
def set_level(self, level):
orig_level = self._logger.level
if not self.is_debug_enabled():
self._logger.setLevel(level)
try:
yield
finally:
self._logger.setLevel(orig_level)
def is_debug_enabled(self):
return self._logger.getEffectiveLevel() <= logging.DEBUG
def _message(
self, routine, message, reserve_space=False, color=None, continued=False
):
kwargs = {}
extra = {}
if color is not None:
extra["color"] = color
if continued:
extra["continued"] = True
if extra:
kwargs["extra"] = extra
if reserve_space:
max_width = max(16, util.terminal_width - 33)
message = truncate_left(message, max_width)
self._prev_message = message
routine(message, **kwargs)
def info(self, *args, **kwargs):
self._message(self._logger.info, *args, **kwargs)
def warning(self, *args, **kwargs):
self._message(self._logger.warning, *args, **kwargs)
def debug(self, *args, **kwargs):
self._message(self._logger.debug, *args, **kwargs)
def error(self, *args, **kwargs):
self._message(self._logger.error, *args, **kwargs)
def add(self, msg):
if self._needs_newline:
_write_with_fallback(msg, sys.stdout)
sys.stdout.flush()
else:
self.info(msg)
def add_padded(self, msg):
"""
Final part of two-part info message.
Should be preceded by a call to info/warn/...(msg, reserve_space=True)
"""
if self._prev_message is None:
# No previous part: print as an info message
self.info(msg)
return
padding_length = (
util.terminal_width - len(self._prev_message) - 14 - 1 - len(msg)
)
if WIN:
padding_length -= 1
padding = " " * padding_length
self._prev_message = None
self.add(f" {padding}{msg}")
def flush(self):
"""
Flush any trailing newlines. Needs to be called before printing
to stdout via other means, after using Log.
"""
if self._needs_newline:
color_print("")
self._needs_newline = False
sys.stdout.flush()
asv_runner-0.2.1/asv_runner/discovery.py 0000664 0000000 0000000 00000025420 14562240250 0020442 0 ustar 00root root 0000000 0000000 import importlib
import inspect
import json
import os
import pkgutil
import traceback
from ._aux import update_sys_path
from .benchmarks import benchmark_types
def _get_benchmark(attr_name, module, klass, func):
"""
Retrieves benchmark function based on attribute name, module, class, and
function.
#### Parameters
**attr_name** (`str`)
: The attribute name of the function.
**module** (module)
: The module where the function resides.
**klass** (class or None)
: The class defining the function, or None if not applicable.
**func** (function)
: The function to be benchmarked.
#### Returns
**benchmark** (Benchmark instance or None)
: A benchmark instance with the name of the benchmark, the function to be
benchmarked, and its sources. Returns None if no matching benchmark is found
or the function is marked to be skipped.
#### Notes
The function tries to get the `benchmark_name` from `func`. If it fails, it
uses `attr_name` to match with the name regex in the benchmark types. If a
match is found, it creates a new benchmark instance and returns it. If no
match is found or the function is marked to be skipped, it returns None.
"""
# Check if the function has been marked to be skipped
if getattr(func, "skip_benchmark", False):
return
try:
name = func.benchmark_name
except AttributeError:
name = None
search = attr_name
else:
search = name.split(".")[-1]
for cls in benchmark_types:
if cls.name_regex.match(search):
break
else:
return
# relative to benchmark_dir
mname_parts = module.__name__.split(".", 1)[1:]
if klass is None:
if name is None:
name = ".".join(mname_parts + [func.__name__])
sources = [func, module]
else:
instance = klass()
func = getattr(instance, attr_name)
if name is None:
name = ".".join(mname_parts + [klass.__name__, attr_name])
sources = [func, instance, module]
return cls(name, func, sources)
def disc_modules(module_name, ignore_import_errors=False):
"""
Recursively imports a module and all sub-modules in the package.
#### Parameters
**module_name** (`str`)
: The name of the module to import.
**ignore_import_errors** (`bool`, optional)
: Whether to ignore import errors. Default is False.
#### Yields
**module** (module)
: The imported module in the package tree.
#### Notes
This function imports the given module and yields it. If `ignore_import_errors`
is set to True, the function will continue executing even if the import fails
and will print the traceback. If `ignore_import_errors` is set to False and
the import fails, the function will raise the error. After yielding the
imported module, the function looks for sub-modules within the package of
the imported module and recursively imports and yields them.
"""
if not ignore_import_errors:
module = importlib.import_module(module_name)
else:
try:
module = importlib.import_module(module_name)
except BaseException:
traceback.print_exc()
return
yield module
if getattr(module, "__path__", None):
for _, name, _ in pkgutil.iter_modules(module.__path__, f"{module_name}."):
yield from disc_modules(name, ignore_import_errors)
def disc_benchmarks(root, ignore_import_errors=False):
"""
Discovers all benchmarks in a given directory tree, yielding Benchmark
objects.
#### Parameters
**root** (`str`)
: The root of the directory tree where the function begins to search for
benchmarks.
**ignore_import_errors** (`bool`, optional)
: Specifies if import errors should be ignored. Default is False.
#### Yields
**benchmark** (Benchmark instance or None)
: A benchmark instance containing the benchmark's name, the function to
be benchmarked, and its sources if a matching benchmark is found.
#### Notes
For each class definition, the function searches for methods with a
specific name. For each free function, it yields all functions with a
specific name. The function initially imports all modules and submodules
in the directory tree using the `disc_modules` function. Then, for each
imported module, it searches for classes and functions that might be
benchmarks. If it finds a class, it looks for methods within that class
that could be benchmarks. If it finds a free function, it considers it as
a potential benchmark. A potential benchmark is confirmed by the
`_get_benchmark` function. If this function returns a benchmark instance,
the instance is yielded.
"""
root_name = os.path.basename(root)
for module in disc_modules(root_name, ignore_import_errors=ignore_import_errors):
for attr_name, module_attr in (
(k, v) for k, v in module.__dict__.items() if not k.startswith("_")
):
if inspect.isclass(module_attr) and not inspect.isabstract(module_attr):
for name, class_attr in inspect.getmembers(module_attr):
if inspect.isfunction(class_attr) or inspect.ismethod(class_attr):
benchmark = _get_benchmark(
name, module, module_attr, class_attr
)
if benchmark is not None:
yield benchmark
elif inspect.isfunction(module_attr):
benchmark = _get_benchmark(attr_name, module, None, module_attr)
if benchmark is not None:
yield benchmark
def get_benchmark_from_name(root, name, extra_params=None):
"""
Creates a benchmark from a fully-qualified benchmark name.
#### Parameters
**root** (`str`)
: Path to the root of a benchmark suite.
**name** (`str`)
: Fully-qualified name of a specific benchmark.
**extra_params** (`dict`, optional)
: Extra parameters to be added to the benchmark.
#### Returns
**benchmark** (Benchmark instance)
: A benchmark instance created from the given fully-qualified benchmark name.
#### Raises
**ValueError**
: If the provided benchmark ID is invalid or if the benchmark could not be found.
#### Notes
This function aims to create a benchmark from the given fully-qualified
name. It splits the name using the "-" character. If "-" is present in the
name, the string after the "-" is converted to an integer and is considered as
the parameter index. If "-" is not present, the parameter index is set to
None. The function then tries to directly import the benchmark function by
guessing its import module name. If the benchmark is not found this way, the
function searches for the benchmark in the directory tree root using
`disc_benchmarks`. If the benchmark is still not found, it raises a
ValueError. If extra parameters are provided, they are added to the
benchmark.
"""
if "-" in name:
try:
name, param_idx = name.split("-", 1)
param_idx = int(param_idx)
except ValueError:
raise ValueError(f"Benchmark id {name!r} is invalid")
else:
param_idx = None
update_sys_path(root)
benchmark = None
# try to directly import benchmark function by guessing its import module name
parts = name.split(".")
for i in [1, 2]:
path = f"{os.path.join(root, *parts[:-i])}.py"
if not os.path.isfile(path):
continue
modname = ".".join([os.path.basename(root)] + parts[:-i])
module = importlib.import_module(modname)
try:
module_attr = getattr(module, parts[-i])
except AttributeError:
break
if i == 1 and inspect.isfunction(module_attr):
benchmark = _get_benchmark(parts[-i], module, None, module_attr)
break
elif i == 2 and inspect.isclass(module_attr):
try:
class_attr = getattr(module_attr, parts[-1])
except AttributeError:
break
if inspect.isfunction(class_attr) or inspect.ismethod(class_attr):
benchmark = _get_benchmark(parts[-1], module, module_attr, class_attr)
break
if benchmark is None:
for benchmark in disc_benchmarks(root):
if benchmark.name == name:
break
else:
raise ValueError(f"Could not find benchmark '{name}'")
if param_idx is not None:
benchmark.set_param_idx(param_idx)
if extra_params:
class ExtraBenchmarkAttrs:
pass
for key, value in extra_params.items():
setattr(ExtraBenchmarkAttrs, key, value)
benchmark._attr_sources.insert(0, ExtraBenchmarkAttrs)
return benchmark
def list_benchmarks(root, fp):
"""
Lists all discovered benchmarks to a file pointer as JSON.
#### Parameters
**root** (`str`)
: Path to the root of a benchmark suite.
**fp** (file object)
: File pointer where the JSON list of benchmarks should be written.
#### Notes
The function updates the system path with the root directory of the
benchmark suite. Then, it iterates over all benchmarks discovered in the
root directory. For each benchmark, it creates a dictionary containing all
attributes of the benchmark that are of types `str`, `int`, `float`, `list`,
`dict`, `bool` and don't start with an underscore `_`. These attribute
dictionaries are then dumped as JSON into the file pointed by `fp`.
"""
update_sys_path(root)
# Streaming of JSON back out to the master process
fp.write("[")
first = True
for benchmark in disc_benchmarks(root):
if not first:
fp.write(", ")
clean = {
k: v
for (k, v) in benchmark.__dict__.items()
if isinstance(v, (str, int, float, list, dict, bool))
and not k.startswith("_")
}
json.dump(clean, fp, skipkeys=True)
first = False
fp.write("]")
def _discover(args):
"""
Discovers all benchmarks in the provided benchmark directory and lists them
to a file.
#### Parameters
**args** (`tuple`)
: A tuple containing benchmark directory and result file path.
#### Notes
The function takes a tuple as an argument. The first element of the tuple
should be the path to the benchmark directory, and the second element should
be the path to the result file. It opens the result file for writing and
calls the `list_benchmarks` function with the benchmark directory and the
file pointer of the result file.
"""
benchmark_dir, result_file = args
with open(result_file, "w") as fp:
list_benchmarks(benchmark_dir, fp)
asv_runner-0.2.1/asv_runner/run.py 0000664 0000000 0000000 00000005132 14562240250 0017235 0 ustar 00root root 0000000 0000000 import json
import math
import pickle
from ._aux import set_cpu_affinity_from_params
from .benchmarks.mark import SkipNotImplemented
from .discovery import get_benchmark_from_name
def _run(args):
"""
Runs a specified benchmark and writes the result to a file.
#### Parameters
**args** (`tuple`)
: A tuple containing benchmark directory, benchmark id,
parameters string, profile path, and result file path.
#### Notes
This function first loads the extra parameters and sets the
CPU affinity based on them. It then creates a benchmark
from the `benchmark_id`. If the benchmark has a setup
cache key, it loads the cache from a file and inserts it
into the benchmark parameters.
Then, the function runs the setup for the benchmark. If
the setup indicates that the benchmark should be skipped,
it sets the result as `math.nan`. Otherwise, it runs the
benchmark and profiles it if a `profile_path` is provided.
After running the benchmark, it performs the teardown for
the benchmark and writes the result to the `result_file`.
The `args` tuple contains:
- **benchmark_dir** (`str`)
: The directory where the benchmarks are located.
- **benchmark_id** (`str`)
: The id of the benchmark to run.
- **params_str** (`str`)
: A string containing JSON-encoded extra parameters.
- **profile_path** (`str`)
: The path for profile data. "None" implies no profiling.
- **result_file** (`str`)
: The path to the file where the result should be written.
"""
(benchmark_dir, benchmark_id, params_str, profile_path, result_file) = args
extra_params = json.loads(params_str)
set_cpu_affinity_from_params(extra_params)
extra_params.pop("cpu_affinity", None)
if profile_path == "None":
profile_path = None
benchmark = get_benchmark_from_name(
benchmark_dir, benchmark_id, extra_params=extra_params
)
if benchmark.setup_cache_key is not None:
with open("cache.pickle", "rb") as fd:
cache = pickle.load(fd)
if cache is not None:
benchmark.insert_param(cache)
skip = benchmark.do_setup()
try:
if skip:
result = math.nan
else:
try:
result = benchmark.do_run()
if profile_path is not None:
benchmark.do_profile(profile_path)
except SkipNotImplemented:
# Still runs setup() though
result = math.nan
finally:
benchmark.do_teardown()
with open(result_file, "w") as fp:
json.dump(result, fp)
asv_runner-0.2.1/asv_runner/server.py 0000664 0000000 0000000 00000017336 14562240250 0017750 0 ustar 00root root 0000000 0000000 import json
import os
import struct
import sys
import tempfile
import time
import timeit
from ._aux import posix_redirect_output, update_sys_path
from .discovery import disc_benchmarks
from .run import _run
wall_timer = timeit.default_timer
def recvall(sock, size):
"""
Receives data from a socket until the specified size of data has been received.
#### Parameters
**sock** (`socket`)
: The socket from which the data will be received. This socket should
already be connected to the other end from which data is to be received.
**size** (`int`)
: The total size of data to be received from the socket.
#### Returns
**data** (`bytes`)
: The data received from the socket. The length of this data will be equal
to the size specified.
#### Raises
**RuntimeError**
: If the socket closed before the specified size of data could be received.
#### Notes
This function continuously receives data from the provided socket in a loop
until the total length of the received data is equal to the specified size.
If the socket closes before the specified size of data could be received, a
`RuntimeError` is raised. The function returns the received data as a byte
string.
"""
data = b""
while len(data) < size:
s = sock.recv(size - len(data))
data += s
if not s:
raise RuntimeError(
"did not receive data from socket " f"(size {size}, got only {data !r})"
)
return data
def _run_server(args):
"""
Runs a server that executes benchmarks based on the received commands.
#### Parameters
**args** (`tuple`)
: A tuple containing the benchmark directory and socket name.
- `benchmark_dir` (`str`): The directory where the benchmarks are located.
- `socket_name` (`str`): The name of the UNIX socket to be used for
- communication.
#### Raises
**RuntimeError**
: If the received command contains unknown data.
#### Notes
This function creates a server that listens on a UNIX socket for commands.
It can perform two actions based on the received command: quit or preimport
benchmarks.
If the command is "quit", the server stops running. If the command is
"preimport", the function imports all the benchmarks in the specified
directory, capturing all the I/O to a file during import. After the
benchmarks are imported, the function sends the contents of the output file
back through the socket.
If the action is not "quit" or "preimport", the function assumes it is a
command to run a specific benchmark. It then runs the benchmark and waits
for the results. It also handles a timeout for the benchmark execution and
sends the results back through the socket.
The function continuously accepts new commands until it receives a "quit"
command or a KeyboardInterrupt.
It uses UNIX domain sockets for inter-process communication. The name of the
socket is passed as a parameter in `args`. The socket is created, bound to
the socket name, and set to listen for connections. When a connection is
accepted, the command is read from the socket, parsed, and executed
accordingly. After executing the command, the server sends back the result
through the socket and waits for the next command.
"""
import signal
import socket
(
benchmark_dir,
socket_name,
) = args
update_sys_path(benchmark_dir)
# Socket I/O
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(socket_name)
s.listen(1)
# Read and act on commands from socket
while True:
stdout_file = None
try:
conn, addr = s.accept()
except KeyboardInterrupt:
break
try:
fd, stdout_file = tempfile.mkstemp()
os.close(fd)
# Read command
(read_size,) = struct.unpack(" start_time + timeout:
# Timeout
if is_timeout:
os.kill(pid, signal.SIGKILL)
else:
os.kill(pid, signal.SIGTERM)
is_timeout = True
time2sleep *= 1e1
time.sleep(min(time2sleep, 0.001))
# Report result
with open(stdout_file, errors="replace") as f:
out = f.read()
# Emulate subprocess
if os.WIFSIGNALED(status):
retcode = -os.WTERMSIG(status)
elif os.WIFEXITED(status):
retcode = os.WEXITSTATUS(status)
elif os.WIFSTOPPED(status):
retcode = -os.WSTOPSIG(status)
else:
# shouldn't happen, but fail silently
retcode = -128
info = {"out": out, "errcode": -256 if is_timeout else retcode}
result_text = json.dumps(info)
result_text = result_text.encode("utf-8")
conn.sendall(struct.pack("