pax_global_header00006660000000000000000000000064145622402500014512gustar00rootroot0000000000000052 comment=9b7a26f8d24d91a0cf040b367d1a37f680f01bbd asv_runner-0.2.1/000077500000000000000000000000001456224025000136745ustar00rootroot00000000000000asv_runner-0.2.1/.github/000077500000000000000000000000001456224025000152345ustar00rootroot00000000000000asv_runner-0.2.1/.github/workflows/000077500000000000000000000000001456224025000172715ustar00rootroot00000000000000asv_runner-0.2.1/.github/workflows/build_wheels.yml000066400000000000000000000032541456224025000224660ustar00rootroot00000000000000# Build on every branch push, tag push, and pull request change: # From: https://github.com/pypa/cibuildwheel/blob/main/examples/github-deploy.yml name: Build wheels on: [push, pull_request] jobs: build_wheels: name: Build wheel for ${{ matrix.python }} runs-on: ubuntu-latest strategy: fail-fast: false steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: python-version: '3.8' - run: pip wheel -w ./wheelhouse/ . - uses: actions/upload-artifact@v3 with: path: ./wheelhouse/*.whl build_sdist: name: Build source distribution runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Build sdist shell: bash -l {0} run: pipx run build --sdist - uses: actions/upload-artifact@v3 with: path: dist/*.tar.gz upload_pypi: needs: [build_wheels, build_sdist] runs-on: ubuntu-latest environment: name: pypi url: https://pypi.org/p/asv_runner permissions: id-token: write # for trusted publishing # upload to PyPI on every tag starting with 'v' if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') # alternatively, to publish when a GitHub Release is created, use the following rule: # if: github.event_name == 'release' && github.event.action == 'published' steps: - uses: actions/download-artifact@v3 with: # unpacks default artifact into dist/ # if `name: artifact` is omitted, the action will create extra parent dir name: artifact path: dist - uses: pypa/gh-action-pypi-publish@release/v1 asv_runner-0.2.1/.github/workflows/pre_commit.yml000066400000000000000000000004211456224025000221470ustar00rootroot00000000000000name: pre-commit on: pull_request: push: branches: [main] jobs: pre-commit: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v3 with: python-version: '3.9' - uses: pre-commit/action@v2.0.3 asv_runner-0.2.1/.github/workflows/slashdot_trigger.yml000066400000000000000000000006301456224025000233570ustar00rootroot00000000000000name: Slash Command Dispatch on: issue_comment: types: [created] jobs: slashCommandDispatch: runs-on: ubuntu-latest steps: - name: Slash Command Dispatch uses: peter-evans/slash-command-dispatch@v3 with: token: ${{ secrets.ASV_TOK }} commands: | trigger-asv static-args: | pr_number=${{ github.event.issue.number }} asv_runner-0.2.1/.github/workflows/trigger_asv.yml000066400000000000000000000015411456224025000223310ustar00rootroot00000000000000name: Trigger asv on: repository_dispatch: types: [trigger-asv-command] concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: triggerasv: runs-on: ${{ matrix.config.os }} name: ${{ matrix.config.os }} strategy: fail-fast: false matrix: config: - {os: ubuntu-latest} steps: - uses: actions/checkout@v3 with: submodules: "recursive" fetch-depth: 0 - uses: convictional/trigger-workflow-and-wait@v1.6.5 with: owner: airspeed-velocity repo: asv github_token: ${{ secrets.ASV_TOK }} workflow_file_name: triggered.yml ref: master wait_workflow: true 
client_payload: '{"pr_number": "${{ github.event.client_payload.slash_command.args.named.pr_number }}"}' asv_runner-0.2.1/.gitignore000066400000000000000000000062171456224025000156720ustar00rootroot00000000000000# Additionally docs/source/apidocs/* docs/build/* # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder .pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. # https://pdm.fming.dev/#use-with-ide .pdm.toml # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Cython debug symbols cython_debug/ # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
#.idea/ .pdm-python /docs/source/CHANGELOG.md /docs/html/ /asv_runner/_version.py /.pdm-build/ asv_runner-0.2.1/.pre-commit-config.yaml000066400000000000000000000012031456224025000201510ustar00rootroot00000000000000repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - id: trailing-whitespace exclude: ^(test/example_results) - id: end-of-file-fixer exclude: ^(test/example_results/cheetah) - id: check-yaml - id: check-added-large-files - repo: https://github.com/pycqa/isort rev: 5.12.0 hooks: - id: isort name: isort (python) - repo: https://github.com/psf/black rev: 23.3.0 hooks: - id: black - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.0.272 hooks: - id: ruff args: ["--fix", "--show-source"] asv_runner-0.2.1/.readthedocs.yaml000066400000000000000000000004161456224025000171240ustar00rootroot00000000000000version: 2 build: os: ubuntu-22.04 tools: python: "3.10" sphinx: configuration: docs/source/conf.py formats: - pdf - epub python: install: - requirements: docs/requirements.txt - method: pip path: . extra_requirements: - docs asv_runner-0.2.1/CHANGELOG.md000066400000000000000000000135101456224025000155050ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). This project uses [*towncrier*](https://towncrier.readthedocs.io/) and the changes for the upcoming release can be found in . ## [0.2.1](https://github.com/airspeed-velocity/asv_runner/tree/0.2.1) - 11-02-2024 No significant changes. ## [0.2.0](https://github.com/airspeed-velocity/asv_runner/tree/0.2.0) - 11-02-2024 ### Other Changes and Additions - `asv_runner` now uses `towncrier` to manage the changelog, also adds the changeglog to the generated documentation. ([#38](https://github.com/airspeed-velocity/asv_runner/issues/38)) - The lowest supported version of `python` for building the `asv_runner` documentation is now `3.8`, since `3.7` has been EOL for [many months now](https://endoflife.date/python). ([#39](https://github.com/airspeed-velocity/asv_runner/issues/39)) ## [0.1.0](https://github.com/airspeed-velocity/asv_runner/tree/0.1.0) - 11-09-2023 ### Bug Fixes - Default `max_time` is set to `60.0` seconds to fix `--quick`. ([#29](https://github.com/airspeed-velocity/asv_runner/issues/29)) - `asv` will not try to access a missing `colorama` attribute. ([#32](https://github.com/airspeed-velocity/asv_runner/issues/32)) ### Other Changes and Additions - `pip-tools` and `pip-compile` are used to pin transitive dependencies for read the docs. ([#31](https://github.com/airspeed-velocity/asv_runner/issues/31)) ## [0.0.9](https://github.com/airspeed-velocity/asv_runner/tree/0.0.9) - 20-08-2023 ### New Features - Adds a `skip_benchmark` decorator. ```python from asv_runner.benchmarks.helpers import skip_benchmark @skip_benchmark class TimeSuite: """ An example benchmark that times the performance of various kinds of iterating over dictionaries in Python. """ def setup(self): self.d = {} for x in range(500): self.d[x] = None def time_keys(self): for key in self.d.keys(): pass def time_values(self): for value in self.d.values(): pass def time_range(self): d = self.d for key in range(500): d[key] ``` Usage requires `asv 0.6.0`. ([#13](https://github.com/airspeed-velocity/asv_runner/issues/13)) - Finely grained `skip_benchmark_if` and `skip_params_if` have been added. 
```python from asv_runner.benchmarks.mark import skip_benchmark_if, skip_params_if import datetime class TimeSuite: """ An example benchmark that times the performance of various kinds of iterating over dictionaries in Python. """ params = [100, 200, 300, 400, 500] param_names = ["size"] def setup(self, size): self.d = {} for x in range(size): self.d[x] = None @skip_benchmark_if(datetime.datetime.now().hour >= 12) def time_keys(self, size): for key in self.d.keys(): pass @skip_benchmark_if(datetime.datetime.now().hour >= 12) def time_values(self, size): for value in self.d.values(): pass @skip_benchmark_if(datetime.datetime.now().hour >= 12) def time_range(self, size): d = self.d for key in range(size): d[key] # Skip benchmarking when size is either 100 or 200 and the current hour is 12 or later. @skip_params_if([(100,), (200,)], datetime.datetime.now().hour >= 12) def time_dict_update(self, size): d = self.d for i in range(size): d[i] = i ``` Usage requires `asv 0.6.0`. ([#17](https://github.com/airspeed-velocity/asv_runner/issues/17)) - Benchmarks can now be parameterized using decorators. ```python import numpy as np from asv_runner.benchmarks.mark import parameterize @parameterize({"n":[10, 100]}) def time_sort(n): np.sort(np.random.rand(n)) @parameterize({'n': [10, 100], 'func_name': ['range', 'arange']}) def time_ranges_multi(n, func_name): f = {'range': range, 'arange': np.arange}[func_name] for i in f(n): pass @parameterize({"size": [10, 100, 200]}) class TimeSuiteDecoratorSingle: def setup(self, size): self.d = {} for x in range(size): self.d[x] = None def time_keys(self, size): for key in self.d.keys(): pass def time_values(self, size): for value in self.d.values(): pass @parameterize({'n': [10, 100], 'func_name': ['range', 'arange']}) class TimeSuiteMultiDecorator: def time_ranges(self, n, func_name): f = {'range': range, 'arange': np.arange}[func_name] for i in f(n): pass ``` Usage requires `asv 0.6.0`. ([#18](https://github.com/airspeed-velocity/asv_runner/issues/18)) - Benchmarks can now be skipped during execution. ```python from asv_runner.benchmarks.mark import skip_for_params, parameterize, SkipNotImplemented # Fast because no setup is called class SimpleFast: params = ([False, True]) param_names = ["ok"] @skip_for_params([(False, )]) def time_failure(self, ok): if ok: x = 34.2**4.2 @parameterize({"ok": [False, True]}) class SimpleSlow: def time_failure(self, ok): if ok: x = 34.2**4.2 else: raise SkipNotImplemented(f"{ok} is skipped") ``` Usage requires `asv 0.6.0`. ([#20](https://github.com/airspeed-velocity/asv_runner/issues/20)) ### Bug Fixes - It is possible to set a default timeout from `asv`. ([#19](https://github.com/airspeed-velocity/asv_runner/issues/19)) ### Other Changes and Additions - Documentation, both long-form and API level has been added. ([#6](https://github.com/airspeed-velocity/asv_runner/issues/6)) asv_runner-0.2.1/CODEOWNERS000066400000000000000000000010431456224025000152650ustar00rootroot00000000000000# This is a comment. # Each line is a file pattern followed by one or more owners. # These owners will be the default owners for everything in # the repo. Unless a later match takes precedence, # @global-owner1 and @global-owner2 will be requested for # review when someone opens a pull request. * @HaoZeke # Order is important; the last matching pattern takes the most # precedence. When someone opens a pull request that only # modifies JS files, for example, only @js-owner and not the global # owner(s) will be requested for a review. 
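# For illustration only (this repository defines no such rule), the
# JS-files pattern described above would be written as:
# *.js    @js-owner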
asv_runner-0.2.1/CODE_OF_CONDUCT.md000066400000000000000000000125601456224025000164770ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [INSERT CONTACT METHOD]. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. 
**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations asv_runner-0.2.1/LICENSE.md000066400000000000000000000030641456224025000153030ustar00rootroot00000000000000Copyright (c) 2018-2023, asv Developers. Copyright (c) 2011-2018, Michael Droettboom, Space Telescope Science Institute, Pauli Virtanen All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Astropy Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. asv_runner-0.2.1/README.md000066400000000000000000000020571456224025000151570ustar00rootroot00000000000000# About [![Documentation](https://img.shields.io/badge/Documentation-latest-brightgreen?style=for-the-badge)](https://asv.readthedocs.io/projects/asv-runner/en/latest/) Core Python benchmark code for `asv`. **This package shall not have any dependencies on external packages and must be compatible with all Python versions greater than or equal to `3.7`.** For other functionality, refer to the `asv` package or consider writing an extension. # Contributions All contributions are welcome, this includes code and documentation contributions but also questions or other clarifications. Note that we expect all contributors to follow our [Code of Conduct](https://github.com/airspeed-velocity/asv_runner/blob/main/CODE_OF_CONDUCT.md). ## Developing locally A `pre-commit` job is setup on CI to enforce consistent styles, so it is best to set it up locally as well (using [pipx](https://pypa.github.io/pipx/) for isolation): ```sh # Run before commiting pipx run pre-commit run --all-files # Or install the git hook to enforce this pipx run pre-commit install ``` asv_runner-0.2.1/asv_runner/000077500000000000000000000000001456224025000160565ustar00rootroot00000000000000asv_runner-0.2.1/asv_runner/__init__.py000066400000000000000000000000001456224025000201550ustar00rootroot00000000000000asv_runner-0.2.1/asv_runner/_aux.py000066400000000000000000000150441456224025000173700ustar00rootroot00000000000000import contextlib import importlib import os import sys import tempfile from .benchmarks._maxrss import set_cpu_affinity class SpecificImporter: """ Module importer that only allows loading a given module from the given path. #### Notes Using this enables importing the asv benchmark suite without adding its parent directory to sys.path. The parent directory can in principle contain anything, including some version of the project module (common situation if asv.conf.json is on project repository top level). """ def __init__(self, name, root): """ Initialize a new instance of `SpecificImporter`. #### Parameters **name** (`str`) : The name of the module to load. **root** (`str`) : The path to the directory containing the module. """ self._name = name self._root = root def find_spec(self, fullname, path, target): """ Find the module specification for the given module. #### Parameters **fullname** (`str`) : The fully qualified name of the module. **path** (list or None) : The path for module search, or None if unavailable. **target** (object) : The target object to import. #### Returns **spec** (`ModuleSpec` or None) : The module specification if the module is found, or None otherwise. #### Notes This method is called by the import system to find the module specification for the requested module. If the requested module matches the name of the SpecificImporter instance, it returns the module specification using the `importlib.machinery.PathFinder`. 
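        #### Example
        A minimal usage sketch; the ``/repo`` layout and the ``benchmarks``
        package name are hypothetical:

        ```{code-block} python
        import importlib
        import sys

        from asv_runner._aux import SpecificImporter

        # Allow only the top-level ``benchmarks`` package to be resolved
        # from /repo; nothing else in /repo is added to the import path.
        sys.meta_path.insert(0, SpecificImporter("benchmarks", "/repo"))
        benchmarks = importlib.import_module("benchmarks")
        ```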
""" if fullname == self._name: if path is not None: raise ValueError() finder = importlib.machinery.PathFinder() return finder.find_spec(fullname, [self._root], target) return None def update_sys_path(root): """ Update sys.meta_path to include the SpecificImporter. ##### Parameters `root` (`str`): The path to the root directory. ##### Notes This function inserts the SpecificImporter into the `sys.meta_path` at the beginning, allowing the module to be imported using the SpecificImporter when it is encountered during the import process. """ sys.meta_path.insert( 0, SpecificImporter(os.path.basename(root), os.path.dirname(root)) ) @contextlib.contextmanager def posix_redirect_output(filename=None, permanent=True): """ Redirect stdout/stderr to a file, using posix `dup2`. #### Parameters **filename** (`str` or None, optional) : The name of the file to redirect the output to. If None, a temporary file will be created. **permanent** (`bool`, optional) : Indicates whether the redirection is permanent or temporary. If False, the original stdout/stderr will be restored after the context is exited. #### Yields **filename** (`str`) : The name of the file where the output is redirected. #### Notes The function redirects the `stdout` and `stderr` streams to a file using the posix `dup2` function. It is typically used within a `with` statement to encapsulate the code block where the redirection is desired. If `filename` is not provided, a temporary file will be created and used for redirection. If `permanent` is `True`, the redirection will persist after the context is exited. If `False`, the original `stdout`/`stderr` will be restored. """ sys.stdout.flush() sys.stderr.flush() stdout_fd = sys.stdout.fileno() stderr_fd = sys.stderr.fileno() if not permanent: stdout_fd_copy = os.dup(stdout_fd) stderr_fd_copy = os.dup(stderr_fd) if filename is None: out_fd, filename = tempfile.mkstemp() else: out_fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) try: # Redirect stdout and stderr to file os.dup2(out_fd, stdout_fd) os.dup2(out_fd, stderr_fd) yield filename finally: sys.stdout.flush() sys.stderr.flush() os.close(out_fd) if not permanent: os.dup2(stdout_fd_copy, stdout_fd) os.dup2(stderr_fd_copy, stderr_fd) os.close(stdout_fd_copy) os.close(stderr_fd_copy) def recvall(sock, size): """ Receive data of given size from a socket connection. #### Parameters **sock** (socket object) : The socket connection to receive data from. **size** (`int`) : The size of the data to receive, in bytes. #### Returns **data** (`bytes`) : The received data. #### Raises **RuntimeError** : If the data received from the socket is less than the specified size. #### Notes The function receives data from a socket connection in multiple chunks until the specified size is reached. It ensures that all the required data is received before returning. If the received data size is less than the specified size, a `RuntimeError` is raised indicating the failure to receive the complete data. """ data = b"" while len(data) < size: s = sock.recv(size - len(data)) data += s if not s: raise RuntimeError( "did not receive data from socket " f"(size {size}, got only {data!r})" ) return data def set_cpu_affinity_from_params(extra_params): """ Set CPU affinity based on the provided parameters. #### Parameters **extra_params** (`dict` or `None`) : Additional parameters containing CPU affinity information. #### Notes This function attempts to set the CPU affinity for the current process based on the provided parameters. 
It uses the `set_cpu_affinity` function internally to perform the actual affinity setting. If the `extra_params` dictionary contains a key "cpu_affinity" with a valid affinity list, the CPU affinity will be set accordingly. #### Raises **BaseException** : If setting the CPU affinity fails, an exception is raised and an error message is printed. #### Example ```{code-block} python extra_params = {"cpu_affinity": [0, 1]} set_cpu_affinity_from_params(extra_params) ``` """ affinity_list = extra_params.get("cpu_affinity", None) if affinity_list is not None: try: set_cpu_affinity(affinity_list) except BaseException as exc: print(f"asv: setting cpu affinity {affinity_list !r} failed: {exc !r}") asv_runner-0.2.1/asv_runner/benchmarks/000077500000000000000000000000001456224025000201735ustar00rootroot00000000000000asv_runner-0.2.1/asv_runner/benchmarks/__init__.py000066400000000000000000000053021456224025000223040ustar00rootroot00000000000000""" Automatically discovers and imports benchmark classes from all submodules in the current package. #### Variables **pkgname** (`str`) : The name of the current package. **pkgpath** (`_frozen_importlib_external._NamespacePath`) : The path of the current package. **module_names** (`List[str]`) : The names of all submodules in the current package that don't contain an underscore. **benchmark_types** (`List[Type]`) : A list to hold all benchmark classes from the submodules. #### Raises **NotRequired** (`Exception`) : If a submodule raises a `NotRequired` exception during import, it is ignored. #### Notes This module first identifies all submodules in the current package that don't contain an underscore in their names. It then iterates over these submodules, imports each one, and checks if it contains an attribute named "export_as_benchmark". If such an attribute exists, its contents (which should be a list of benchmark classes) are added to the `benchmark_types` list. If a submodule raises a `NotRequired` exception during the import, it is ignored, and the loop continues with the next submodule. This code is useful in a benchmarking suite where new benchmarks can be added simply by adding a new submodule with an "export_as_benchmark" attribute. """ import importlib import pkgutil from pathlib import Path # py37 doesn't have importlib.metadata from importlib_metadata import distributions from ._exceptions import NotRequired pkgname = __name__ pkgpath = __path__ submodule_names = [ name for _, name, _ in pkgutil.iter_modules(pkgpath) if "_" not in name ] asv_modules = [ dist.metadata["Name"] for dist in distributions() if dist.metadata["Name"].startswith("asv_bench") ] benchmark_types = [] # Builtin modules for module_name in submodule_names: try: module = importlib.import_module(f"{pkgname}.{module_name}") if "export_as_benchmark" in dir(module): benchmark_types.extend(iter(getattr(module, "export_as_benchmark"))) except NotRequired: # Ignored. 
pass # External asv_bench modules for module_name in asv_modules: try: module = importlib.import_module(module_name) benchmarks_path = Path(module.__file__).parent / "benchmarks" benchmark_submodules = [ name for _, name, _ in pkgutil.iter_modules([str(benchmarks_path)]) ] for submodule_name in benchmark_submodules: submodule = importlib.import_module( f"{module_name}.benchmarks.{submodule_name}" ) if "export_as_benchmark" in dir(submodule): benchmark_types.extend(iter(getattr(submodule, "export_as_benchmark"))) except (ImportError, NotRequired): pass asv_runner-0.2.1/asv_runner/benchmarks/_base.py000066400000000000000000000556641456224025000216360ustar00rootroot00000000000000import cProfile as profile import inspect import itertools import math import os import re import textwrap from collections import Counter from hashlib import sha256 def _get_attr(source, name, ignore_case=False): """ Retrieves an attribute from a source by its name. #### Parameters **source** (`object`) : The source from which to get the attribute. **name** (`str`) : The name of the attribute. **ignore_case** (`bool`, optional) : Whether to ignore case when comparing attribute names. Defaults to `False`. #### Returns **attr** (`object` or `None`) : The attribute if it is found, else `None`. #### Raises **ValueError** : If more than one attribute with the given name exists and `ignore_case` is `True`. """ if not ignore_case: return getattr(source, name, None) attrs = [getattr(source, key) for key in dir(source) if key.lower() == name.lower()] if len(attrs) > 1: raise ValueError(f"{source.__name__} contains multiple {name} functions.") elif len(attrs) == 1: return attrs[0] else: return None def _get_all_attrs(sources, name, ignore_case=False): """ Yields attributes from a list of sources by their name. #### Parameters **sources** (`List[object]`) : The list of sources from which to get the attribute. **name** (`str`) : The name of the attribute. **ignore_case** (`bool`, optional) : Whether to ignore case when comparing attribute names. Defaults to `False`. #### Yields **val** (`object`) : The attribute if it is found in the source. """ for source in sources: val = _get_attr(source, name, ignore_case=ignore_case) if val is not None: yield val def _get_first_attr(sources, name, default, ignore_case=False): """ Retrieves the first attribute from a list of sources by its name. #### Parameters **sources** (`List[object]`) : The list of sources from which to get the attribute. **name** (`str`) : The name of the attribute. **default** (`object`) : The default value to return if no attribute is found. **ignore_case** (`bool`, optional) : Whether to ignore case when comparing attribute names. Defaults to `False`. #### Returns **attr** (`object`) : The first attribute found or the default value if no attribute is found. """ for val in _get_all_attrs(sources, name, ignore_case=ignore_case): return val return default def get_setup_cache_key(func): """ Retrieves the cache key for a function's setup. #### Parameters **func** (`function`) : The function for which to get the cache key. #### Returns **cache_key** (`str` or `None`) : The cache key if the function is not `None`, else `None`. #### Notes The cache key is a string composed of the function's module name and the line number where the function's source code starts. 
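    #### Example
    An illustrative sketch; the module path and line number are hypothetical.
    For a ``setup_cache`` function starting at line 12 of a benchmark module
    imported as ``benchmarks.bench_io``:

    ```{code-block} python
    get_setup_cache_key(setup_cache)  # -> "bench_io:12"
    ```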
""" if func is None: return None module = inspect.getmodule(func) mname = ".".join(module.__name__.split(".", 1)[1:]) if not mname: mname = inspect.getsourcefile(func) return f"{mname}:{inspect.getsourcelines(func)[1]}" def get_source_code(items): """ Extracts, concatenates, and dedents the source code of the given items. #### Parameters **items** (`Iterable[object]`) : An iterable of items, typically functions or methods, for which to extract the source code. #### Returns **source_code** (`str`) : The concatenated and dedented source code of the items. #### Notes The function retrieves the source code of each item. If the item has a `pretty_source` attribute, it uses that as the source code. Otherwise, it attempts to use the `inspect` module's `getsourcelines` function to extract the source code. The function also adds class names to methods and properly indents the source code. If the source code belongs to a method, the function retrieves the class name and prepends it to the source code, properly indenting it to reflect its position within the class. If the source code belongs to the same class as the previous item, only the indentation is adjusted. """ sources = [] prev_class_name = None for func in items: # custom source if hasattr(func, "pretty_source"): src = textwrap.dedent(func.pretty_source).lstrip() # original source else: try: lines, _ = inspect.getsourcelines(func) except TypeError: continue if not lines: continue src = "\n".join(line.rstrip() for line in lines) src = textwrap.dedent(src) class_name = None if inspect.ismethod(func): # Add class name if hasattr(func, "im_class"): class_name = func.im_class.__name__ elif hasattr(func, "__qualname__"): names = func.__qualname__.split(".") if len(names) > 1: class_name = names[-2] if class_name and prev_class_name != class_name: src = "class {}:\n {}".format(class_name, src.replace("\n", "\n ")) elif class_name: src = " " + src.replace("\n", "\n ") sources.append(src) prev_class_name = class_name return "\n\n".join(sources).rstrip() def _get_sourceline_info(obj, basedir): """ Retrieves the source file and line number information of the given object. #### Parameters **obj** (`object`) : The object for which to retrieve source file and line number information. This is typically a function or a method. **basedir** (`str`) : The base directory relative to which the source file path should be expressed. #### Returns **sourceline_info** (`str`) : A string containing the relative path of the source file and the line number where the object is defined, in the format `' in {filename}:{lineno}'`. If the source file or line number cannot be determined, an empty string is returned. #### Notes The function uses the `inspect` module's `getsourcefile` and `getsourcelines` functions to determine the source file and line number of the object, respectively. The source file path is converted to a path relative to `basedir` using `os.path.relpath`. """ try: fn = inspect.getsourcefile(obj) fn = os.path.relpath(fn, basedir) _, lineno = inspect.getsourcelines(obj) return f" in {fn !s}:{lineno !s}" except Exception: return "" def _check_num_args(root, benchmark_name, func, min_num_args, max_num_args=None): """ Verifies if the function under benchmarking accepts a correct number of arguments. #### Parameters **root** (`str`) : The root directory for the function's source file (used to print detailed error messages). **benchmark_name** (`str`) : The name of the benchmark for which the function is being checked (used in error messages). 
**func** (`function`) : The function to check for correct number of arguments. **min_num_args** (`int`) : The minimum number of arguments the function should accept. **max_num_args** (`int`, optional) : The maximum number of arguments the function should accept. If not provided, `max_num_args` is assumed to be the same as `min_num_args`. #### Returns **validity** (`bool`) : True if the function accepts a correct number of arguments, False otherwise. #### Notes The function uses the `inspect` module's `getfullargspec` function to determine the number of arguments the function accepts. It correctly handles functions, methods, variable argument lists, and functions with default argument values. In case of any error or if the function does not accept a correct number of arguments, an error message is printed to standard output. """ if max_num_args is None: max_num_args = min_num_args try: info = inspect.getfullargspec(func) except Exception as exc: print( f"{benchmark_name !s}: failed to check " f"({func !r}{_get_sourceline_info(func, root) !s}): {exc !s}" ) return True max_args = len(info.args) if inspect.ismethod(func): max_args -= 1 min_args = max_args if info.defaults is None else max_args - len(info.defaults) if info.varargs is not None: max_args = math.inf ok = (min_args <= max_num_args) and (min_num_args <= max_args) if not ok: args_str = min_args if min_args == max_args else f"{min_args}-{max_args}" if min_num_args == max_num_args: num_args_str = min_num_args else: num_args_str = f"{min_num_args}-{max_num_args}" print( f"{benchmark_name !s}: wrong number of arguments " f"(for {func !r}{_get_sourceline_info(func, root) !s}):", f"expected {num_args_str}, " f"has {args_str}", ) return ok def _repr_no_address(obj): """ Returns a string representing the object, but without its memory address. #### Parameters **obj** (`object`) : The object to represent. #### Returns **representation** (`str`) : A string representation of the object without its memory address. #### Notes When Python's built-in `repr` function is used on an object, it often includes the memory address of the object. In some cases, this might not be desirable (for example, when comparing object representations in unit tests, where the memory address is not relevant). This function provides a way to get a string representation of an object without its memory address. The function works by first getting the `repr` of the object, then using a regular expression to detect and remove the memory address if it's present. To avoid false positives, the function also gets the `repr` of the object using the `object` class's `__repr__` method (which always includes the address), and only removes the address from the original `repr` if it matches the address in the `object.__repr__`. Please note, this function is not guaranteed to remove the memory address for all objects. It is primarily intended to work for objects that have a `repr` similar to the default one provided by the `object` class. """ result = repr(obj) address_regex = re.compile(r"^(<.*) at (0x[\da-fA-F]*)(>)$") match = address_regex.match(result) if match: suspected_address = match[2] # Double check this is the actual address default_result = object.__repr__(obj) match2 = address_regex.match(default_result) if match2: known_address = match2[2] if known_address == suspected_address: result = match[1] + match[3] return result def _validate_params(params, param_names, name): """ Validates the params and param_names attributes and returns validated lists. 
#### Parameters **params** (`list`) : List of parameters for the function to be benchmarked. **param_names** (`list`) : List of names for the parameters. **name** (`str`) : The name of the benchmark. #### Returns **params**, **param_names** (`list`, `list`) : The validated parameter and parameter name lists. """ try: param_names = [str(x) for x in list(param_names)] except ValueError: raise ValueError(f"{name}.param_names is not a list of strings") try: params = list(params) except ValueError: raise ValueError(f"{name}.params is not a list") if params and not isinstance(params[0], (tuple, list)): params = [params] else: params = [list(entry) for entry in params] if len(param_names) != len(params): param_names = param_names[: len(params)] param_names += [ "param%d" % (k + 1,) for k in range(len(param_names), len(params)) ] return params, param_names def _unique_param_ids(params): """ Processes the params list to handle duplicate names within parameter sets, ensuring unique IDs. #### Parameters **params** (`list`) : List of parameters. Each entry is a list representing a set of parameters. #### Returns **params** (`list`) : List of parameters with duplicate names within each set handled. If there are duplicate names, they are renamed with a numerical suffix to ensure unique IDs. """ params = [[_repr_no_address(item) for item in entry] for entry in params] for i, param in enumerate(params): if len(param) != len(set(param)): counter = Counter(param) dupe_dict = {name: 0 for name, count in counter.items() if count > 1} for j in range(len(param)): name = param[j] if name in dupe_dict: param[j] = f"{name} ({dupe_dict[name]})" dupe_dict[name] += 1 params[i] = param return params class Benchmark: """ Class representing a single benchmark. The class encapsulates functions and methods that can be marked as benchmarks, along with setup and teardown methods, timing and other configuration. #### Notes The class uses regex to match method names that will be considered as benchmarks. The matched functions are then processed for benchmarking using various helper methods. By default, a benchmark's timeout is set to 60 seconds. """ # The regex of the name of function or method to be considered as # this type of benchmark. The default in the base class, will # match nothing. name_regex = re.compile("^$") def __init__(self, name, func, attr_sources): """ Initialize a new instance of `Benchmark`. #### Parameters **name** (`str`) : The name of the benchmark. **func** (`function`) : The function to benchmark. **attr_sources** (`list`) : List of sources from which attributes of the benchmark will be drawn. These attributes include setup, teardown, timeout, etc. #### Attributes **pretty_name** (`str`) : A user-friendly name for the function being benchmarked, if available. **_setups** (`list`) : List of setup methods to be executed before the benchmark. **_teardowns** (`list`) : List of teardown methods to be executed after the benchmark. **_setup_cache** (`function`) : A special setup method that is only run once per parameter set. **setup_cache_key** (`str`) : A unique key for the setup cache. **setup_cache_timeout** (`float`) : The time after which the setup cache should be invalidated. **timeout** (`float`) : The maximum time the benchmark is allowed to run before it is aborted. **code** (`str`) : The source code of the function to be benchmarked and its setup methods. **version** (`str`) : A version string derived from a hash of the code. 
**_params** (`list`) : List of parameters for the function to be benchmarked. **param_names** (`list`) : List of names for the parameters. **_current_params** (`tuple`) : The current set of parameters to be passed to the function during the benchmark. **params** (`list`) : The list of parameters with unique representations for exporting. **_skip_tuples** (`list`) : List of tuples representing parameter combinations to be skipped before calling the setup method. #### Raises **ValueError** : If `param_names` or `_params` is not a list or if the number of parameters does not match the number of parameter names. """ self.name = name self.func = func self.pretty_name = getattr(func, "pretty_name", None) self._attr_sources = attr_sources self._setups = list(_get_all_attrs(attr_sources, "setup", True))[::-1] self._teardowns = list(_get_all_attrs(attr_sources, "teardown", True)) self._setup_cache = _get_first_attr(attr_sources, "setup_cache", None) self.setup_cache_key = get_setup_cache_key(self._setup_cache) self.setup_cache_timeout = _get_first_attr([self._setup_cache], "timeout", None) self.timeout = _get_first_attr(attr_sources, "timeout", None) self.code = get_source_code([self.func] + self._setups + [self._setup_cache]) code_text = self.code.encode("utf-8") code_hash = sha256(code_text).hexdigest() self.version = str(_get_first_attr(attr_sources, "version", code_hash)) self.type = "base" self.unit = "unit" self._redo_setup_next = False self._params = _get_first_attr(attr_sources, "params", []) self.param_names = _get_first_attr(attr_sources, "param_names", []) self._current_params = () self._params, self.param_names = _validate_params( self._params, self.param_names, self.name ) # Fetch skip parameters self._skip_tuples = _get_first_attr(attr_sources, "skip_params", []) # Exported parameter representations self.params = _unique_param_ids(self._params) def __repr__(self): return f"<{self.__class__.__name__} {self.name}>" def set_param_idx(self, param_idx): """ Set the current parameter values for the benchmark based on a parameter index. This method updates the `_current_params` attribute with the set of parameter values that correspond to the provided parameter index. #### Parameters **param_idx** (`int`) : The index of the desired parameter set in the Cartesian product of `_params` attribute list. #### Raises **ValueError** : If the provided parameter index is not valid. This could occur if the index does not correspond to any element in the Cartesian product of the `_params` list. """ try: (self._current_params,) = itertools.islice( itertools.product(*self._params), param_idx, param_idx + 1 ) except ValueError: raise ValueError( f"Invalid benchmark parameter permutation index: {param_idx!r}" ) def insert_param(self, param): """ Inserts a parameter at the beginning of the current parameter list. This method modifies the `_current_params` attribute, inserting the provided parameter value at the front of the parameter tuple. #### Parameters **param** (`Any`) : The parameter value to insert at the front of `_current_params`. """ self._current_params = tuple([param] + list(self._current_params)) def check(self, root): """ Checks call syntax (argument count) for benchmark's setup, call, and teardown. #### Parameters **root** (`Any`) : The root context for checking argument count in setup, call and teardown. #### Returns **result** (`bool`) : `True` if correct argument count is used in all methods, `False` otherwise. #### Notes The call syntax is checked only based on the number of arguments. 
It also sets the current parameters for the benchmark if they exist. The number of arguments required by setup, call, and teardown methods may increase if a setup cache is defined. """ # Check call syntax (number of arguments only...) ok = True if self._params: self.set_param_idx(0) min_num_args = len(self._current_params) max_num_args = min_num_args if self.setup_cache_key is not None: ok = ok and _check_num_args( root, f"{self.name}: setup_cache", self._setup_cache, 0 ) max_num_args += 1 for setup in self._setups: ok = ok and _check_num_args( root, f"{self.name}: setup", setup, min_num_args, max_num_args ) ok = ok and _check_num_args( root, f"{self.name}: call", self.func, min_num_args, max_num_args ) for teardown in self._teardowns: ok = ok and _check_num_args( root, f"{self.name}: teardown", teardown, min_num_args, max_num_args, ) return ok def do_setup(self): if tuple(self._current_params) in self._skip_tuples: # Skip return True try: for setup in self._setups: setup(*self._current_params) except NotImplementedError as e: # allow skipping test print(f"asv: skipped: {e !r} ") return True return False def redo_setup(self): if not self._redo_setup_next: self._redo_setup_next = True return self.do_teardown() self.do_setup() def do_teardown(self): if tuple(self._current_params) in self._skip_tuples: # Skip return for teardown in self._teardowns: teardown(*self._current_params) def do_setup_cache(self): if self._setup_cache is not None: return self._setup_cache() def do_run(self): if tuple(self._current_params) in self._skip_tuples: # Skip return return self.run(*self._current_params) def do_profile(self, filename=None): """ Executes the benchmark's function with profiling using `cProfile`. #### Parameters **filename** (`str`, optional) : The name of the file where the profiling data should be saved. If not provided, the profiling data will not be saved. #### Raises **RuntimeError** : If the `cProfile` module couldn't be imported. #### Notes The method uses an inner function `method_caller` to call the function to be profiled. The function and its parameters should be available in the scope where `method_caller` is called. The `cProfile` module should be available, or else a `RuntimeError` is raised. If a `filename` is provided, the profiling results will be saved to that file. """ if tuple(self._current_params) in self._skip_tuples: # Skip return def method_caller(): run(*params) # noqa:F821 undefined name if profile is None: raise RuntimeError("cProfile could not be imported") if filename is not None: if hasattr(method_caller, "func_code"): code = method_caller.func_code else: code = method_caller.__code__ self.redo_setup() profile.runctx( code, {"run": self.func, "params": self._current_params}, {}, filename ) asv_runner-0.2.1/asv_runner/benchmarks/_exceptions.py000066400000000000000000000021071456224025000230650ustar00rootroot00000000000000class NotRequired(ImportError): """ Exception raised when a requirement is not met. This exception inherits from `ImportError`. It's typically used when a particular package, module or other dependency that is not essential for the overall function of the program is not found or doesn't meet specific requirements. #### Attributes **message** (`str`) : A string that provides a more detailed explanation of the error. 
#### Example This exception might be used in a scenario where an optional feature of a program relies on a specific package that is not installed: ```{code-block} python try: import optional_package except ImportError: raise NotRequired("optional_package is not installed.") ``` """ def __init__(self, message): """ Initialize a new instance of `NotRequired`. #### Parameters **message** (`str`) : A string that provides a more detailed explanation of the error. """ self.message = message super().__init__(self.message) asv_runner-0.2.1/asv_runner/benchmarks/_maxrss.py000066400000000000000000000130631456224025000222240ustar00rootroot00000000000000import os import sys ON_PYPY = hasattr(sys, "pypy_version_info") if sys.platform.startswith("win"): import ctypes.wintypes SIZE_T = ctypes.c_size_t class PROCESS_MEMORY_COUNTERS(ctypes.Structure): """ The PROCESS_MEMORY_COUNTERS structure is used by the GetProcessMemoryInfo function to store performance information. It's used here to retrieve the peak working set size, which is the maximum amount of memory in the working set of the process at any point in time. """ _fields_ = [ ("cb", ctypes.wintypes.DWORD), ("PageFaultCount", ctypes.wintypes.DWORD), ("PeakWorkingSetSize", SIZE_T), ("WorkingSetSize", SIZE_T), ("QuotaPeakPagedPoolUsage", SIZE_T), ("QuotaPagedPoolUsage", SIZE_T), ("QuotaPeakNonPagedPoolUsage", SIZE_T), ("QuotaNonPagedPoolUsage", SIZE_T), ("PagefileUsage", SIZE_T), ("PeakPagefileUsage", SIZE_T), ] GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess GetCurrentProcess.argtypes = [] GetCurrentProcess.restype = ctypes.wintypes.HANDLE GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo GetProcessMemoryInfo.argtypes = ( ctypes.wintypes.HANDLE, ctypes.POINTER(PROCESS_MEMORY_COUNTERS), ctypes.wintypes.DWORD, ) GetProcessMemoryInfo.restype = ctypes.wintypes.BOOL def get_maxrss(): """ Returns the peak working set size for the current process. On Windows, the peak working set size is the maximum amount of physical memory used by the process. #### Returns **peak_working_set_size** (`int`) : The peak working set size for the current process. """ proc_hnd = GetCurrentProcess() counters = PROCESS_MEMORY_COUNTERS() info = GetProcessMemoryInfo( proc_hnd, ctypes.byref(counters), ctypes.sizeof(counters) ) if info == 0: raise ctypes.WinError() return counters.PeakWorkingSetSize # Determine correct DWORD_PTR type for current Python version (32 or 64 bit) if ctypes.sizeof(ctypes.c_void_p) == ctypes.sizeof(ctypes.c_uint64): DWORD_PTR = ctypes.c_uint64 elif ctypes.sizeof(ctypes.c_void_p) == ctypes.sizeof(ctypes.c_uint32): DWORD_PTR = ctypes.c_uint32 SetProcessAffinityMask = ctypes.windll.kernel32.SetProcessAffinityMask SetProcessAffinityMask.argtypes = [ctypes.wintypes.HANDLE, DWORD_PTR] SetProcessAffinityMask.restype = bool GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess GetCurrentProcess.argtypes = [] GetCurrentProcess.restype = ctypes.wintypes.HANDLE def set_cpu_affinity(affinity_list): """ Set CPU affinity to CPUs listed (numbered 0...n-1). CPU affinity is about binding and unbinding a process to a physical CPU or a range of CPUs, so that the process in question uses only a subset of the available CPUs. #### Parameters **affinity_list** (`list`) : A list of CPU cores to which the current process will be bound. 
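        #### Example
        A minimal sketch; the core numbers are illustrative and depend on the
        machine running the benchmarks:

        ```{code-block} python
        # Pin the current process to logical CPUs 0 and 1
        set_cpu_affinity([0, 1])
        ```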
""" mask = 0 for num in affinity_list: mask |= 2**num # Pseudohandle, doesn't need to be closed handle = GetCurrentProcess() ok = SetProcessAffinityMask(handle, mask) if not ok: raise RuntimeError("SetProcessAffinityMask failed") else: try: import resource # POSIX if sys.platform == "darwin": def get_maxrss(): """ Returns the peak resident set size for the current process. On macOS, the peak resident set size is the maximum amount of memory occupied by the process's resident set at any point in time. #### Returns **peak_resident_set_size** (`int`) : The peak resident set size for the current process. """ # OSX getrusage returns maxrss in bytes # https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getrusage.2.html return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss else: def get_maxrss(): """ Returns the peak resident set size for the current process. On Linux, the peak resident set size is the maximum amount of memory occupied by the process's resident set at any point in time. #### Returns **peak_resident_set_size** (`int`) : The peak resident set size for the current process. """ # Linux, *BSD return maxrss in kilobytes return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1024 except ImportError: pass def set_cpu_affinity(affinity_list): """ Set CPU affinity to CPUs listed (numbered 0...n-1). CPU affinity is about binding and unbinding a process to a physical CPU or a range of CPUs, so that the process in question uses only a subset of the available CPUs. #### Parameters **affinity_list** (`list`) : A list of CPU cores to which the current process will be bound. """ if hasattr(os, "sched_setaffinity"): os.sched_setaffinity(0, affinity_list) else: import psutil p = psutil.Process() if hasattr(p, "cpu_affinity"): p.cpu_affinity(affinity_list) asv_runner-0.2.1/asv_runner/benchmarks/mark.py000066400000000000000000000330751456224025000215070ustar00rootroot00000000000000import functools import inspect class SkipNotImplemented(NotImplementedError): """ Exception raised to indicate a skipped benchmark. This exception inherits from `NotImplementedError`. It's used within an ASV benchmark to skip the current benchmark for certain parameters or conditions that are not implemented or do not apply. #### Attributes **message** (`str`) : A string that provides a more detailed explanation of the skip reason. #### Warning Use of `SkipNotImplemented` is less efficient than the `@skip_for_params` decorator as the setup for the benchmarks and the benchmarks themselves are run before the error is raised, thus consuming unnecessary resources. Use `@skip_for_params` where possible to avoid running the benchmarks that should be skipped. #### Notes This is mainly provided for backwards compatibility with the behavior of asv before 0.5 wherein individual benchmarks could raise and be skipped. From 0.5 onwards, only the setup function is meant to raise `NotImplemented` for skipping parameter sets. #### Example This exception might be used in a scenario where a benchmark should be skipped for certain conditions or parameters: ```{code-block} python class Simple: params = ([False, True]) param_names = ["ok"] def time_failure(self, ok): if ok: x = 34.2**4.2 else: raise SkipNotImplemented ``` """ def __init__(self, message=""): """ Initialize a new instance of `SkipNotImplemented`. #### Parameters **message** (`str`) : A string that provides a more detailed explanation of the skip reason. Optional; if not provided, defaults to an empty string. 
""" self.message = message super().__init__(self.message) def skip_for_params(skip_params_list): """ Decorator to set skip parameters for a benchmark function. #### Parameters **skip_params_list** (`list`): A list of tuples, each specifying a combination of parameter values that should cause the benchmark function to be skipped. #### Returns **decorator** (`function`): A decorator function that sets the skip parameters for the benchmark function. #### Notes The `skip_for_params` decorator can be used to specify conditions under which a benchmark function should be skipped. Each tuple in the list represents a combination of parameter values which, if received by the benchmark function, will cause that function to be skipped during the benchmarking process. The decorated function's `skip_params` attribute will be set with the provided skip parameters, which will be used during the benchmarking process. Using this decorator is always more efficient than raising a `SkipNotImplemented` exception within the benchmark function, as the function setup and execution can be avoided entirely for skipped parameters. #### Example ```{code-block} python class Simple: params = ([False, True]) param_names = ["ok"] @skip_for_params([(False, )]) def time_failure(self, ok): if ok: x = 34.2**4.2 ``` """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) setattr(wrapper, "skip_params", skip_params_list) return wrapper return decorator def skip_benchmark(func): """ Decorator to mark a function as skipped for benchmarking. #### Parameters **func** (function) : The function to be marked as skipped. #### Returns **wrapper** (function) : A wrapped function that is marked to be skipped for benchmarking. #### Notes The `skip_benchmark` decorator can be used to mark a specific function as skipped for benchmarking. When the decorated function is encountered during benchmarking, it will be skipped and not included in the benchmarking process. """ @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) setattr(wrapper, "skip_benchmark", True) return wrapper def skip_benchmark_if(condition): """ Decorator to skip benchmarking of a function if a condition is met. #### Parameters **condition** (`bool`) : A boolean that indicates whether to skip benchmarking. If `True`, the decorated function will be skipped for benchmarking. If `False`, the decorated function will be benchmarked as usual. #### Returns **decorator** (function) : A decorator function that sets the condition under which the decorated function will be skipped for benchmarking. #### Notes The `skip_if` decorator can be used to skip the benchmarking of a specific function if a condition is met. It is faster than raising `SkipNotImplemented` as it skips the `setup()` as well. """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) if condition: setattr(wrapper, "skip_benchmark", True) return wrapper return decorator def skip_params_if(skip_params_list, condition): """ Decorator to set skip parameters for a benchmark function if a condition is met. #### Parameters **skip_params_list** (`list`): A list specifying the skip parameters for the benchmark function. **condition** (`bool`) : A boolean that indicates whether to set the skip parameters. If `True`, the skip parameters will be set for the decorated function. If `False`, no parameters will be skipped. 
#### Returns **decorator** (function): A decorator function that sets the skip parameters for the benchmark function if the condition is met. #### Notes The `skip_params_if` decorator can be used to specify skip parameters for a benchmark function if a condition is met. """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) if condition: setattr(wrapper, "skip_params", skip_params_list) return wrapper return decorator def parameterize_class_with(param_dict): """ Class Decorator to set benchmark parameters for a class. #### Parameters **param_dict** (`dict`): A dictionary specifying the parameters for the benchmark class. The keys represent the parameter names, and the values are lists of values for those parameters. #### Returns **decorator** (function): A class decorator that sets the parameters for the benchmark functions. #### Notes The `parameterize_class_with` decorator can be used to specify parameters for a benchmark class. The parameters are defined as a dictionary, where keys are the parameter names and values are lists of respective values. The decorated class's `params` and `param_names` attributes will be set with the provided parameters and names, which will be used during the benchmarking process. This decorator will overwrite any existing `params` and `param_names` attributes in the class. """ def decorator(cls): if not inspect.isclass(cls): raise TypeError( "The parameterize_class_with decorator can only be used with classes" ) # Handle the single parameter case separately. if len(param_dict) > 1: cls.params = list(param_dict.values()) else: cls.params = list(param_dict.values())[0] cls.param_names = list(param_dict.keys()) return cls return decorator def parameterize_func_with(param_dict): """ Function Decorator to set benchmark parameters for a function. #### Parameters **param_dict** (`dict`): A dictionary specifying the parameters for the benchmark function. The keys represent the parameter names, and the values are lists of values for those parameters. #### Returns **decorator** (function): A function decorator that sets the parameters for the benchmark function. #### Notes The `parameterize_func_with` decorator can be used to specify parameters for a benchmark function. The parameters are defined as a dictionary, where keys are the parameter names and values are lists of respective values. The decorated function's `params` and `param_names` attributes will be set with the provided parameters and names, which will be used during the benchmarking process. This decorator will overwrite any existing `params` and `param_names` attributes in the function, and it should not be used with methods of a class. """ def decorator(func): if inspect.isclass(func) or inspect.ismethod(func): raise TypeError( "The parameterize_func_with decorator can only be used with functions" ) if len(param_dict) > 1: func.params = list(param_dict.values()) else: func.params = list(param_dict.values())[0] func.param_names = list(param_dict.keys()) return func return decorator def parameterize(param_dict): """ Decorator to set benchmark parameters for a function or a class. #### Parameters **param_dict** (`dict`): A dictionary specifying the parameters for the benchmark. The keys represent the parameter names, and the values are lists of values for those parameters. #### Returns **decorator** (function): A function or class decorator that sets the parameters for the benchmark. 
#### Notes The `parameterize` decorator can be used to specify parameters for a benchmark function or class. The parameters are defined as a dictionary, where keys are the parameter names and values are lists of respective values. The decorated function or class's `params` and `param_names` attributes will be set with the provided parameters and names, which will be used during the benchmarking process. """ def decorator(obj): if inspect.isclass(obj): return parameterize_class_with(param_dict)(obj) elif callable(obj): return parameterize_func_with(param_dict)(obj) else: raise TypeError( "The parameterize decorator can only be used with functions or classes" ) return decorator def timeout_class_at(seconds): """ Class Decorator to set timeout for a class. #### Parameters **seconds** (`float`) : The number of seconds after which the class methods should be timed out. #### Returns **decorator** (function) : A class decorator that sets the timeout for the class. #### Notes The `timeout_class_at` decorator can be used to specify a timeout for all methods in a class. The timeout is stored as an attribute on the class and applies to all its methods. Individual methods can override this timeout by using the `timeout_func_at` or `timeout_at` decorators. """ def decorator(cls): if not inspect.isclass(cls): raise TypeError( "The timeout_class_with decorator can only be used with classes" ) cls.timeout = seconds return cls return decorator def timeout_func_at(seconds): """ Function Decorator to set timeout for a function. #### Parameters **seconds** (`float`) : The number of seconds after which the function should be timed out. #### Returns **decorator** (function) : A function decorator that sets the timeout for the function. #### Notes The `timeout_func_at` decorator can be used to specify a timeout for a specific function. This is particularly useful for benchmarking, where you might want to stop execution of functions that take too long. The timeout is stored as an attribute on the function. """ def decorator(func): if inspect.isclass(func): raise TypeError( "The timeout_func_with decorator can only be used with functions" ) @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) setattr(wrapper, "timeout", seconds) return wrapper return decorator def timeout_at(seconds): """ Decorator to set a timeout for a function or a class. #### Parameters **seconds** (`float`) : The number of seconds after which the function or the class methods should be timed out. #### Returns **decorator** (function) : A decorator that sets the timeout for the function or the class. #### Notes The `timeout_at` decorator can be used to set a specific timeout for a function or all methods in a class. If applied to a class, the timeout is stored as an attribute on the class and applies to all its methods. Individual methods can override this timeout by using the `timeout_func_at` or `timeout_at` decorators. If applied to a function, the timeout is stored directly on the function. 
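#### Example

A minimal sketch (the benchmark names and timeouts are illustrative) showing
the decorator applied to a single benchmark function and to a whole class:

```{code-block} python
from asv_runner.benchmarks.mark import timeout_at

@timeout_at(120.0)
def time_slow_io():
    # May run for up to two minutes before being timed out.
    sum(range(10**6))

@timeout_at(60.0)
class LinAlgSuite:
    def time_solve(self):
        # Inherits the 60 second class-level timeout.
        sorted(range(10**5, 0, -1))
```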
""" def decorator(obj): if inspect.isclass(obj): return timeout_class_at(seconds)(obj) elif callable(obj): return timeout_func_at(seconds)(obj) else: raise TypeError( "The parameterize decorator can only be used with functions or classes" ) return decorator __all__ = [ "parameterize", "skip_benchmark", "skip_benchmark_if", "skip_for_params", "skip_params_if", "timeout_at", ] asv_runner-0.2.1/asv_runner/benchmarks/mem.py000066400000000000000000000043101456224025000213210ustar00rootroot00000000000000import copy import re from ._base import Benchmark from ._exceptions import NotRequired try: from pympler.asizeof import asizeof except ImportError: raise NotRequired("MemBenchmarks not requested or pympler not found") class MemBenchmark(Benchmark): """ Represents a single benchmark for tracking the memory consumption of an object. The MemBenchmark class provides a benchmark type for tracking the memory consumption of the object returned by the benchmark function. #### Attributes **name_regex** (`re.Pattern`) : The regular expression used to match the names of functions that should be considered as memory benchmarks. **type** (`str`) : The type of the benchmark. The default type is "memory". **unit** (`str`) : The unit of the value that's being tracked. By default, this is "bytes". #### Methods **run(*param)** : Runs the benchmark function and returns the memory consumption of the object returned by the function. """ name_regex = re.compile("^(Mem[A-Z_].+)|(mem_.+)$") def __init__(self, name, func, attr_sources): """ Initializes a new instance of the MemBenchmark class. #### Parameters **name** (`str`) : The name of the benchmark. **func** (`callable`) : The function to benchmark. **attr_sources** (`list`) : A list of objects to search for attributes that might be used by the benchmark. """ Benchmark.__init__(self, name, func, attr_sources) self.type = "memory" self.unit = "bytes" def run(self, *param): """ Runs the benchmark function and measures the memory consumption of the object returned by the function. #### Parameters **param** (`tuple`) : The parameters to pass to the benchmark function. #### Returns **result** (`int`) : The memory consumption in bytes of the object returned by the benchmark function. """ obj = self.func(*param) sizeof2 = asizeof([obj, obj]) sizeofcopy = asizeof([obj, copy.copy(obj)]) return sizeofcopy - sizeof2 export_as_benchmark = [MemBenchmark] asv_runner-0.2.1/asv_runner/benchmarks/peakmem.py000066400000000000000000000036551456224025000221750ustar00rootroot00000000000000import re from ._base import Benchmark from ._maxrss import get_maxrss class PeakMemBenchmark(Benchmark): """ Represents a single benchmark for tracking the peak memory consumption of the whole program. The PeakMemBenchmark class provides a benchmark type for tracking the peak memory consumption of the program while the benchmark function is running. #### Attributes **name_regex** (`re.Pattern`) : The regular expression used to match the names of functions that should be considered as peak memory benchmarks. **type** (`str`) : The type of the benchmark. The default type is "peakmemory". **unit** (`str`) : The unit of the value that's being tracked. By default, this is "bytes". #### Methods **run(*param)** : Runs the benchmark function and returns its result. """ name_regex = re.compile("^(PeakMem[A-Z_].+)|(peakmem_.+)$") def __init__(self, name, func, attr_sources): """ Initializes a new instance of the PeakMemBenchmark class. #### Parameters **name** (`str`) : The name of the benchmark. 
**func** (`callable`) : The function to benchmark. **attr_sources** (`list`) : A list of objects to search for attributes that might be used by the benchmark. """ Benchmark.__init__(self, name, func, attr_sources) self.type = "peakmemory" self.unit = "bytes" def run(self, *param): """ Runs the benchmark function and measures its peak memory consumption. #### Parameters **param** (`tuple`) : The parameters to pass to the benchmark function. #### Returns **result** (`int`) : The peak memory consumption in bytes of the program while the benchmark function was running. """ self.func(*param) return get_maxrss() export_as_benchmark = [PeakMemBenchmark] asv_runner-0.2.1/asv_runner/benchmarks/time.py000066400000000000000000000235121456224025000215060ustar00rootroot00000000000000import re import sys import timeit from ._base import Benchmark, _get_first_attr wall_timer = timeit.default_timer class TimeBenchmark(Benchmark): """ Represents a single benchmark for timing. This class inherits from `Benchmark` and is specialized for timing benchmarks. #### Attributes **name_regex** (`re.Pattern`) : Regular expression that matches the name of the timing benchmarks. **rounds** (`int`) : Number of rounds to execute the benchmark. **repeat** (`int`) : Number of times the code will be repeated during each round. **min_run_count** (`int`) : Minimum number of runs required for the benchmark. **number** (`int`) : The argument to `timeit.timeit`, specifying the number of executions of the setup statement. **sample_time** (`float`) : The target time for each sample. **warmup_time** (`float`) : The time spent warming up the benchmark. **timer** (`callable`) : The timer to use, by default it uses `timeit.default_timer`. """ name_regex = re.compile("^(Time[A-Z_].+)|(time_.+)$") def __init__(self, name, func, attr_sources): """ Initialize a new instance of `TimeBenchmark`. #### Parameters **name** (`str`) : The name of the benchmark. **func** (`callable`) : The function to benchmark. **attr_sources** (`list`) : A list of objects from which to draw attributes. """ Benchmark.__init__(self, name, func, attr_sources) self.type = "time" self.unit = "seconds" self._attr_sources = attr_sources old = int( _get_first_attr(self._attr_sources, "processes", 2) ) # backward compat. self.rounds = int(_get_first_attr(self._attr_sources, "rounds", old)) self._load_vars() def _load_vars(self): """Loads benchmark variables from attribute sources.""" self.repeat = _get_first_attr(self._attr_sources, "repeat", 0) self.min_run_count = _get_first_attr(self._attr_sources, "min_run_count", 2) self.number = int(_get_first_attr(self._attr_sources, "number", 0)) self.sample_time = _get_first_attr(self._attr_sources, "sample_time", 0.01) self.warmup_time = _get_first_attr(self._attr_sources, "warmup_time", -1) self.timer = _get_first_attr(self._attr_sources, "timer", wall_timer) def do_setup(self): """Execute the setup method and load variables.""" result = Benchmark.do_setup(self) # For parameterized tests, setup() is allowed to change these self._load_vars() return result def _get_timer(self, *param): """Get a `timeit.Timer` for the current benchmark.""" if param: def func(): self.func(*param) else: func = self.func timer = timeit.Timer(stmt=func, setup=self.redo_setup, timer=self.timer) return timer def run(self, *param): """ Run the benchmark with the given parameters. #### Parameters **param** (`tuple`) : The parameters to pass to the benchmark function. #### Returns **result** (`dict`) : A dictionary with the benchmark results. 
It contains the samples taken and the number of times the function was called in each sample. #### Notes The benchmark timing method is designed to adaptively find an optimal `number` of function executions to time based on the estimated performance. This number is then used for the final timings. The warmup time is determined based on the Python interpreter in use. PyPy and GraalPython need longer warmup times due to their JIT compilers. For CPython, a short warmup time is used to account for transient effects such as OS scheduling. The `repeat` attribute specifies how many times to run the function for timing. It can be an integer, meaning the function is run that many times, or a tuple of three values, specifying the minimum number of runs, the maximum number of runs, and the maximum total time to spend on runs. After obtaining the timing samples, each sample is divided by the `number` of function executions to get the average time per function call, and these values are returned as the "samples" in the result. """ warmup_time = self.warmup_time if warmup_time < 0: if "__pypy__" in sys.modules: warmup_time = 1.0 elif "__graalpython__" in sys.modules: warmup_time = 5.0 else: # Transient effects exist also on CPython, e.g. from # OS scheduling warmup_time = 0.1 timer = self._get_timer(*param) try: min_repeat, max_repeat, max_time = self.repeat except (ValueError, TypeError): if self.repeat == 0: min_repeat = 1 max_repeat = 10 max_time = 20.0 if self.rounds > 1: max_repeat //= 2 max_time /= 2.0 else: min_repeat = self.repeat max_repeat = self.repeat max_time = self.timeout # XXX: This is a bug, needed for --quick # gh-1308 in asv if max_time is None: max_time = 60.0 min_repeat = int(min_repeat) max_repeat = int(max_repeat) max_time = float(max_time) samples, number = self.benchmark_timing( timer, min_repeat, max_repeat, max_time=max_time, warmup_time=warmup_time, number=self.number, min_run_count=self.min_run_count, ) samples = [s / number for s in samples] return {"samples": samples, "number": number} def benchmark_timing( self, timer, min_repeat, max_repeat, max_time, warmup_time, number, min_run_count, ): """ Benchmark the timing of the function execution. #### Parameters **timer** (`timeit.Timer`) : The timer to use for the benchmarking. **min_repeat** (`int`) : The minimum number of times to repeat the function execution. **max_repeat** (`int`) : The maximum number of times to repeat the function execution. **max_time** (`float`) : The maximum total time to spend on the benchmarking. **warmup_time** (`float`) : The time spent warming up the benchmark. **number** (`int`) : The number of executions of the setup statement. **min_run_count** (`int`) : The minimum number of runs required for the benchmark. #### Returns **result** (`tuple`) : A tuple with the samples taken and the number of times the function was called in each sample. #### Notes The `too_slow` internal function is used to stop taking samples when certain limits are exceeded. These limits are the minimum run count, the minimum repeat count, and the maximum time. If `number` is zero, a suitable number of function executions is estimated, and the system is warmed up at the same time. If the warmup time is greater than zero, a warmup phase is initiated where the function is called repeatedly until the warmup time has passed. After these initial steps, the function execution times are sampled and added to the `samples` list, stopping when reaching the maximum repeat count or when the `too_slow` function indicates to stop. 
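#### Example

A sketch of a time benchmark that tunes the attributes consumed by this
routine (the values are illustrative, not recommendations):

```{code-block} python
class TimeSort:
    # (min_repeat, max_repeat, max_total_time_in_seconds)
    repeat = (3, 10, 30.0)
    number = 50         # executions per sample; 0 selects a number adaptively
    warmup_time = 0.5   # seconds spent warming up before sampling
    sample_time = 0.05  # target wall time of a single sample

    def time_sorted(self):
        sorted(range(1000, 0, -1))
```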
""" sample_time = self.sample_time start_time = wall_timer() run_count = 0 samples = [] def too_slow(num_samples): # stop taking samples if limits exceeded if run_count < min_run_count: return False if num_samples < min_repeat: return False return wall_timer() > start_time + warmup_time + max_time if number == 0: # Select number & warmup. # # This needs to be done at the same time, because the # benchmark timings at the beginning can be larger, and # lead to too small number being selected. number = 1 while True: self._redo_setup_next = False start = wall_timer() timing = timer.timeit(number) wall_time = wall_timer() - start actual_timing = max(wall_time, timing) run_count += number if actual_timing >= sample_time: if wall_timer() > start_time + warmup_time: break else: try: p = min(10.0, max(1.1, sample_time / actual_timing)) except ZeroDivisionError: p = 10.0 number = max(number + 1, int(p * number)) if too_slow(1): return [timing], number elif warmup_time > 0: # Warmup while True: self._redo_setup_next = False timing = timer.timeit(number) run_count += number if wall_timer() >= start_time + warmup_time: break if too_slow(1): return [timing], number # Collect samples while len(samples) < max_repeat: timing = timer.timeit(number) run_count += number samples.append(timing) if too_slow(len(samples)): break return samples, number export_as_benchmark = [TimeBenchmark] asv_runner-0.2.1/asv_runner/benchmarks/timeraw.py000066400000000000000000000117171456224025000222240ustar00rootroot00000000000000import re import subprocess import sys import textwrap from ._base import _get_first_attr from .time import TimeBenchmark class _SeparateProcessTimer: """ This class provides a timer that runs a given function in a separate Python process. The function should return the statement to be timed. This statement is executed using the Python timeit module in a new Python process. The execution time is then returned. #### Attributes **subprocess_tmpl** (`str`) : The template Python code to be run in the subprocess. It imports necessary modules and prints the execution time of the statement. **func** (`callable`) : The function to be timed. This function should return a string of Python code to be executed, or a tuple of two strings: the code to be executed and the setup code to be run before timing. #### Methods **timeit(number)** : Run the function's code `number` times in a separate Python process, and return the execution time. """ subprocess_tmpl = textwrap.dedent( ''' from __future__ import print_function from timeit import timeit, default_timer as timer print(repr(timeit(stmt="""{stmt}""", setup="""{setup}""", number={number}, timer=timer))) ''' ).strip() def __init__(self, func): self.func = func def timeit(self, number): """ Run the function's code `number` times in a separate Python process, and return the execution time. #### Parameters **number** (`int`) : The number of times to execute the function's code. #### Returns **time** (`float`) : The time it took to execute the function's code `number` times. #### Notes The function's code is executed in a separate Python process to avoid interference from the parent process. The function can return either a single string of code to be executed, or a tuple of two strings: the code to be executed and the setup code to be run before timing. 
""" stmt = self.func() if isinstance(stmt, tuple): stmt, setup = stmt else: setup = "" stmt = textwrap.dedent(stmt) setup = textwrap.dedent(setup) stmt = stmt.replace(r'"""', r"\"\"\"") setup = setup.replace(r'"""', r"\"\"\"") code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number) res = subprocess.check_output([sys.executable, "-c", code]) return float(res.strip()) class TimerawBenchmark(TimeBenchmark): """ Represents a benchmark for tracking timing benchmarks run once in a separate process. This class inherits from `TimeBenchmark` and modifies it to run the benchmark function in a separate process. This is useful for isolating the benchmark from any potential side effects caused by other Python code running in the same process. #### Attributes **name_regex** (`re.Pattern`) : The regular expression used to match the names of functions that should be considered as raw timing benchmarks. **number** (`int`) : The number of times to execute the function's code. By default, the function's code is executed once. #### Methods **_load_vars()** : Loads variables for the benchmark from the function's attributes or from default values. **_get_timer(*param)** : Returns a timer that runs the benchmark function in a separate process. **do_profile(filename=None)** : Raises a ValueError. Raw timing benchmarks cannot be profiled. """ name_regex = re.compile("^(Timeraw[A-Z_].+)|(timeraw_.+)$") def _load_vars(self): """ Loads variables for the benchmark from the function's attributes or from default values. """ TimeBenchmark._load_vars(self) self.number = int(_get_first_attr(self._attr_sources, "number", 1)) del self.timer def _get_timer(self, *param): """ Returns a timer that runs the benchmark function in a separate process. #### Parameters **param** (`tuple`) : The parameters to pass to the benchmark function. #### Returns **timer** (`_SeparateProcessTimer`) : A timer that runs the function in a separate process. """ if param: def func(): self.func(*param) else: func = self.func return _SeparateProcessTimer(func) def do_profile(self, filename=None): """ Raises a ValueError. Raw timing benchmarks cannot be profiled. #### Parameters **filename** (`str`, optional) : The name of the file to which to save the profile. Default is None. #### Raises **ValueError** : Always. Raw timing benchmarks cannot be profiled. """ raise ValueError("Raw timing benchmarks cannot be profiled") export_as_benchmark = [TimerawBenchmark] asv_runner-0.2.1/asv_runner/benchmarks/track.py000066400000000000000000000035701456224025000216560ustar00rootroot00000000000000import re from ._base import Benchmark, _get_first_attr class TrackBenchmark(Benchmark): """ Represents a single benchmark for tracking an arbitrary value. The TrackBenchmark class provides a benchmark type for tracking any arbitrary value that your code produces. This can be useful when you need to track a value that isn't related to time or memory usage. #### Attributes **name_regex** (`re.Pattern`) : The regular expression used to match the names of functions that should be considered as track benchmarks. **type** (`str`) : The type of the benchmark. The default type is "track". **unit** (`str`) : The unit of the value that's being tracked. By default, this is "unit". #### Methods **run(*param)** : Runs the benchmark function and returns its result. """ name_regex = re.compile("^(Track[A-Z_].+)|(track_.+)$") def __init__(self, name, func, attr_sources): """ Initializes a new instance of the TrackBenchmark class. 
#### Parameters **name** (`str`) : The name of the benchmark. **func** (`callable`) : The function to benchmark. **attr_sources** (`list`) : A list of objects to search for attributes that might be used by the benchmark. """ Benchmark.__init__(self, name, func, attr_sources) self.type = _get_first_attr(attr_sources, "type", "track") self.unit = _get_first_attr(attr_sources, "unit", "unit") def run(self, *param): """ Runs the benchmark function and returns its result. #### Parameters **param** (`tuple`) : The parameters to pass to the benchmark function. #### Returns **result** : The result of the benchmark function. """ return self.func(*param) export_as_benchmark = [TrackBenchmark] asv_runner-0.2.1/asv_runner/check.py000066400000000000000000000016231456224025000175070ustar00rootroot00000000000000import sys from ._aux import update_sys_path from .discovery import disc_benchmarks def _check(args): """ Checks all the discovered benchmarks in the provided benchmark directory. #### Parameters **args** (`tuple`) : A tuple containing the benchmark directory. #### Notes This function updates the system path with the root directory of the benchmark suite. Then, it iterates over all benchmarks discovered in the root directory. For each benchmark, it calls the check method of the benchmark and updates the 'ok' flag. If all benchmarks pass the check, it exits with a status code 0. If any benchmark fails, it exits with a status code 1. """ (benchmark_dir,) = args update_sys_path(benchmark_dir) ok = True for benchmark in disc_benchmarks(benchmark_dir): ok = ok and benchmark.check(benchmark_dir) sys.exit(0 if ok else 1) asv_runner-0.2.1/asv_runner/console.py000066400000000000000000000337341456224025000201040ustar00rootroot00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ A set of utilities for writing output to the console. """ import contextlib import locale import logging import os import sys import textwrap import time from asv_runner import util WIN = os.name == "nt" def isatty(file): """ Determines if a file is a tty. #### Parameters **file** (`file-like object`) : The file-like object to check. #### Returns **isatty** (`bool`) : Returns `True` if the file is a tty, `False` otherwise. #### Notes Most built-in Python file-like objects have an `isatty` member, but some user-defined types may not. In such cases, this function assumes those are not ttys. """ return file.isatty() if hasattr(file, "isatty") else False def _color_text(text, color): """ Returns a string wrapped in ANSI color codes for coloring the text in a terminal. #### Parameters **text** (`str`) : The string to colorize. **color** (`str`) : An ANSI terminal color name. Must be one of the following: 'black', 'red', 'green', 'brown', 'blue', 'magenta', 'cyan', 'lightgrey', 'default', 'darkgrey', 'lightred', 'lightgreen', 'yellow', 'lightblue', 'lightmagenta', 'lightcyan', 'white', or '' (the empty string). #### Returns **colored_text** (`str`) : The input string, bounded by the appropriate ANSI color codes. #### Notes This function wraps the input text with ANSI color codes based on the given color. It won't actually affect the text until it is printed to the terminal. 
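#### Example

A small sketch of wrapping a message in green escape codes; the returned
string only renders as color once printed to a terminal:

```{code-block} python
msg = _color_text("benchmark passed", "green")
# msg == "\033[0;32mbenchmark passed\033[0m"
print(msg)
```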
""" color_mapping = { "black": "0;30", "red": "0;31", "green": "0;32", "brown": "0;33", "blue": "0;34", "magenta": "0;35", "cyan": "0;36", "lightgrey": "0;37", "default": "0;39", "darkgrey": "1;30", "lightred": "1;31", "lightgreen": "1;32", "yellow": "1;33", "lightblue": "1;34", "lightmagenta": "1;35", "lightcyan": "1;36", "white": "1;37", } color_code = color_mapping.get(color, "0;39") return f"\033[{color_code}m{text}\033[0m" # A dictionary of Unicode characters that have reasonable representations in ASCII. # This dictionary contains Unicode characters as keys and their corresponding ASCII # representations as values. This allows for convenient replacement of these specific # Unicode characters with ASCII ones to prevent them from being replaced by '?'. # # The mapping currently includes: # - 'μ' maps to 'u' # - '·' maps to '-' # - '±' maps to '~' # # You can find additional characters that might need an entry using: # `grep -P -n '[^\x00-\x7F]' -r *` # in the `asv` source directory. _unicode_translations = {ord("μ"): "u", ord("·"): "-", ord("±"): "~"} def _write_with_fallback(s, fileobj): """ Writes the supplied string to the given file-like object, handling potential UnicodeEncodeErrors by falling back to the locale's preferred encoding. #### Parameters `s` (`str`): The Unicode string to be written to the file-like object. Raises a `ValueError` if `s` is not a Unicode string. `fileobj` (file-like object): The file-like object to which the string `s` is to be written. On Python 3, this must be a text stream. On Python 2, this must be a `file` byte stream. #### Notes This function first tries to write the input string `s` to the file object `fileobj`. If a `UnicodeError` occurs during this process (indicating that the string contains characters not representable in the file's encoding), the function falls back to encoding the string in the locale's preferred encoding before writing. If the string `s` still cannot be encoded in the locale's preferred encoding, the function translates the string to replace problematic Unicode characters with ASCII ones using the `_unicode_translations` dictionary, and then encodes and writes the resulting string to `fileobj` using the "replace" error handling scheme (which replaces any non-encodable characters with a suitable replacement marker). After the write operation, the function flushes the file object's output buffer to ensure that the written data is actually saved to the file. """ if not isinstance(s, str): raise ValueError("Input string is not a Unicode string") with contextlib.suppress(UnicodeError): fileobj.write(s) return # Fall back to writing bytes enc = locale.getpreferredencoding() try: b = s.encode(enc) except UnicodeError: s = s.translate(_unicode_translations) b = s.encode(enc, errors="replace") fileobj.flush() fileobj.buffer.write(b) def color_print(*args, **kwargs): """ Prints colored and styled text to the terminal using ANSI escape sequences. #### Parameters *args (`tuple` of `str`): The positional arguments should come in pairs (`msg`, `color`), where `msg` is the string to display and `color` is the color to display it in. `color` is an ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). `file` (writable file-like object, optional): Where to write to. Defaults to `sys.stdout`. 
If `file` is not a tty (as determined by calling its `isatty` member, if one exists), no coloring will be included. It's passed as a keyword argument. `end` (`str`, optional): The ending of the message. Defaults to "\n". The `end` will be printed after resetting any color or font state. It's passed as a keyword argument. #### Notes This function allows you to print text in various colors to the console, which can be helpful for distinguishing different kinds of output or for drawing attention to particular messages. It works by applying ANSI escape sequences to the input strings according to the specified colors. These escape sequences are interpreted by the terminal emulator to apply the specified colors and styles. #### Example ```{code-block} python color_print('This is the color ', 'default', 'GREEN', 'green') ``` """ file = kwargs.get("file", sys.stdout) end = kwargs.get("end", "\n") if isatty(file) and not WIN: for i in range(0, len(args), 2): msg = args[i] color = "" if i + 1 == len(args) else args[i + 1] if color: msg = _color_text(msg, color) _write_with_fallback(msg, file) else: for i in range(0, len(args), 2): msg = args[i] _write_with_fallback(msg, file) _write_with_fallback(end, file) def get_answer_default(prompt, default, use_defaults=False): """ Prompts the user for input and returns the entered value or a default. #### Parameters `prompt` (`str`): The string that is presented to the user. `default` (any): The value returned if the user doesn't enter anything and just hits Enter. This value is also shown in the prompt to indicate to the user what the default is. `use_defaults` (`bool`, optional): If True, the function will immediately return the default value without prompting the user for input. Defaults to False. #### Returns The user's input, or the provided default value if the user didn't enter anything. #### Notes This function enhances the built-in `input` function by allowing a default value to be specified, which is returned if the user doesn't enter anything. 
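#### Example

A minimal sketch (the prompt text is illustrative); pressing Enter accepts the
default, and `use_defaults=True` returns it without prompting at all:

```{code-block} python
rounds = get_answer_default("Number of rounds", 2)
# Non-interactive runs can force the default:
rounds = get_answer_default("Number of rounds", 2, use_defaults=True)
```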
""" color_print(f"{prompt} [{default}]: ", end="") if use_defaults: return default x = input() return default if x.strip() == "" else x def truncate_left(s, l): return f"...{s[-(l - 3):]}" if len(s) > l else s class Log: def __init__(self): self._indent = 1 self._total = 0 self._count = 0 self._logger = logging.getLogger() self._needs_newline = False self._last_dot = time.time() self._colorama = False if sys.platform in {"win32", "cli"}: try: import colorama colorama.init() self._colorama = True except Exception as exc: print(f"On Windows or cli, colorama is suggested, but got {exc}") def _stream_formatter(self, record): """ The formatter for standard output """ if self._needs_newline: color_print("") parts = record.msg.split("\n", 1) first_line = parts[0] rest = None if len(parts) == 1 else parts[1] indent = self._indent + 1 continued = getattr(record, "continued", False) if self._total: progress_msg = f"[{self._count / self._total:6.02%}] " if not continued: color_print(progress_msg, end="") indent += len(progress_msg) if not continued: color_print("·" * self._indent, end="") color_print(" ", end="") else: color_print(" " * indent, end="") if hasattr(record, "color"): color = record.color elif record.levelno < logging.DEBUG: color = "default" elif record.levelno < logging.INFO: color = "default" elif record.levelno < logging.WARN: if self._indent == 1: color = "green" elif self._indent == 2: color = "blue" else: color = "default" elif record.levelno < logging.ERROR: color = "brown" else: color = "red" color_print(first_line, color, end="") if rest is not None: color_print("") detail = textwrap.dedent(rest) spaces = " " * indent for line in detail.split("\n"): color_print(spaces, end="") color_print(line) self._needs_newline = True sys.stdout.flush() @contextlib.contextmanager def indent(self): """ A context manager to increase the indentation level. """ self._indent += 1 yield self._indent -= 1 def dot(self): if isatty(sys.stdout): if time.time() > self._last_dot + 1.0: color_print(".", "darkgrey", end="") sys.stdout.flush() self._last_dot = time.time() def set_nitems(self, n): """ Set the number of remaining items to process. Each of these steps should be incremented through using `step`. Can be called multiple times. The progress percentage is ensured to be non-decreasing, except if 100% was already reached in which case it is restarted from 0%. """ try: # Ensure count/total is nondecreasing self._total = util.ceildiv(n * self._total, self._total - self._count) self._count = self._total - n except ZeroDivisionError: # Reset counting from start self._total = n self._count = 0 def step(self): """ Write that a step has been completed. A percentage is displayed along with it. If we are stepping beyond the number of items, stop counting. 
""" self._count = min(self._total, self._count + 1) def enable(self, verbose=False): sh = logging.StreamHandler() sh.emit = self._stream_formatter self._logger.addHandler(sh) if verbose: self._logger.setLevel(logging.DEBUG) else: self._logger.setLevel(logging.INFO) @contextlib.contextmanager def set_level(self, level): orig_level = self._logger.level if not self.is_debug_enabled(): self._logger.setLevel(level) try: yield finally: self._logger.setLevel(orig_level) def is_debug_enabled(self): return self._logger.getEffectiveLevel() <= logging.DEBUG def _message( self, routine, message, reserve_space=False, color=None, continued=False ): kwargs = {} extra = {} if color is not None: extra["color"] = color if continued: extra["continued"] = True if extra: kwargs["extra"] = extra if reserve_space: max_width = max(16, util.terminal_width - 33) message = truncate_left(message, max_width) self._prev_message = message routine(message, **kwargs) def info(self, *args, **kwargs): self._message(self._logger.info, *args, **kwargs) def warning(self, *args, **kwargs): self._message(self._logger.warning, *args, **kwargs) def debug(self, *args, **kwargs): self._message(self._logger.debug, *args, **kwargs) def error(self, *args, **kwargs): self._message(self._logger.error, *args, **kwargs) def add(self, msg): if self._needs_newline: _write_with_fallback(msg, sys.stdout) sys.stdout.flush() else: self.info(msg) def add_padded(self, msg): """ Final part of two-part info message. Should be preceded by a call to info/warn/...(msg, reserve_space=True) """ if self._prev_message is None: # No previous part: print as an info message self.info(msg) return padding_length = ( util.terminal_width - len(self._prev_message) - 14 - 1 - len(msg) ) if WIN: padding_length -= 1 padding = " " * padding_length self._prev_message = None self.add(f" {padding}{msg}") def flush(self): """ Flush any trailing newlines. Needs to be called before printing to stdout via other means, after using Log. """ if self._needs_newline: color_print("") self._needs_newline = False sys.stdout.flush() asv_runner-0.2.1/asv_runner/discovery.py000066400000000000000000000254201456224025000204420ustar00rootroot00000000000000import importlib import inspect import json import os import pkgutil import traceback from ._aux import update_sys_path from .benchmarks import benchmark_types def _get_benchmark(attr_name, module, klass, func): """ Retrieves benchmark function based on attribute name, module, class, and function. #### Parameters **attr_name** (`str`) : The attribute name of the function. **module** (module) : The module where the function resides. **klass** (class or None) : The class defining the function, or None if not applicable. **func** (function) : The function to be benchmarked. #### Returns **benchmark** (Benchmark instance or None) : A benchmark instance with the name of the benchmark, the function to be benchmarked, and its sources. Returns None if no matching benchmark is found or the function is marked to be skipped. #### Notes The function tries to get the `benchmark_name` from `func`. If it fails, it uses `attr_name` to match with the name regex in the benchmark types. If a match is found, it creates a new benchmark instance and returns it. If no match is found or the function is marked to be skipped, it returns None. 
""" # Check if the function has been marked to be skipped if getattr(func, "skip_benchmark", False): return try: name = func.benchmark_name except AttributeError: name = None search = attr_name else: search = name.split(".")[-1] for cls in benchmark_types: if cls.name_regex.match(search): break else: return # relative to benchmark_dir mname_parts = module.__name__.split(".", 1)[1:] if klass is None: if name is None: name = ".".join(mname_parts + [func.__name__]) sources = [func, module] else: instance = klass() func = getattr(instance, attr_name) if name is None: name = ".".join(mname_parts + [klass.__name__, attr_name]) sources = [func, instance, module] return cls(name, func, sources) def disc_modules(module_name, ignore_import_errors=False): """ Recursively imports a module and all sub-modules in the package. #### Parameters **module_name** (`str`) : The name of the module to import. **ignore_import_errors** (`bool`, optional) : Whether to ignore import errors. Default is False. #### Yields **module** (module) : The imported module in the package tree. #### Notes This function imports the given module and yields it. If `ignore_import_errors` is set to True, the function will continue executing even if the import fails and will print the traceback. If `ignore_import_errors` is set to False and the import fails, the function will raise the error. After yielding the imported module, the function looks for sub-modules within the package of the imported module and recursively imports and yields them. """ if not ignore_import_errors: module = importlib.import_module(module_name) else: try: module = importlib.import_module(module_name) except BaseException: traceback.print_exc() return yield module if getattr(module, "__path__", None): for _, name, _ in pkgutil.iter_modules(module.__path__, f"{module_name}."): yield from disc_modules(name, ignore_import_errors) def disc_benchmarks(root, ignore_import_errors=False): """ Discovers all benchmarks in a given directory tree, yielding Benchmark objects. #### Parameters **root** (`str`) : The root of the directory tree where the function begins to search for benchmarks. **ignore_import_errors** (`bool`, optional) : Specifies if import errors should be ignored. Default is False. #### Yields **benchmark** (Benchmark instance or None) : A benchmark instance containing the benchmark's name, the function to be benchmarked, and its sources if a matching benchmark is found. #### Notes For each class definition, the function searches for methods with a specific name. For each free function, it yields all functions with a specific name. The function initially imports all modules and submodules in the directory tree using the `disc_modules` function. Then, for each imported module, it searches for classes and functions that might be benchmarks. If it finds a class, it looks for methods within that class that could be benchmarks. If it finds a free function, it considers it as a potential benchmark. A potential benchmark is confirmed by the `_get_benchmark` function. If this function returns a benchmark instance, the instance is yielded. 
""" root_name = os.path.basename(root) for module in disc_modules(root_name, ignore_import_errors=ignore_import_errors): for attr_name, module_attr in ( (k, v) for k, v in module.__dict__.items() if not k.startswith("_") ): if inspect.isclass(module_attr) and not inspect.isabstract(module_attr): for name, class_attr in inspect.getmembers(module_attr): if inspect.isfunction(class_attr) or inspect.ismethod(class_attr): benchmark = _get_benchmark( name, module, module_attr, class_attr ) if benchmark is not None: yield benchmark elif inspect.isfunction(module_attr): benchmark = _get_benchmark(attr_name, module, None, module_attr) if benchmark is not None: yield benchmark def get_benchmark_from_name(root, name, extra_params=None): """ Creates a benchmark from a fully-qualified benchmark name. #### Parameters **root** (`str`) : Path to the root of a benchmark suite. **name** (`str`) : Fully-qualified name of a specific benchmark. **extra_params** (`dict`, optional) : Extra parameters to be added to the benchmark. #### Returns **benchmark** (Benchmark instance) : A benchmark instance created from the given fully-qualified benchmark name. #### Raises **ValueError** : If the provided benchmark ID is invalid or if the benchmark could not be found. #### Notes This function aims to create a benchmark from the given fully-qualified name. It splits the name using the "-" character. If "-" is present in the name, the string after the "-" is converted to an integer and is considered as the parameter index. If "-" is not present, the parameter index is set to None. The function then tries to directly import the benchmark function by guessing its import module name. If the benchmark is not found this way, the function searches for the benchmark in the directory tree root using `disc_benchmarks`. If the benchmark is still not found, it raises a ValueError. If extra parameters are provided, they are added to the benchmark. """ if "-" in name: try: name, param_idx = name.split("-", 1) param_idx = int(param_idx) except ValueError: raise ValueError(f"Benchmark id {name!r} is invalid") else: param_idx = None update_sys_path(root) benchmark = None # try to directly import benchmark function by guessing its import module name parts = name.split(".") for i in [1, 2]: path = f"{os.path.join(root, *parts[:-i])}.py" if not os.path.isfile(path): continue modname = ".".join([os.path.basename(root)] + parts[:-i]) module = importlib.import_module(modname) try: module_attr = getattr(module, parts[-i]) except AttributeError: break if i == 1 and inspect.isfunction(module_attr): benchmark = _get_benchmark(parts[-i], module, None, module_attr) break elif i == 2 and inspect.isclass(module_attr): try: class_attr = getattr(module_attr, parts[-1]) except AttributeError: break if inspect.isfunction(class_attr) or inspect.ismethod(class_attr): benchmark = _get_benchmark(parts[-1], module, module_attr, class_attr) break if benchmark is None: for benchmark in disc_benchmarks(root): if benchmark.name == name: break else: raise ValueError(f"Could not find benchmark '{name}'") if param_idx is not None: benchmark.set_param_idx(param_idx) if extra_params: class ExtraBenchmarkAttrs: pass for key, value in extra_params.items(): setattr(ExtraBenchmarkAttrs, key, value) benchmark._attr_sources.insert(0, ExtraBenchmarkAttrs) return benchmark def list_benchmarks(root, fp): """ Lists all discovered benchmarks to a file pointer as JSON. #### Parameters **root** (`str`) : Path to the root of a benchmark suite. 
**fp** (file object) : File pointer where the JSON list of benchmarks should be written. #### Notes The function updates the system path with the root directory of the benchmark suite. Then, it iterates over all benchmarks discovered in the root directory. For each benchmark, it creates a dictionary containing all attributes of the benchmark that are of types `str`, `int`, `float`, `list`, `dict`, `bool` and don't start with an underscore `_`. These attribute dictionaries are then dumped as JSON into the file pointed by `fp`. """ update_sys_path(root) # Streaming of JSON back out to the master process fp.write("[") first = True for benchmark in disc_benchmarks(root): if not first: fp.write(", ") clean = { k: v for (k, v) in benchmark.__dict__.items() if isinstance(v, (str, int, float, list, dict, bool)) and not k.startswith("_") } json.dump(clean, fp, skipkeys=True) first = False fp.write("]") def _discover(args): """ Discovers all benchmarks in the provided benchmark directory and lists them to a file. #### Parameters **args** (`tuple`) : A tuple containing benchmark directory and result file path. #### Notes The function takes a tuple as an argument. The first element of the tuple should be the path to the benchmark directory, and the second element should be the path to the result file. It opens the result file for writing and calls the `list_benchmarks` function with the benchmark directory and the file pointer of the result file. """ benchmark_dir, result_file = args with open(result_file, "w") as fp: list_benchmarks(benchmark_dir, fp) asv_runner-0.2.1/asv_runner/run.py000066400000000000000000000051321456224025000172350ustar00rootroot00000000000000import json import math import pickle from ._aux import set_cpu_affinity_from_params from .benchmarks.mark import SkipNotImplemented from .discovery import get_benchmark_from_name def _run(args): """ Runs a specified benchmark and writes the result to a file. #### Parameters **args** (`tuple`) : A tuple containing benchmark directory, benchmark id, parameters string, profile path, and result file path. #### Notes This function first loads the extra parameters and sets the CPU affinity based on them. It then creates a benchmark from the `benchmark_id`. If the benchmark has a setup cache key, it loads the cache from a file and inserts it into the benchmark parameters. Then, the function runs the setup for the benchmark. If the setup indicates that the benchmark should be skipped, it sets the result as `math.nan`. Otherwise, it runs the benchmark and profiles it if a `profile_path` is provided. After running the benchmark, it performs the teardown for the benchmark and writes the result to the `result_file`. The `args` tuple contains: - **benchmark_dir** (`str`) : The directory where the benchmarks are located. - **benchmark_id** (`str`) : The id of the benchmark to run. - **params_str** (`str`) : A string containing JSON-encoded extra parameters. - **profile_path** (`str`) : The path for profile data. "None" implies no profiling. - **result_file** (`str`) : The path to the file where the result should be written. 
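#### Example

A sketch of the argument tuple this function expects (the paths and benchmark
id are illustrative; in practice the tuple is assembled by the calling asv
process rather than written by hand):

```{code-block} python
_run((
    "benchmarks",                      # benchmark_dir
    "bench_math.TimeSuite.time_sum",   # benchmark_id
    '{"cpu_affinity": [0]}',           # params_str, JSON-encoded extras
    "None",                            # profile_path; "None" disables profiling
    "result.json",                     # result_file
))
```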
""" (benchmark_dir, benchmark_id, params_str, profile_path, result_file) = args extra_params = json.loads(params_str) set_cpu_affinity_from_params(extra_params) extra_params.pop("cpu_affinity", None) if profile_path == "None": profile_path = None benchmark = get_benchmark_from_name( benchmark_dir, benchmark_id, extra_params=extra_params ) if benchmark.setup_cache_key is not None: with open("cache.pickle", "rb") as fd: cache = pickle.load(fd) if cache is not None: benchmark.insert_param(cache) skip = benchmark.do_setup() try: if skip: result = math.nan else: try: result = benchmark.do_run() if profile_path is not None: benchmark.do_profile(profile_path) except SkipNotImplemented: # Still runs setup() though result = math.nan finally: benchmark.do_teardown() with open(result_file, "w") as fp: json.dump(result, fp) asv_runner-0.2.1/asv_runner/server.py000066400000000000000000000173361456224025000177500ustar00rootroot00000000000000import json import os import struct import sys import tempfile import time import timeit from ._aux import posix_redirect_output, update_sys_path from .discovery import disc_benchmarks from .run import _run wall_timer = timeit.default_timer def recvall(sock, size): """ Receives data from a socket until the specified size of data has been received. #### Parameters **sock** (`socket`) : The socket from which the data will be received. This socket should already be connected to the other end from which data is to be received. **size** (`int`) : The total size of data to be received from the socket. #### Returns **data** (`bytes`) : The data received from the socket. The length of this data will be equal to the size specified. #### Raises **RuntimeError** : If the socket closed before the specified size of data could be received. #### Notes This function continuously receives data from the provided socket in a loop until the total length of the received data is equal to the specified size. If the socket closes before the specified size of data could be received, a `RuntimeError` is raised. The function returns the received data as a byte string. """ data = b"" while len(data) < size: s = sock.recv(size - len(data)) data += s if not s: raise RuntimeError( "did not receive data from socket " f"(size {size}, got only {data !r})" ) return data def _run_server(args): """ Runs a server that executes benchmarks based on the received commands. #### Parameters **args** (`tuple`) : A tuple containing the benchmark directory and socket name. - `benchmark_dir` (`str`): The directory where the benchmarks are located. - `socket_name` (`str`): The name of the UNIX socket to be used for - communication. #### Raises **RuntimeError** : If the received command contains unknown data. #### Notes This function creates a server that listens on a UNIX socket for commands. It can perform two actions based on the received command: quit or preimport benchmarks. If the command is "quit", the server stops running. If the command is "preimport", the function imports all the benchmarks in the specified directory, capturing all the I/O to a file during import. After the benchmarks are imported, the function sends the contents of the output file back through the socket. If the action is not "quit" or "preimport", the function assumes it is a command to run a specific benchmark. It then runs the benchmark and waits for the results. It also handles a timeout for the benchmark execution and sends the results back through the socket. 
The function continuously accepts new commands until it receives a "quit" command or a KeyboardInterrupt. It uses UNIX domain sockets for inter-process communication. The name of the socket is passed as a parameter in `args`. The socket is created, bound to the socket name, and set to listen for connections. When a connection is accepted, the command is read from the socket, parsed, and executed accordingly. After executing the command, the server sends back the result through the socket and waits for the next command. """ import signal import socket ( benchmark_dir, socket_name, ) = args update_sys_path(benchmark_dir) # Socket I/O s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.bind(socket_name) s.listen(1) # Read and act on commands from socket while True: stdout_file = None try: conn, addr = s.accept() except KeyboardInterrupt: break try: fd, stdout_file = tempfile.mkstemp() os.close(fd) # Read command (read_size,) = struct.unpack(" start_time + timeout: # Timeout if is_timeout: os.kill(pid, signal.SIGKILL) else: os.kill(pid, signal.SIGTERM) is_timeout = True time2sleep *= 1e1 time.sleep(min(time2sleep, 0.001)) # Report result with open(stdout_file, errors="replace") as f: out = f.read() # Emulate subprocess if os.WIFSIGNALED(status): retcode = -os.WTERMSIG(status) elif os.WIFEXITED(status): retcode = os.WEXITSTATUS(status) elif os.WIFSTOPPED(status): retcode = -os.WSTOPSIG(status) else: # shouldn't happen, but fail silently retcode = -128 info = {"out": out, "errcode": -256 if is_timeout else retcode} result_text = json.dumps(info) result_text = result_text.encode("utf-8") conn.sendall(struct.pack("= 1 - alpha_min. Defaults to 0.01. #### Returns **m** (`float`) : The computed quantile from the dataset. **ci** (`tuple` of `float`) : Confidence interval (a, b), of coverage >= alpha_min. #### Notes This function assumes independence but is otherwise nonparametric. It sorts the input data and calculates the quantile using a linear interpolation method if the desired quantile lies between two data points. The confidence interval is computed using a known property of the cumulative distribution function (CDF) of a binomial distribution. This method calculates the smallest range (y[r-1], y[s-1]) for which the coverage is at least alpha_min. """ y = sorted(x) n = len(y) alpha_min = min(alpha_min, 1 - alpha_min) pa = alpha_min / 2 pb = 1 - pa a = -math.inf b = math.inf # It's known that # # Pr[X_{(r)} < m < X_{(s)}] = Pr[r <= K <= s-1], K ~ Bin(n,p) # # where cdf(m) = p defines the quantile. # # Simplest median CI follows by picking r,s such that # # F(r;n,q) <= alpha/2 # F(s;n,q) >= 1 - alpha/2 # # F(k;n,q) = sum(binom_pmf(n, j, q) for j in range(k)) # # Then (y[r-1], y[s-1]) is a CI. # If no such r or s exists, replace by +-inf. F = 0 for k, yp in enumerate(y): F += binom_pmf(n, k, q) # F = F(k+1;n,q) if F <= pa: a = yp if F >= pb: b = yp break m = quantile(y, q) return m, (a, b) class LaplacePosterior: """ Class to represent univariate Laplace posterior distribution. #### Description This class represents the univariate posterior distribution defined as `p(beta|y) = N [sum(|y_j - beta|)]**(-nu-1)` where N is the normalization factor. #### Parameters **y** (`list` of `float`) : A list of sample values from the distribution. **nu** (`float`, optional) : Degrees of freedom. Default is `len(y) - 1`. #### Attributes **mle** (`float`) : The maximum likelihood estimate for beta which is the median of y. 
#### Notes This is the posterior distribution in the Bayesian model assuming Laplace distributed noise, where `p(y|beta,sigma) = N exp(- sum_j (1/sigma) |y_j - beta|)`, `p(sigma) ~ 1/sigma`, and `nu = len(y) - 1`. The MLE for beta is `median(y)`. Applying the same approach to a Gaussian model results to `p(beta|y) = N T(t, m-1)`, `t = (beta - mean(y)) / (sstd(y) / sqrt(m))` where `T(t, nu)` is the Student t-distribution pdf, which gives the standard textbook formulas for the mean. """ def __init__(self, y, nu=None): """ Initializes an instance of the LaplacePosterior class. #### Parameters **y** (`list` of `float`): : The samples from the distribution. **nu** (`float`, optional): : The degrees of freedom. Default is `len(y) - 1`. #### Raises `ValueError`: If `y` is an empty list. #### Notes This constructor sorts the input data `y` and calculates the MLE (Maximum Likelihood Estimate). It computes a scale factor, `_y_scale`, to prevent overflows when computing unnormalized CDF integrals. The input data `y` is then shifted and scaled according to this computed scale. The method also initializes a memoization dictionary `_cdf_memo` for the unnormalized CDF, and a placeholder `_cdf_norm` for the normalization constant of the CDF. """ if len(y) == 0: raise ValueError("empty input") self.nu = len(y) - 1 if nu is None else nu # Sort input y = sorted(y) # Get location and scale so that data is centered at MLE, and # the unnormalized PDF at MLE has amplitude ~ 1/nu. # # Proper scaling of inputs is important to avoid overflows # when computing the unnormalized CDF integrals below. self.mle = quantile(y, 0.5) self._y_scale = sum(abs(yp - self.mle) for yp in y) self._y_scale *= self.nu ** (1 / (self.nu + 1)) # Shift and scale if self._y_scale != 0: self.y = [(yp - self.mle) / self._y_scale for yp in y] else: self.y = [0 for _ in y] self._cdf_norm = None self._cdf_memo = {} def _cdf_unnorm(self, beta): """ Computes the unnormalized cumulative distribution function (CDF). #### Parameters **beta** (`float`): : The upper limit of the integration for the CDF. #### Returns Returns the unnormalized CDF evaluated at `beta`. #### Notes The method computes the unnormalized CDF as: cdf_unnorm(b) = int_{-oo}^{b} 1/(sum_j |y - b'|)**(m+1) db' The method integrates piecewise, resolving the absolute values separately for each section. The results of these calculations are memoized to speed up subsequent computations. It also handles special cases, such as when `beta` is not a number (returns `beta` as is), or when `beta` is positive infinity (memoizes the integral value at the end of the list `y`). """ if beta != beta: return beta k0 = next((k for k, y in enumerate(self.y) if y > beta), len(self.y)) cdf = 0 nu = self.nu # Save some work by memoizing intermediate results if k0 - 1 in self._cdf_memo: k_start = k0 cdf = self._cdf_memo[k0 - 1] else: k_start = 0 cdf = 0 # Do the integral piecewise, resolving the absolute values for k in range(k_start, k0 + 1): c = 2 * k - len(self.y) y = sum(self.y[k:]) - sum(self.y[:k]) a = -math.inf if k == 0 else self.y[k - 1] b = beta if k == k0 else self.y[k] if c == 0: term = (b - a) / y ** (nu + 1) else: term = 1 / (nu * c) * ((a * c + y) ** (-nu) - (b * c + y) ** (-nu)) cdf += max(0, term) # avoid rounding error if k != k0: self._cdf_memo[k] = cdf if beta == math.inf: self._cdf_memo[len(self.y)] = cdf return cdf def _ppf_unnorm(self, cdfx): """ Computes the inverse function of `_cdf_unnorm`. 
#### Parameters **cdfx** (`float`): : The value for which to compute the inverse cumulative distribution function (CDF). #### Returns Returns the unnormalized quantile function evaluated at `cdfx`. #### Notes This method computes the inverse of `_cdf_unnorm`. It first finds the interval within which `cdfx` lies, then performs the inversion on this interval. Special cases are handled when the interval index `k` is 0 (the computation of `beta` involves a check for negative infinity), or when the calculated `c` is 0. The result `beta` is clipped at the upper bound of the interval, ensuring it does not exceed `self.y[k]`. """ # Find interval for k in range(len(self.y) + 1): if cdfx <= self._cdf_memo[k]: break # Invert on interval c = 2 * k - len(self.y) y = sum(self.y[k:]) - sum(self.y[:k]) nu = self.nu if k == 0: term = cdfx else: a = self.y[k - 1] term = cdfx - self._cdf_memo[k - 1] if k == 0: z = -nu * c * term beta = (z ** (-1 / nu) - y) / c if z > 0 else -math.inf elif c == 0: beta = a + term * y ** (nu + 1) else: z = (a * c + y) ** (-nu) - nu * c * term beta = (z ** (-1 / nu) - y) / c if z > 0 else math.inf if k < len(self.y): beta = min(beta, self.y[k]) return beta def pdf(self, beta): """ Computes the probability distribution function (PDF). #### Parameters **beta** (`float`) : The point at which to evaluate the PDF. #### Returns A `float` which is the probability density function evaluated at `beta`. #### Notes This function computes the PDF by exponentiating the result of `self.logpdf(beta)`. The `logpdf` method should therefore be implemented in the class that uses this method. """ return math.exp(self.logpdf(beta)) def logpdf(self, beta): """ Computes the logarithm of the probability distribution function (log-PDF). #### Parameters **beta** (`float`) : The point at which to evaluate the log-PDF. #### Returns A `float` which is the logarithm of the probability density function evaluated at `beta`. #### Notes This function computes the log-PDF by first checking if the scale of the distribution `_y_scale` is zero. If so, it returns `math.inf` if `beta` equals the maximum likelihood estimate `mle`, otherwise it returns `-math.inf`. The `beta` value is then transformed by subtracting the maximum likelihood estimate `mle` and dividing by `_y_scale`. If the unnormalized cumulative distribution function `_cdf_norm` has not been computed yet, it is computed by calling `_cdf_unnorm(math.inf)`. The function then computes the sum of absolute differences between `beta` and all points in `y`, applies the log-PDF formula and returns the result. """ if self._y_scale == 0: return math.inf if beta == self.mle else -math.inf beta = (beta - self.mle) / self._y_scale if self._cdf_norm is None: self._cdf_norm = self._cdf_unnorm(math.inf) ws = sum(abs(yp - beta) for yp in self.y) m = self.nu return ( -(m + 1) * math.log(ws) - math.log(self._cdf_norm) - math.log(self._y_scale) ) def cdf(self, beta): """ Computes the cumulative distribution function (CDF). #### Parameters **beta** (`float`) : The point at which to evaluate the CDF. #### Returns A `float` which is the value of the cumulative distribution function evaluated at `beta`. #### Notes This function computes the CDF by first checking if the scale of the distribution `_y_scale` is zero. If so, it returns 1 if `beta` is greater than the maximum likelihood estimate `mle`, and 0 otherwise. The `beta` value is then transformed by subtracting the maximum likelihood estimate `mle` and dividing by `_y_scale`. 
If the unnormalized cumulative distribution function `_cdf_norm` has not been computed yet, it is computed by calling `_cdf_unnorm(math.inf)`. The function then computes the unnormalized CDF at `beta` and normalizes it by dividing with `_cdf_norm`. """ if self._y_scale == 0: return 1.0 * (beta > self.mle) beta = (beta - self.mle) / self._y_scale if self._cdf_norm is None: self._cdf_norm = self._cdf_unnorm(math.inf) return self._cdf_unnorm(beta) / self._cdf_norm def ppf(self, cdf): """ Computes the percent point function (PPF), also known as the inverse cumulative distribution function. #### Parameters **cdf** (`float`) : The cumulative probability for which to compute the inverse CDF. It must be between 0 and 1 (inclusive). #### Returns A `float` which is the value of the percent point function evaluated at `cdf`. #### Notes This function computes the PPF by first checking if `cdf` is not between 0 and 1. If it is not, it returns `math.nan`. If the scale of the distribution `_y_scale` is zero, it returns the maximum likelihood estimate `mle`. If the unnormalized cumulative distribution function `_cdf_norm` has not been computed yet, it is computed by calling `_cdf_unnorm(math.inf)`. The function then scales `cdf` by `_cdf_norm` (making sure it does not exceed `_cdf_norm`), computes the unnormalized PPF at this scaled value, and transforms it back to the original scale. """ if cdf < 0 or cdf > 1.0: return math.nan if self._y_scale == 0: return self.mle if self._cdf_norm is None: self._cdf_norm = self._cdf_unnorm(math.inf) cdfx = min(cdf * self._cdf_norm, self._cdf_norm) beta = self._ppf_unnorm(cdfx) return beta * self._y_scale + self.mle def compute_stats(samples, number): """ Performs statistical analysis on the provided samples. #### Parameters **samples** (`list` of `float`) : A list of total times (in seconds) of benchmarks. **number** (`int`) : The number of times each benchmark was repeated. #### Returns **beta_hat** (`float`) : The estimated time per iteration. **stats** (`dict`) : A dictionary containing various statistical measures of the estimator. It includes: - **"ci_99_a"**: The lower bound of the 99% confidence interval. - **"ci_99_b"**: The upper bound of the 99% confidence interval. - **"q_25"**: The 25th percentile of the sample times. - **"q_75"**: The 75th percentile of the sample times. - **"repeat"**: The total number of samples. - **"number"**: The repeat number for each sample. #### Notes This function first checks if there are any samples. If there are none, it returns `None, None`. It then calculates the median and the 25th and 75th percentiles of the samples. If the nonparametric confidence interval estimation did not provide an estimate, it computes the posterior distribution for the location, assuming exponential noise. The Maximum Likelihood Estimate (MLE) is equal to the median. The function uses the confidence interval from that distribution to extend beyond the sample bounds if necessary. Finally, it produces the median as the result and a dictionary of the computed statistics. """ if len(samples) < 1: return None, None Y = list(samples) # Median and quantiles y_50, ci_50 = quantile_ci(Y, 0.5, alpha_min=0.99) y_25 = quantile(Y, 0.25) y_75 = quantile(Y, 0.75) # If nonparametric CI estimation didn't give an estimate, # use the credible interval of a bayesian posterior distribution. a, b = ci_50 if (math.isinf(a) or math.isinf(b)) and len(Y) > 1: # Compute posterior distribution for location, assuming # exponential noise. The MLE is equal to the median. 
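        # Only the infinite side(s) of the nonparametric interval are
        # replaced below; a finite bound from quantile_ci is kept as-is,
        # and each replacement is widened so it at least covers the
        # corresponding extreme of the samples.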
c = LaplacePosterior(Y) # Use the CI from that distribution to extend beyond sample # bounds if math.isinf(a): a = min(c.ppf(0.01 / 2), min(Y)) if math.isinf(b): b = max(c.ppf(1 - 0.01 / 2), max(Y)) ci_50 = (a, b) # Produce results result = y_50 stats = { "ci_99_a": ci_50[0], "ci_99_b": ci_50[1], "q_25": y_25, "q_75": y_75, "repeat": len(Y), "number": number, } return result, stats asv_runner-0.2.1/asv_runner/timing.py000066400000000000000000000052701456224025000177230ustar00rootroot00000000000000import json import sys import timeit from time import process_time from .benchmarks.time import TimeBenchmark def _timing(argv): """ Executes a timing benchmark. #### Parameters **argv** (`list` of `str`) : Command line arguments. #### Notes This function parses the command line arguments, including options for setup, number of repeats, timing method, and output format (JSON or not). It selects the appropriate timing function based on the `--timer` argument. It creates an instance of the `TimeBenchmark` class, with the provided statement to be executed, and runs it. The setup is provided from the `--setup` argument. Once the benchmark is run, it computes the statistics of the results and formats the output. If the `--json` flag is not set, it prints the output in a human- readable format. Otherwise, it outputs the result, samples, and stats as a JSON. """ import argparse import asv_runner.console import asv_runner.statistics import asv_runner.util parser = argparse.ArgumentParser( usage="python -masv.benchmark timing [options] STATEMENT" ) parser.add_argument("--setup", action="store", default=(lambda: None)) parser.add_argument("--number", action="store", type=int, default=0) parser.add_argument("--repeat", action="store", type=int, default=0) parser.add_argument( "--timer", action="store", choices=("process_time", "perf_counter"), default="perf_counter", ) parser.add_argument("--json", action="store_true") parser.add_argument("statement") args = parser.parse_args(argv) timer_func = { "process_time": process_time, "perf_counter": timeit.default_timer, }[args.timer] class AttrSource: pass attrs = AttrSource() attrs.repeat = args.repeat attrs.number = args.number attrs.timer = timer_func bench = TimeBenchmark("tmp", args.statement, [attrs]) bench.redo_setup = args.setup result = bench.run() value, stats = asv_runner.statistics.compute_stats( result["samples"], result["number"] ) formatted = asv_runner.util.human_time( value, asv_runner.statistics.get_err(value, stats) ) if not args.json: asv_runner.console.color_print(formatted, "red") asv_runner.console.color_print("", "default") asv_runner.console.color_print( "\n".join(f"{k}: {v}" for k, v in sorted(stats.items())), "default" ) asv_runner.console.color_print(f"samples: {result['samples']}", "default") else: json.dump( {"result": value, "samples": result["samples"], "stats": stats}, sys.stdout ) asv_runner-0.2.1/asv_runner/util.py000066400000000000000000000105401456224025000174050ustar00rootroot00000000000000# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Various low-level utilities. """ import math import shutil terminal_width = shutil.get_terminal_size().columns def ceildiv(numerator, denominator): """ Calculate the ceiling division of two numbers. #### Parameters **numerator** (`int`) : The numerator in the division. **denominator** (`int`) : The denominator in the division. #### Returns `int`: The result of the division rounded up to the nearest integer. 
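    #### Example

    A quick worked check of the identity used below:

    ```python
    ceildiv(7, 2)   # -> 4, since -((-7) // 2) == -(-4) == 4
    ceildiv(6, 2)   # -> 3, exact divisions are unchanged
    ```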
#### Notes This function calculates the ceiling division of two numbers, i.e., division that rounds up. It is equivalent to `math.ceil(numerator/denominator)`, but avoids the conversion of numerator and denominator to float. """ return -((-numerator) // denominator) def human_float(value, significant=3, truncate_small=None, significant_zeros=False): """ Formats a float into a human-friendly string. #### Parameters **value** (`float`) : The float value to format. **significant** (`int`) : Number of significant digits to include in the output. Default is 3. **truncate_small** (`int`, optional) : If defined, leading zeros of numbers < 1 are counted as significant. **significant_zeros** (`bool`) : If True, trailing unnecessary zeros are included. Default is False. #### Returns `str`: A string representing the float with human-friendly significant digits. #### Notes Switches to scientific notation for very large or very small numbers. The magnitude of the number is calculated using `math.log10(value)`. """ if value == 0: return "0" elif math.isinf(value) or math.isnan(value): return f"{value}" elif value < 0: sign = "-" value = -value else: sign = "" logv = math.log10(value) magnitude = int(math.floor(logv)) + 1 if truncate_small is not None: magnitude = max(magnitude, -truncate_small + 1) num_digits = significant - magnitude if magnitude <= -5 or magnitude >= 9: # Too many digits, use scientific notation fmt = f"{{0:.{significant}e}}" elif value == int(value) or num_digits <= 0: value = int(round(value, num_digits)) fmt = "{0:d}" else: fmt = f"{{0:.{num_digits}f}}" formatted = sign + fmt.format(value) if not significant_zeros and "." in formatted and "e" not in fmt: formatted = formatted.rstrip("0") if formatted[-1] == ".": formatted = formatted[:-1] if significant_zeros and "." not in formatted: if len(formatted) < significant: formatted += "." + "0" * (significant - len(formatted)) return formatted _human_time_units = ( ("ns", 0.000000001), ("μs", 0.000001), ("ms", 0.001), ("s", 1), ("m", 60), ("h", 60 * 60), ("d", 60 * 60 * 24), ("w", 60 * 60 * 24 * 7), ("y", 60 * 60 * 24 * 7 * 52), ("C", 60 * 60 * 24 * 7 * 52 * 100), ) def human_time(seconds, err=None): """ Formats a duration in seconds into a human-friendly time string. Depending on the number of seconds given, can be one of:: 1w 3d 2d 4h 1h 5m 1m 4s 15s The representation is always exactly 6 characters long. #### Parameters **seconds** (`int`) : The number of seconds to represent. **err** (`float`, optional) : If provided, formats the duration as "{value}±{err}", e.g., "1h±5m". It can be used to represent the uncertainty in the measurement. #### Returns `str`: A human-friendly representation of the given duration. If the duration is NaN, returns "n/a". 
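    #### Example

    A few illustrative calls (outputs worked out from the unit table and
    `human_float` rules above, shown here as a sketch):

    ```python
    human_time(32)            # -> "32.0s"
    human_time(90)            # -> "1.50m"
    human_time(float("nan"))  # -> "n/a"
    ```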
""" units = _human_time_units seconds = float(seconds) scale = seconds if scale == 0 and err is not None: scale = float(err) if scale == 0: # Represent zero in reasonable units units = [("s", 1), ("m", 60)] if scale != scale: # nan return "n/a" for i in range(len(units) - 1): if scale < units[i + 1][1]: str_time = human_float(seconds / units[i][1], 3, significant_zeros=True) if err is None: return f"{str_time:s}{units[i][0]}" str_err = human_float(err / units[i][1], 1, truncate_small=2) return f"{str_time:s}±{str_err:s}{units[i][0]}" return "~0" asv_runner-0.2.1/changelog.d/000077500000000000000000000000001456224025000160455ustar00rootroot00000000000000asv_runner-0.2.1/changelog.d/+bugfix.md000066400000000000000000000001351456224025000177250ustar00rootroot00000000000000Use `importlib_metadata` instead of `importlib.metadata` since we need to support Python 3.7 asv_runner-0.2.1/changelog.d/.gitignore000066400000000000000000000000141456224025000200300ustar00rootroot00000000000000!.gitignore asv_runner-0.2.1/changelog.d/readme.md000066400000000000000000000015221456224025000176240ustar00rootroot00000000000000# Usage `towncrier` is used for keeping track of the changelog. The relevant configuration aspects are: - Each file can be formatted using markdown - The contents are rendered in bullets - Each file should be labeled with the corresponding **pull request**, e.g. `NUM.TYPE.md` + Where there is no clear corresponding pull request, `+` can be used instead of `NUM` For mapping the types to headings, the following table can be used: | **TYPE** | **Heading** | | feat | New Features | | api | API Changes | | bugfix | Bug Fixes | | misc | Other Changes and Additions | ## Release ```bash # View the changes towncrier build --draft --version 0.1.0 --date "$(date -u +%Y-%m-%d)" # Modify CHANGES.md towncrier build --version 0.1.0 --date "$(date -u +%Y-%m-%d)" ``` asv_runner-0.2.1/docs/000077500000000000000000000000001456224025000146245ustar00rootroot00000000000000asv_runner-0.2.1/docs/requirements.in000066400000000000000000000003141456224025000176750ustar00rootroot00000000000000sphinx==7.2.2 furo==2023.08.19 sphinx-copybutton==0.5.2 sphinx-contributors==0.2.7 sphinx_design==0.5.0 sphinx-autobuild==2021.3.14 sphinxcontrib-spelling==8.0.0 myst-parser==2.0.0 sphinx-autodoc2==0.4.2 asv_runner-0.2.1/docs/requirements.txt000066400000000000000000000050511456224025000201110ustar00rootroot00000000000000# # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile docs/requirements.in # alabaster==0.7.13 # via sphinx astroid==2.15.6 # via sphinx-autodoc2 babel==2.12.1 # via sphinx beautifulsoup4==4.12.2 # via furo certifi==2023.7.22 # via requests charset-normalizer==3.2.0 # via requests colorama==0.4.6 # via sphinx-autobuild docutils==0.20.1 # via # myst-parser # sphinx furo==2023.8.19 # via -r docs/requirements.in idna==3.4 # via requests imagesize==1.4.1 # via sphinx jinja2==3.1.2 # via # myst-parser # sphinx lazy-object-proxy==1.9.0 # via astroid livereload==2.6.3 # via sphinx-autobuild markdown-it-py==3.0.0 # via # mdit-py-plugins # myst-parser markupsafe==2.1.3 # via jinja2 mdit-py-plugins==0.4.0 # via myst-parser mdurl==0.1.2 # via markdown-it-py myst-parser==2.0.0 # via -r docs/requirements.in packaging==23.1 # via sphinx pyenchant==3.2.2 # via sphinxcontrib-spelling pygments==2.16.1 # via # furo # sphinx pyyaml==6.0.1 # via myst-parser requests==2.31.0 # via sphinx six==1.16.0 # via livereload snowballstemmer==2.2.0 # via sphinx soupsieve==2.4.1 # via 
beautifulsoup4 sphinx==7.2.2 # via # -r docs/requirements.in # furo # myst-parser # sphinx-autobuild # sphinx-basic-ng # sphinx-contributors # sphinx-copybutton # sphinx-design # sphinxcontrib-applehelp # sphinxcontrib-devhelp # sphinxcontrib-htmlhelp # sphinxcontrib-qthelp # sphinxcontrib-serializinghtml # sphinxcontrib-spelling sphinx-autobuild==2021.3.14 # via -r docs/requirements.in sphinx-autodoc2==0.4.2 # via -r docs/requirements.in sphinx-basic-ng==1.0.0b2 # via furo sphinx-contributors==0.2.7 # via -r docs/requirements.in sphinx-copybutton==0.5.2 # via -r docs/requirements.in sphinx-design==0.5.0 # via -r docs/requirements.in sphinxcontrib-applehelp==1.0.7 # via sphinx sphinxcontrib-devhelp==1.0.5 # via sphinx sphinxcontrib-htmlhelp==2.0.4 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx sphinxcontrib-qthelp==1.0.6 # via sphinx sphinxcontrib-serializinghtml==1.1.8 # via sphinx sphinxcontrib-spelling==8.0.0 # via -r docs/requirements.in tomli==2.0.1 # via sphinx-autodoc2 tornado==6.3.3 # via livereload typing-extensions==4.7.1 # via # astroid # sphinx-autodoc2 urllib3==2.0.4 # via requests wrapt==1.15.0 # via astroid asv_runner-0.2.1/docs/source/000077500000000000000000000000001456224025000161245ustar00rootroot00000000000000asv_runner-0.2.1/docs/source/_static/000077500000000000000000000000001456224025000175525ustar00rootroot00000000000000asv_runner-0.2.1/docs/source/_static/.nojekyll000066400000000000000000000000001456224025000213700ustar00rootroot00000000000000asv_runner-0.2.1/docs/source/bplugin-list.md000066400000000000000000000005711456224025000210620ustar00rootroot00000000000000# External Plugin List Here are the existing external plugins which are supported by `asv` and `asv_runner` (pull requests welcome). ## Benchmark Plugins - [`asv_bench_memray`](https://haozeke.github.io/asv_bench_memray/) enables `RayMyClass` or `ray_funcname` for peak memory as profiled by `memray`, which is able to handle native calls and traces every function call asv_runner-0.2.1/docs/source/conf.py000066400000000000000000000033551456224025000174310ustar00rootroot00000000000000import os import shutil # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "asv_runner" copyright = "2023--present, asv Developers" author = "asv Developers" release = "0.2.1" # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = [ "myst_parser", "autodoc2", "sphinx.ext.napoleon", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.githubpages", "sphinx_contributors", "sphinx_copybutton", "sphinx_design", "sphinxcontrib.spelling", ] autodoc2_render_plugin = "myst" autodoc2_packages = [ "../../asv_runner", ] myst_enable_extensions = [ "deflist", ] intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "asv": ("https://asv.readthedocs.io/en/latest/", None), } templates_path = ["_templates"] exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = "furo" html_static_path = ["_static"] html_theme_options = { "source_repository": "https://github.com/HaoZeke/asv_runner/", "source_branch": "main", "source_directory": "docs/", } # ------------- Copying things docs_source_dir = os.path.abspath(os.path.dirname(__file__)) 
project_root_dir = os.path.abspath(os.path.join(docs_source_dir, "..", "..")) changelog_src = os.path.join(project_root_dir, "CHANGELOG.md") changelog_dest = os.path.join(docs_source_dir, "CHANGELOG.md") shutil.copyfile(changelog_src, changelog_dest) asv_runner-0.2.1/docs/source/development/000077500000000000000000000000001456224025000204465ustar00rootroot00000000000000asv_runner-0.2.1/docs/source/development/benchmark_plugins.md000066400000000000000000000006101456224025000244600ustar00rootroot00000000000000# Developing benchmarks All benchmark plugins must follow a strict hierarchy: - The package name must begin with `asv_bench`. - Benchmark classes are defined in a `benchmarks` folder under the package module. - Each exported new benchmark type has the `export_as_benchmark = [NAMEBenchmark]` attribute. For more conventions, see the internally defined benchmark types within `asv_runner`. asv_runner-0.2.1/docs/source/index.md000066400000000000000000000021551456224025000175600ustar00rootroot00000000000000# ASV Runner Welcome to ASV Runner, the pure Python core of [ASV (Airspeed-Velocity)](https://asv.readthedocs.io/). ASV Runner provides essential functionality for benchmarking Python packages with ease and efficiency. Key Features: - **Benchmark Classes**: ASV Runner includes the core benchmark classes for `asv` that enable accurate measurement of performance metrics such as runtime, memory consumption, and peak memory usage. - **Minimal Dependencies**: ASV Runner has minimal dependencies, relying primarily on pure Python for timing operations. - **Flexible Integration**: ASV Runner is a core component of ASV, enabling comprehensive benchmarking of Python packages throughout their development lifecycle. ASV runner is a powerful framework for leveraging `asv`'s rich plugin ecosystem. Explore the documentation and discover how ASV Runner can help you accurately measure and analyze the performance of your Python packages. ```{toctree} :maxdepth: 2 :caption: Contents apidocs/index bplugin-list development/benchmark_plugins CHANGELOG ``` ## Indices and tables - [](genindex) - [](modindex) - [](search) asv_runner-0.2.1/pdm.lock000066400000000000000000000516361456224025000153410ustar00rootroot00000000000000# This file is @generated by PDM. # It is not intended for manual editing. 
[metadata] groups = ["default", "base"] strategy = ["cross_platform", "inherit_metadata"] lock_version = "4.4.1" content_hash = "sha256:90aea3cc0ebdf308c2f77b69ee5852aa20f2e807c1302b95da43ecb23ccc73f7" [[package]] name = "cli-ui" version = "0.17.2" requires_python = ">=3.7,<4.0" summary = "Build Nice User Interfaces In The Terminal" groups = ["base"] dependencies = [ "colorama<0.5.0,>=0.4.1", "tabulate<0.9.0,>=0.8.3", "unidecode<2.0.0,>=1.0.23", ] files = [ {file = "cli-ui-0.17.2.tar.gz", hash = "sha256:2f67e50cf474e76ad160c3e660bbad98bf8b8dfb8d847765f3a261b7e13c05fa"}, {file = "cli_ui-0.17.2-py3-none-any.whl", hash = "sha256:6a1ebdbbcd83a0fa06b2f63f4434082a3ba8664aebedd91f1ff86b9e4289d53e"}, ] [[package]] name = "click" version = "8.1.7" requires_python = ">=3.7" summary = "Composable command line interface toolkit" groups = ["base"] dependencies = [ "colorama; platform_system == \"Windows\"", "importlib-metadata; python_version < \"3.8\"", ] files = [ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, ] [[package]] name = "click-default-group" version = "1.2.4" requires_python = ">=2.7" summary = "click_default_group" groups = ["base"] dependencies = [ "click", ] files = [ {file = "click_default_group-1.2.4-py2.py3-none-any.whl", hash = "sha256:9b60486923720e7fc61731bdb32b617039aba820e22e1c88766b1125592eaa5f"}, {file = "click_default_group-1.2.4.tar.gz", hash = "sha256:eb3f3c99ec0d456ca6cd2a7f08f7d4e91771bef51b01bdd9580cc6450fe1251e"}, ] [[package]] name = "colorama" version = "0.4.6" requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" summary = "Cross-platform colored terminal text." 
groups = ["base"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] [[package]] name = "contextlib2" version = "21.6.0" requires_python = ">=3.6" summary = "Backports and enhancements for the contextlib module" groups = ["base"] files = [ {file = "contextlib2-21.6.0-py2.py3-none-any.whl", hash = "sha256:3fbdb64466afd23abaf6c977627b75b6139a5a3e8ce38405c5b413aed7a0471f"}, {file = "contextlib2-21.6.0.tar.gz", hash = "sha256:ab1e2bfe1d01d968e1b7e8d9023bc51ef3509bba217bb730cee3827e1ee82869"}, ] [[package]] name = "docopt" version = "0.6.2" summary = "Pythonic argument parser, that will make you smile" groups = ["base"] files = [ {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, ] [[package]] name = "importlib-metadata" version = "6.7.0" requires_python = ">=3.7" summary = "Read metadata from Python packages" groups = ["base"] marker = "python_version < \"3.8\"" dependencies = [ "typing-extensions>=3.6.4; python_version < \"3.8\"", "zipp>=0.5", ] files = [ {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, ] [[package]] name = "importlib-resources" version = "5.12.0" requires_python = ">=3.7" summary = "Read resources from Python packages" groups = ["base"] marker = "python_version < \"3.10\"" dependencies = [ "zipp>=3.1.0; python_version < \"3.10\"", ] files = [ {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, ] [[package]] name = "incremental" version = "22.10.0" summary = "\"A small library that versions your Python projects.\"" groups = ["base"] files = [ {file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"}, {file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"}, ] [[package]] name = "jinja2" version = "3.1.3" requires_python = ">=3.7" summary = "A very fast and expressive template engine." groups = ["base"] dependencies = [ "MarkupSafe>=2.0", ] files = [ {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, ] [[package]] name = "markupsafe" version = "2.1.5" requires_python = ">=3.7" summary = "Safely add untrusted strings to HTML/XML markup." 
groups = ["base"] files = [ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, {file = 
"MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] [[package]] name = "ruff" version = "0.2.1" requires_python = ">=3.7" summary = "An extremely fast Python linter and code formatter, written in Rust." 
groups = ["base"] files = [ {file = "ruff-0.2.1-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:dd81b911d28925e7e8b323e8d06951554655021df8dd4ac3045d7212ac4ba080"}, {file = "ruff-0.2.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:dc586724a95b7d980aa17f671e173df00f0a2eef23f8babbeee663229a938fec"}, {file = "ruff-0.2.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c92db7101ef5bfc18e96777ed7bc7c822d545fa5977e90a585accac43d22f18a"}, {file = "ruff-0.2.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:13471684694d41ae0f1e8e3a7497e14cd57ccb7dd72ae08d56a159d6c9c3e30e"}, {file = "ruff-0.2.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a11567e20ea39d1f51aebd778685582d4c56ccb082c1161ffc10f79bebe6df35"}, {file = "ruff-0.2.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:00a818e2db63659570403e44383ab03c529c2b9678ba4ba6c105af7854008105"}, {file = "ruff-0.2.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be60592f9d218b52f03384d1325efa9d3b41e4c4d55ea022cd548547cc42cd2b"}, {file = "ruff-0.2.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbd2288890b88e8aab4499e55148805b58ec711053588cc2f0196a44f6e3d855"}, {file = "ruff-0.2.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3ef052283da7dec1987bba8d8733051c2325654641dfe5877a4022108098683"}, {file = "ruff-0.2.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:7022d66366d6fded4ba3889f73cd791c2d5621b2ccf34befc752cb0df70f5fad"}, {file = "ruff-0.2.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0a725823cb2a3f08ee743a534cb6935727d9e47409e4ad72c10a3faf042ad5ba"}, {file = "ruff-0.2.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:0034d5b6323e6e8fe91b2a1e55b02d92d0b582d2953a2b37a67a2d7dedbb7acc"}, {file = "ruff-0.2.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e5cb5526d69bb9143c2e4d2a115d08ffca3d8e0fddc84925a7b54931c96f5c02"}, {file = "ruff-0.2.1-py3-none-win32.whl", hash = "sha256:6b95ac9ce49b4fb390634d46d6ece32ace3acdd52814671ccaf20b7f60adb232"}, {file = "ruff-0.2.1-py3-none-win_amd64.whl", hash = "sha256:e3affdcbc2afb6f5bd0eb3130139ceedc5e3f28d206fe49f63073cb9e65988e0"}, {file = "ruff-0.2.1-py3-none-win_arm64.whl", hash = "sha256:efababa8e12330aa94a53e90a81eb6e2d55f348bc2e71adbf17d9cad23c03ee6"}, {file = "ruff-0.2.1.tar.gz", hash = "sha256:3b42b5d8677cd0c72b99fcaf068ffc62abb5a19e71b4a3b9cfa50658a0af02f1"}, ] [[package]] name = "schema" version = "0.7.5" summary = "Simple data validation library" groups = ["base"] dependencies = [ "contextlib2>=0.5.5", ] files = [ {file = "schema-0.7.5-py2.py3-none-any.whl", hash = "sha256:f3ffdeeada09ec34bf40d7d79996d9f7175db93b7a5065de0faa7f41083c1e6c"}, {file = "schema-0.7.5.tar.gz", hash = "sha256:f06717112c61895cabc4707752b88716e8420a8819d71404501e114f91043197"}, ] [[package]] name = "tabulate" version = "0.8.10" requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" summary = "Pretty-print tabular data" groups = ["base"] files = [ {file = "tabulate-0.8.10-py3-none-any.whl", hash = "sha256:0ba055423dbaa164b9e456abe7920c5e8ed33fcc16f6d1b2f2d152c8e1e8b4fc"}, {file = "tabulate-0.8.10.tar.gz", hash = "sha256:6c57f3f3dd7ac2782770155f3adb2db0b1a269637e42f27599925e64b114f519"}, ] [[package]] name = "tbump" version = "6.11.0" requires_python = ">=3.7,<4.0" summary = "Bump software releases" groups = ["base"] dependencies = [ "cli-ui>=0.10.3", "docopt<0.7.0,>=0.6.2", 
"schema<0.8.0,>=0.7.1", "tomlkit<0.12,>=0.11", ] files = [ {file = "tbump-6.11.0-py3-none-any.whl", hash = "sha256:6b181fe6f3ae84ce0b9af8cc2009a8bca41ded34e73f623a7413b9684f1b4526"}, {file = "tbump-6.11.0.tar.gz", hash = "sha256:385e710eedf0a8a6ff959cf1e9f3cfd17c873617132fc0ec5f629af0c355c870"}, ] [[package]] name = "tomli" version = "2.0.1" requires_python = ">=3.7" summary = "A lil' TOML parser" groups = ["base"] marker = "python_version < \"3.11\"" files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] [[package]] name = "tomlkit" version = "0.11.8" requires_python = ">=3.7" summary = "Style preserving TOML library" groups = ["base"] files = [ {file = "tomlkit-0.11.8-py3-none-any.whl", hash = "sha256:8c726c4c202bdb148667835f68d68780b9a003a9ec34167b6c673b38eff2a171"}, {file = "tomlkit-0.11.8.tar.gz", hash = "sha256:9330fc7faa1db67b541b28e62018c17d20be733177d290a13b24c62d1614e0c3"}, ] [[package]] name = "towncrier" version = "23.6.0" requires_python = ">=3.7" summary = "Building newsfiles for your project." groups = ["base"] dependencies = [ "click", "click-default-group", "importlib-resources>=5; python_version < \"3.10\"", "incremental", "jinja2", "tomli; python_version < \"3.11\"", ] files = [ {file = "towncrier-23.6.0-py3-none-any.whl", hash = "sha256:da552f29192b3c2b04d630133f194c98e9f14f0558669d427708e203fea4d0a5"}, {file = "towncrier-23.6.0.tar.gz", hash = "sha256:fc29bd5ab4727c8dacfbe636f7fb5dc53b99805b62da1c96b214836159ff70c1"}, ] [[package]] name = "typing-extensions" version = "4.7.1" requires_python = ">=3.7" summary = "Backported and Experimental Type Hints for Python 3.7+" groups = ["base"] marker = "python_version < \"3.8\"" files = [ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, ] [[package]] name = "unidecode" version = "1.3.8" requires_python = ">=3.5" summary = "ASCII transliterations of Unicode text" groups = ["base"] files = [ {file = "Unidecode-1.3.8-py3-none-any.whl", hash = "sha256:d130a61ce6696f8148a3bd8fe779c99adeb4b870584eeb9526584e9aa091fd39"}, {file = "Unidecode-1.3.8.tar.gz", hash = "sha256:cfdb349d46ed3873ece4586b96aa75258726e2fa8ec21d6f00a591d98806c2f4"}, ] [[package]] name = "zipp" version = "3.15.0" requires_python = ">=3.7" summary = "Backport of pathlib-compatible object wrapper for zip files" groups = ["base"] marker = "python_version < \"3.10\"" files = [ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, ] asv_runner-0.2.1/pyproject.toml000066400000000000000000000042701456224025000166130ustar00rootroot00000000000000[project] name = "asv_runner" description = "Core Python benchmark code for ASV" authors = [ { name = "Rohit Goswami", email = "rog32@hi.is" }, { name = "Michael Droettboom", email = "mdroe@stsci.edu" }, ] maintainers = [ { name = "Rohit Goswami", email = "rog32@hi.is" }, ] classifiers = [ "Development Status :: 4 - Beta", "Programming Language :: Python", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3", "Programming Language 
:: Python :: 3 :: Only", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Topic :: System :: Benchmark", ] dependencies = [ "importlib-metadata", # This is OK, it was vendored from 3.8 ] # Make sure there's nothing (or almost nothing) here requires-python = ">=3.7" readme = "README.md" license = {text = "BSD-3-Clause"} dynamic = ["version"] [project.optional-dependencies] docs = [ "furo", "sphinx", "sphinx-contributors", "sphinx-copybutton", "sphinx-design", "sphinx-autobuild", "sphinxcontrib-spelling", "myst-parser>=2", # Needs py38 "sphinx-autodoc2>=0.4.2", # Needs py38 ] [build-system] requires = ["pdm-backend"] build-backend = "pdm.backend" [tool.ruff] line-length = 88 extend-ignore = [ "E741", # Do not use variables named "I", "O", or "l" ] select = [ "E", # pycodestyle "F", # pyflakes "UP", # pyupgrade "I001", # isort ] [tool.pdm] [tool.pdm.dev-dependencies] base = [ "towncrier>=23.6.0", "tbump>=6.11.0", "ruff>=0.2.1", ] [tool.pdm.scripts] lint = {composite = ["ruff --fix --unsafe-fixes asv_runner/", "ruff format asv_runner/"]} mkdoc = {composite = ["sphinx-build -b html docs/source docs/build/html"]} [tool.pdm.version] source = "scm" write_to = "asv_runner/__init__.py" write_template = "__version__ = '{}'" [project.urls] homepage = "https://asv.readthedocs.io/projects/asv-runner/en/latest/" repository = "https://github.com/airspeed-velocity/asv_runner" documentation = "https://asv.readthedocs.io/projects/asv-runner/en/latest/" asv_runner-0.2.1/tbump.toml000066400000000000000000000021631456224025000157220ustar00rootroot00000000000000# Uncomment this if your project is hosted on GitHub: github_url = "https://github.com/airspeed-velocity/asv_runner/" [version] current = "0.2.1" # Example of a semver regexp. # Make sure this matches current_version before # using tbump regex = ''' (?P\d+) \. (?P\d+) \. (?P\d+) ''' [git] message_template = "REL: Bump version to v{new_version}" tag_template = "v{new_version}" # For each file to patch, add a [[file]] config # section containing the path of the file, relative to the # tbump.toml location. 
[[file]] src = "docs/source/conf.py" # You can specify a list of commands to # run after the files have been patched # and before the git commit is made [[before_commit]] name = "pre-commit" cmd = "pipx run pre-commit run -a" [[before_commit]] name = "generate-release-notes" cmd = 'towncrier build --version {new_version} --date "$(date -u +%d-%m-%Y)"' # [[before_commit]] # name = "check changelog" # cmd = "grep -q {new_version} Changelog.rst" # Or run some commands after the git tag and the branch # have been pushed: # [[after_push]] # name = "publish" # cmd = "./publish.sh" asv_runner-0.2.1/towncrier.toml000066400000000000000000000014301456224025000166030ustar00rootroot00000000000000[tool.towncrier] package = "asv_runner" package_dir = "asv_runner" all_bullets = false wrap = true directory = "changelog.d" filename = "CHANGELOG.md" start_string = "\n" underlines = ["", "", ""] title_format = "## [{version}](https://github.com/airspeed-velocity/asv_runner/tree/{version}) - {project_date}" issue_format = "[#{issue}](https://github.com/airspeed-velocity/asv_runner/issues/{issue})" [[tool.towncrier.type]] directory = "feat" name = "New Features" showcontent = true [[tool.towncrier.type]] directory = "api" name = "API Changes" showcontent = true [[tool.towncrier.type]] directory = "bugfix" name = "Bug Fixes" showcontent = true [[tool.towncrier.type]] directory = "misc" name = "Other Changes and Additions" showcontent = true