pax_global_header00006660000000000000000000000064141760064010014511gustar00rootroot0000000000000052 comment=0b554d6968cc345a0fad15338d2d4abd40e994a5 threadpoolctl-3.1.0/000077500000000000000000000000001417600640100143565ustar00rootroot00000000000000threadpoolctl-3.1.0/.azure_pipeline.yml000066400000000000000000000126401417600640100201750ustar00rootroot00000000000000# Adapted from https://github.com/pandas-dev/pandas/blob/master/azure-pipelines.yml # Global variables for all jobs variables: VIRTUALENV: 'testvenv' JUNITXML: 'test-data.xml' CODECOV_TOKEN: 'cee0e505-c12e-4139-aa43-621fb16a2347' schedules: - cron: "0 1 * * *" # 1am UTC displayName: Run nightly build branches: include: - master always: true stages: - stage: jobs: - template: continuous_integration/windows.yml parameters: name: Windows vmImage: windows-latest matrix: pylatest_conda_forge_mkl: VERSION_PYTHON: '*' PACKAGER: 'conda-forge' BLAS: 'mkl' py39_conda_forge_openblas: VERSION_PYTHON: '3.9' PACKAGER: 'conda-forge' BLAS: 'openblas' py37_conda: VERSION_PYTHON: '3.7' PACKAGER: 'conda' py36_pip: VERSION_PYTHON: '3.6' PACKAGER: 'pip' - template: continuous_integration/posix.yml parameters: name: Linux vmImage: ubuntu-20.04 matrix: # Linux environment to test that packages that comes with Ubuntu 20.04 # are correctly handled. py38_ubuntu_atlas_gcc_gcc: PACKAGER: 'ubuntu' APT_BLAS: 'libatlas3-base libatlas-base-dev' VERSION_PYTHON: '3.8' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'gcc' py36_ubuntu_openblas_gcc_gcc: PACKAGER: 'ubuntu' APT_BLAS: 'libopenblas-base libopenblas-dev' VERSION_PYTHON: '3.8' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'gcc' # Linux + Python 3.7 and homogeneous runtime nesting. py37_conda_openblas_clang_clang: PACKAGER: 'conda' VERSION_PYTHON: '3.7' BLAS: 'openblas' CC_OUTER_LOOP: 'clang-10' CC_INNER_LOOP: 'clang-10' # Linux environment with MKL and Clang (known to be unsafe for # threadpoolctl) to only test the warning from multiple OpenMP. 
pylatest_conda_mkl_clang_gcc: PACKAGER: 'conda' VERSION_PYTHON: '*' BLAS: 'mkl' CC_OUTER_LOOP: 'clang-10' CC_INNER_LOOP: 'gcc' TESTS: 'libomp_libiomp_warning' # Linux environment with MKL, safe for threadpoolctl. pylatest_conda_mkl_gcc_gcc: PACKAGER: 'conda' VERSION_PYTHON: '*' BLAS: 'mkl' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'gcc' MKL_THREADING_LAYER: 'INTEL' # Linux + Python 3.8 with numpy / scipy installed with pip from PyPI # and heterogeneous OpenMP runtimes. py38_pip_openblas_gcc_clang: PACKAGER: 'pip' VERSION_PYTHON: '3.8' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'clang-10' # Linux environment with numpy from conda-forge channel and openblas-openmp pylatest_conda_forge: PACKAGER: 'conda-forge' VERSION_PYTHON: '*' BLAS: 'openblas' OPENBLAS_THREADING_LAYER: 'openmp' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'gcc' LINT: 'true' # Linux environment with no numpy and heterogeneous OpenMP runtimes. pylatest_conda_nonumpy_gcc_clang: PACKAGER: 'conda' NO_NUMPY: 'true' VERSION_PYTHON: '*' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'clang-10' # Linux environment with numpy linked to BLIS pylatest_blis_gcc_clang_openmp: PACKAGER: 'conda' VERSION_PYTHON: '*' INSTALL_BLIS: 'true' BLIS_NUM_THREADS: '4' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'gcc' BLIS_CC: 'clang-10' BLIS_ENABLE_THREADING: 'openmp' pylatest_blis_clang_gcc_pthreads: PACKAGER: 'conda' VERSION_PYTHON: '*' INSTALL_BLIS: 'true' BLIS_NUM_THREADS: '4' CC_OUTER_LOOP: 'clang-10' CC_INNER_LOOP: 'clang-10' BLIS_CC: 'gcc-8' BLIS_ENABLE_THREADING: 'pthreads' pylatest_blis_no_threading: PACKAGER: 'conda' VERSION_PYTHON: '*' INSTALL_BLIS: 'true' BLIS_NUM_THREADS: '1' CC_OUTER_LOOP: 'gcc' CC_INNER_LOOP: 'gcc' BLIS_CC: 'gcc-8' BLIS_ENABLE_THREADING: 'no' - template: continuous_integration/posix.yml parameters: name: macOS vmImage: macOS-10.15 matrix: # MacOS environment with OpenMP installed through homebrew py36_conda_homebrew_libomp: PACKAGER: 'conda' VERSION_PYTHON: '3.6' BLAS: 'openblas' CC_OUTER_LOOP: 'clang' CC_INNER_LOOP: 'clang' 
INSTALL_LIBOMP: 'homebrew' # MacOS environment with OpenMP installed through conda-forge compilers pylatest_conda_forge_clang: PACKAGER: 'conda-forge' VERSION_PYTHON: '*' BLAS: 'mkl' CC_OUTER_LOOP: 'clang' CC_INNER_LOOP: 'clang' INSTALL_LIBOMP: 'conda-forge' - stage: jobs: # Meta-test to ensure that at least of the above CI configurations had # the necessary platform settings to execute each test without raising # skipping. - job: 'no_test_always_skipped' displayName: 'No test always skipped' pool: vmImage: ubuntu-20.04 steps: - download: current - script: | python continuous_integration/check_no_test_skipped.py $(Pipeline.Workspace) displayName: 'No test always skipped' threadpoolctl-3.1.0/.codecov.yml000066400000000000000000000000161417600640100165760ustar00rootroot00000000000000comment: off threadpoolctl-3.1.0/.coveragerc000066400000000000000000000000341417600640100164740ustar00rootroot00000000000000[run] source=threadpoolctl threadpoolctl-3.1.0/.gitignore000066400000000000000000000003771417600640100163550ustar00rootroot00000000000000# Python and Cython generated files *.pyc __pycache__ .cache .pytest_cache *.so *.dylib *.c # Python install files, build and release artifacts *.egg-info/ build dist # Coverage data .coverage /htmlcov # Developer tools .vscode # pytest .pytest_cache threadpoolctl-3.1.0/CHANGES.md000066400000000000000000000073201417600640100157520ustar00rootroot000000000000003.1.0 (2022-01-31) ================== - Fixed a detection issue of the BLAS libraires packaged by conda-forge on Windows. https://github.com/joblib/threadpoolctl/pull/112 - `threadpool_limits` and `ThreadpoolController.limit` now accept the string "sequential_blas_under_openmp" for the `limits` parameter. It should only be used for the specific case when one wants to have sequential BLAS calls within an OpenMP parallel region. It takes into account the unexpected behavior of OpenBLAS with the OpenMP threading layer. 
https://github.com/joblib/threadpoolctl/pull/114 3.0.0 (2021-10-01) ================== - New object `threadpooctl.ThreadpoolController` which holds controllers for all the supported native libraries. The states of these libraries is accessible through the `info` method (equivalent to `threadpoolctl.threadpool_info()`) and their number of threads can be limited with the `limit` method which can be used as a context manager (equivalent to `threadpoolctl.threadpool_limits()`). This is especially useful to avoid searching through all loaded shared libraries each time. https://github.com/joblib/threadpoolctl/pull/95 - Added support for OpenBLAS built for 64bit integers in Fortran. https://github.com/joblib/threadpoolctl/pull/101 - Added the possibility to use `threadpoolctl.threadpool_limits` and `threadpooctl.ThreadpoolController` as decorators through their `wrap` method. https://github.com/joblib/threadpoolctl/pull/102 - Fixed an attribute error when using old versions of OpenBLAS or BLIS that are missing version query functions. https://github.com/joblib/threadpoolctl/pull/88 https://github.com/joblib/threadpoolctl/pull/91 - Fixed an attribute error when python is run with -OO. https://github.com/joblib/threadpoolctl/pull/87 2.2.0 (2021-07-09) ================== - `threadpoolctl.threadpool_info()` now reports the architecture of the CPU cores detected by OpenBLAS (via `openblas_get_corename`) and BLIS (via `bli_arch_query_id` and `bli_arch_string`). - Fixed a bug when the version of MKL was not found. The "version" field is now set to None in that case. https://github.com/joblib/threadpoolctl/pull/82 2.1.0 (2020-05-29) ================== - New commandline interface: python -m threadpoolctl -i numpy will try to import the `numpy` package and then return the output of `threadpoolctl.threadpool_info()` on STDOUT formatted using the JSON syntax. This makes it easier to quickly introspect a Python environment. 
2.0.0 (2019-12-05) ================== - Expose MKL, BLIS and OpenBLAS threading layer in information displayed by `threadpool_info`. This information is referenced in the `threading_layer` field. https://github.com/joblib/threadpoolctl/pull/48 https://github.com/joblib/threadpoolctl/pull/60 - When threadpoolctl finds libomp (LLVM OpenMP) and libiomp (Intel OpenMP) both loaded, a warning is raised to recall that using threadpoolctl with this mix of OpenMP libraries may cause crashes or deadlocks. https://github.com/joblib/threadpoolctl/pull/49 1.1.0 (2019-09-12) ================== - Detect libraries referenced by symlinks (e.g. BLAS libraries from conda-forge). https://github.com/joblib/threadpoolctl/pull/34 - Add support for BLIS. https://github.com/joblib/threadpoolctl/pull/23 - Breaking change: method `get_original_num_threads` on the `threadpool_limits` context manager to cheaply access the initial state of the runtime: - drop the `user_api` parameter; - instead return a dict `{user_api: num_threads}`; - fixed a bug when the limit parameter of `threadpool_limits` was set to `None`. https://github.com/joblib/threadpoolctl/pull/32 1.0.0 (2019-06-03) ================== Initial release. threadpoolctl-3.1.0/LICENSE000066400000000000000000000027431417600640100153710ustar00rootroot00000000000000Copyright (c) 2019, threadpoolctl contributors Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* Neither the name of copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.threadpoolctl-3.1.0/README.md000066400000000000000000000204711417600640100156410ustar00rootroot00000000000000# Thread-pool Controls [![Build Status](https://dev.azure.com/joblib/threadpoolctl/_apis/build/status/joblib.threadpoolctl?branchName=master)](https://dev.azure.com/joblib/threadpoolctl/_build/latest?definitionId=1&branchName=master) [![codecov](https://codecov.io/gh/joblib/threadpoolctl/branch/master/graph/badge.svg)](https://codecov.io/gh/joblib/threadpoolctl) Python helpers to limit the number of threads used in the threadpool-backed of common native libraries used for scientific computing and data science (e.g. BLAS and OpenMP). Fine control of the underlying thread-pool size can be useful in workloads that involve nested parallelism so as to mitigate oversubscription issues. 
## Installation - For users, install the last published version from PyPI: ```bash pip install threadpoolctl ``` - For contributors, install from the source repository in developer mode: ```bash pip install -r dev-requirements.txt flit install --symlink ``` then you run the tests with pytest: ```bash pytest ``` ## Usage ### Command Line Interface Get a JSON description of thread-pools initialized when importing python packages such as numpy or scipy for instance: ``` python -m threadpoolctl -i numpy scipy.linalg [ { "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so", "prefix": "libmkl_rt", "user_api": "blas", "internal_api": "mkl", "version": "2019.0.4", "num_threads": 2, "threading_layer": "intel" }, { "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so", "prefix": "libiomp", "user_api": "openmp", "internal_api": "openmp", "version": null, "num_threads": 4 } ] ``` The JSON information is written on STDOUT. If some of the packages are missing, a warning message is displayed on STDERR. 
### Python Runtime Programmatic Introspection Introspect the current state of the threadpool-enabled runtime libraries that are loaded when importing Python packages: ```python >>> from threadpoolctl import threadpool_info >>> from pprint import pprint >>> pprint(threadpool_info()) [] >>> import numpy >>> pprint(threadpool_info()) [{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so', 'internal_api': 'mkl', 'num_threads': 2, 'prefix': 'libmkl_rt', 'threading_layer': 'intel', 'user_api': 'blas', 'version': '2019.0.4'}, {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so', 'internal_api': 'openmp', 'num_threads': 4, 'prefix': 'libiomp', 'user_api': 'openmp', 'version': None}] >>> import xgboost >>> pprint(threadpool_info()) [{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so', 'internal_api': 'mkl', 'num_threads': 2, 'prefix': 'libmkl_rt', 'threading_layer': 'intel', 'user_api': 'blas', 'version': '2019.0.4'}, {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so', 'internal_api': 'openmp', 'num_threads': 4, 'prefix': 'libiomp', 'user_api': 'openmp', 'version': None}, {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libgomp.so.1.0.0', 'internal_api': 'openmp', 'num_threads': 4, 'prefix': 'libgomp', 'user_api': 'openmp', 'version': None}] ``` In the above example, `numpy` was installed from the default anaconda channel and comes with MKL and its Intel OpenMP (`libiomp5`) implementation while `xgboost` was installed from pypi.org and links against GNU OpenMP (`libgomp`) so both OpenMP runtimes are loaded in the same Python program. 
The state of these libraries is also accessible through the object oriented API: ```python >>> from threadpoolctl import ThreadpoolController, threadpool_info >>> from pprint import pprint >>> import numpy >>> controller = ThreadpoolController() >>> pprint(controller.info()) [{'architecture': 'Haswell', 'filepath': '/home/jeremie/miniconda/envs/dev/lib/libopenblasp-r0.3.17.so', 'internal_api': 'openblas', 'num_threads': 4, 'prefix': 'libopenblas', 'threading_layer': 'pthreads', 'user_api': 'blas', 'version': '0.3.17'}] >>> controller.info() == threadpool_info() True ``` ### Setting the Maximum Size of Thread-Pools Control the number of threads used by the underlying runtime libraries in specific sections of your Python program: ```python >>> from threadpoolctl import threadpool_limits >>> import numpy as np >>> with threadpool_limits(limits=1, user_api='blas'): ... # In this block, calls to blas implementation (like openblas or MKL) ... # will be limited to use only one thread. They can thus be used jointly ... # with thread-parallelism. ... a = np.random.randn(1000, 1000) ... a_squared = a @ a ``` The threadpools can also be controlled via the object oriented API, which is especially useful to avoid searching through all the loaded shared libraries each time. It will however not act on libraries loaded after the instantiation of the `ThreadpoolController`: ```python >>> from threadpoolctl import ThreadpoolController >>> import numpy as np >>> controller = ThreadpoolController() >>> with controller.limit(limits=1, user_api='blas'): ... a = np.random.randn(1000, 1000) ... a_squared = a @ a ``` ### Restricting the limits to the scope of a function `threadpool_limits` and `ThreadpoolController` can also be used as decorators to set the maximum number of threads used by the supported libraries at a function level. 
The decorators are accessible through their `wrap` method: ```python >>> from threadpoolctl import ThreadpoolController, threadpool_limits >>> import numpy as np >>> controller = ThreadpoolController() >>> @controller.wrap(limits=1, user_api='blas') ... # or @threadpool_limits.wrap(limits=1, user_api='blas') ... def my_func(): ... # Inside this function, calls to blas implementation (like openblas or MKL) ... # will be limited to use only one thread. ... a = np.random.randn(1000, 1000) ... a_squared = a @ a ... ``` ### Sequential BLAS within OpenMP parallel region When one wants to have sequential BLAS calls within an OpenMP parallel region, it's safer to set `limits="sequential_blas_under_openmp"` since setting `limits=1` and `user_api="blas"` might not lead to the expected behavior in some configurations (e.g. OpenBLAS with the OpenMP threading layer https://github.com/xianyi/OpenBLAS/issues/2985). ### Known Limitations - `threadpool_limits` can fail to limit the number of inner threads when nesting parallel loops managed by distinct OpenMP runtime implementations (for instance libgomp from GCC and libomp from clang/llvm or libiomp from ICC). See the `test_openmp_nesting` function in [tests/test_threadpoolctl.py]( https://github.com/joblib/threadpoolctl/blob/master/tests/test_threadpoolctl.py) for an example. More information can be found at: https://github.com/jeremiedbb/Nested_OpenMP Note however that this problem does not happen when `threadpool_limits` is used to limit the number of threads used internally by BLAS calls that are themselves nested under OpenMP parallel loops. `threadpool_limits` works as expected, even if the inner BLAS implementation relies on a distinct OpenMP implementation. - Using Intel OpenMP (ICC) and LLVM OpenMP (clang) in the same Python program under Linux is known to cause problems. 
See the following guide for more details and workarounds: https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md - Setting the maximum number of threads of the OpenMP and BLAS libraries has a global effect and impacts the whole Python process. There is no thread level isolation as these libraries do not offer thread-local APIs to configure the number of threads to use in nested parallel calls. ## Maintainers To make a release: Bump the version number (`__version__`) in `threadpoolctl.py`. Build the distribution archives: ```bash pip install flit flit build ``` Check the contents of `dist/`. If everything is fine, make a commit for the release, tag it, push the tag to github and then: ```bash flit publish ``` ### Credits The initial dynamic library introspection code was written by @anton-malakhov for the smp package available at https://github.com/IntelPython/smp . threadpoolctl extends this for other operating systems. Contrary to smp, threadpoolctl does not attempt to limit the size of Python multiprocessing pools (threads or processes) or set operating system-level CPU affinity constraints: threadpoolctl only interacts with native libraries via their public runtime APIs. 
threadpoolctl-3.1.0/benchmarks/000077500000000000000000000000001417600640100164735ustar00rootroot00000000000000threadpoolctl-3.1.0/benchmarks/bench_context_manager_overhead.py000066400000000000000000000015361417600640100252440ustar00rootroot00000000000000import time from argparse import ArgumentParser from pprint import pprint from statistics import mean, stdev from threadpoolctl import threadpool_info, threadpool_limits parser = ArgumentParser(description="Measure threadpool_limits call overhead.") parser.add_argument( "--import", dest="packages", default=[], nargs="+", help="Python packages to import to load threadpool enabled libraries.", ) parser.add_argument("--n-calls", type=int, default=100, help="Number of iterations") args = parser.parse_args() for package_name in args.packages: __import__(package_name) pprint(threadpool_info()) timings = [] for _ in range(args.n_calls): t = time.time() with threadpool_limits(limits=1): pass timings.append(time.time() - t) print(f"Overhead per call: {mean(timings) * 1e3:.3f} +/-{stdev(timings) * 1e3:.3f} ms") threadpoolctl-3.1.0/conftest.py000066400000000000000000000000571417600640100165570ustar00rootroot00000000000000collect_ignore = ["tests/_openmp_test_helper"] threadpoolctl-3.1.0/continuous_integration/000077500000000000000000000000001417600640100211675ustar00rootroot00000000000000threadpoolctl-3.1.0/continuous_integration/build_test_ext.sh000077500000000000000000000004501417600640100245430ustar00rootroot00000000000000#!/bin/bash set -e pushd tests/_openmp_test_helper rm -rf *.c *.so *.dylib build/ python setup_inner.py build_ext -i python setup_outer.py build_ext -i # skip scipy required extension if no numpy if [[ "$NO_NUMPY" != "true" ]]; then python setup_nested_prange_blas.py build_ext -i fi popd threadpoolctl-3.1.0/continuous_integration/check_no_test_skipped.py000066400000000000000000000025071417600640100260740ustar00rootroot00000000000000"""Check tests are not skipped in every ci job""" from __future__ import 
print_function import os import sys import xml.etree.ElementTree as ET base_dir = sys.argv[1] # dict {test: result} where result is False if the test was skipped in every # job and True otherwise. always_skipped = {} for name in os.listdir(base_dir): # all test result files are in /base_dir/jobs.*/ dirs if name.startswith("stage1."): print("> processing test result from job", name.replace("stage1", "")) print(" > tests skipped:") result_file = os.path.join(base_dir, name, "test-data.xml") root = ET.parse(result_file).getroot() # All tests are identified by the xml tag testcase. for test in root.iter("testcase"): test_name = test.attrib["name"] skipped = any(child.tag == "skipped" for child in test) if skipped: print(" -", test_name) if test_name in always_skipped: always_skipped[test_name] &= skipped else: always_skipped[test_name] = skipped print("\n------------------------------------------------------------------\n") fail = False for test, skipped in always_skipped.items(): if skipped: fail = True print(test, "was skipped in every job") if fail: sys.exit(1) threadpoolctl-3.1.0/continuous_integration/install.cmd000066400000000000000000000022471417600640100233270ustar00rootroot00000000000000@rem https://github.com/numba/numba/blob/master/buildscripts/incremental/setup_conda_environment.cmd @rem The cmd /C hack circumvents a regression where conda installs a conda.bat @rem script in non-root environments. set CONDA_INSTALL=cmd /C conda install -q -y set PIP_INSTALL=pip install -q @echo on @rem Deactivate any environment call deactivate @rem Clean up any left-over from a previous build and install version of python conda remove --all -q -y -n %VIRTUALENV% conda create -n %VIRTUALENV% -q -y python=%VERSION_PYTHON% call activate %VIRTUALENV% python -m pip install -U pip python --version pip --version @rem Install dependencies with either conda or pip. 
set TO_INSTALL=numpy scipy cython pytest if "%PACKAGER%" == "conda" (%CONDA_INSTALL% %TO_INSTALL%) if "%PACKAGER%" == "conda-forge" (%CONDA_INSTALL% -c conda-forge %TO_INSTALL% blas[build=%BLAS%]) if "%PACKAGER%" == "pip" (%PIP_INSTALL% %TO_INSTALL%) @rem Install extra developer dependencies pip install -q -r dev-requirements.txt @rem Install package flit install --symlink @rem Build the cython test helper for openmp bash ./continuous_integration/build_test_ext.sh if %errorlevel% neq 0 exit /b %errorlevel% threadpoolctl-3.1.0/continuous_integration/install.sh000077500000000000000000000057641417600640100232100ustar00rootroot00000000000000#!/bin/bash set -e UNAMESTR=`uname` if [[ "$CC_OUTER_LOOP" == "clang-10" || "$CC_INNER_LOOP" == "clang-10" ]]; then # Assume Ubuntu: install a recent version of clang and libomp wget https://apt.llvm.org/llvm.sh chmod +x llvm.sh sudo ./llvm.sh 10 sudo apt-get install libomp-dev fi make_conda() { TO_INSTALL="$@" if [[ "$UNAMESTR" == "Darwin" ]]; then if [[ "$INSTALL_LIBOMP" == "conda-forge" ]]; then # Install an OpenMP-enabled clang/llvm from conda-forge # assumes conda-forge is set on priority channel TO_INSTALL="$TO_INSTALL compilers llvm-openmp" export CFLAGS="$CFLAGS -I$CONDA/envs/$VIRTUALENV/include" export LDFLAGS="$LDFLAGS -Wl,-rpath,$CONDA/envs/$VIRTUALENV/lib -L$CONDA/envs/$VIRTUALENV/lib" elif [[ "$INSTALL_LIBOMP" == "homebrew" ]]; then # Install a compiler with a working openmp HOMEBREW_NO_AUTO_UPDATE=1 brew install libomp # enable OpenMP support for Apple-clang export CC=/usr/bin/clang export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp" export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include" export LDFLAGS="$LDFLAGS -Wl,-rpath,/usr/local/opt/libomp/lib -L/usr/local/opt/libomp/lib -lomp" fi fi conda create -n $VIRTUALENV -q --yes $TO_INSTALL source activate $VIRTUALENV } if [[ "$PACKAGER" == "conda" ]]; then TO_INSTALL="python=$VERSION_PYTHON pip" if [[ "$NO_NUMPY" != "true" ]]; then TO_INSTALL="$TO_INSTALL numpy scipy 
blas[build=$BLAS]" fi make_conda $TO_INSTALL elif [[ "$PACKAGER" == "conda-forge" ]]; then conda config --prepend channels conda-forge conda config --set channel_priority strict TO_INSTALL="python=$VERSION_PYTHON numpy scipy blas[build=$BLAS]" if [[ "$BLAS" == "openblas" && "$OPENBLAS_THREADING_LAYER" == "openmp" ]]; then TO_INSTALL="$TO_INSTALL libopenblas=*=*openmp*" fi make_conda $TO_INSTALL elif [[ "$PACKAGER" == "pip" ]]; then # Use conda to build an empty python env and then use pip to install # numpy and scipy TO_INSTALL="python=$VERSION_PYTHON pip" make_conda $TO_INSTALL if [[ "$NO_NUMPY" != "true" ]]; then pip install numpy scipy fi elif [[ "$PACKAGER" == "ubuntu" ]]; then # Remove the ubuntu toolchain PPA that seems to be invalid: # https://github.com/scikit-learn/scikit-learn/pull/13934 sudo add-apt-repository --remove ppa:ubuntu-toolchain-r/test sudo apt-get update sudo apt-get install python3-scipy python3-virtualenv $APT_BLAS python3 -m virtualenv --system-site-packages --python=python3 $VIRTUALENV source $VIRTUALENV/bin/activate fi python -m pip install -q -r dev-requirements.txt bash ./continuous_integration/build_test_ext.sh python --version python -c "import numpy; print(f'numpy {numpy.__version__}')" || echo "no numpy" python -c "import scipy; print(f'scipy {scipy.__version__}')" || echo "no scipy" python -m flit install --symlink threadpoolctl-3.1.0/continuous_integration/install_with_blis.sh000077500000000000000000000025611417600640100252440ustar00rootroot00000000000000#!/bin/bash set -e pushd .. ABS_PATH=$(pwd) popd # Install a recent version of clang and libomp wget https://apt.llvm.org/llvm.sh chmod +x llvm.sh sudo ./llvm.sh 10 sudo apt-get install libomp-dev # create conda env conda create -n $VIRTUALENV -q --yes python=$VERSION_PYTHON pip cython source activate $VIRTUALENV if [[ "$BLIS_CC" == "gcc-8" ]]; then sudo apt install gcc-8 fi pushd .. 
# build & install blis mkdir BLIS_install git clone https://github.com/flame/blis.git pushd blis ./configure --prefix=$ABS_PATH/BLIS_install --enable-cblas --enable-threading=$BLIS_ENABLE_THREADING CC=$BLIS_CC auto make -j4 make install popd # build & install numpy git clone https://github.com/numpy/numpy.git pushd numpy git submodule update --init echo "[blis] libraries = blis library_dirs = $ABS_PATH/BLIS_install/lib include_dirs = $ABS_PATH/BLIS_install/include/blis runtime_library_dirs = $ABS_PATH/BLIS_install/lib" > site.cfg python setup.py build_ext -i pip install -e . popd popd python -m pip install -q -r dev-requirements.txt CFLAGS=-I$ABS_PATH/BLIS_install/include/blis LDFLAGS=-L$ABS_PATH/BLIS_install/lib \ bash ./continuous_integration/build_test_ext.sh python --version python -c "import numpy; print(f'numpy {numpy.__version__}')" || echo "no numpy" python -c "import scipy; print(f'scipy {scipy.__version__}')" || echo "no scipy" python -m flit install --symlink threadpoolctl-3.1.0/continuous_integration/posix.yml000066400000000000000000000033741417600640100230630ustar00rootroot00000000000000parameters: name: '' vmImage: '' matrix: [] jobs: - job: ${{ parameters.name }} pool: vmImage: ${{ parameters.vmImage }} strategy: matrix: ${{ insert }}: ${{ parameters.matrix }} steps: - bash: echo "##vso[task.prependpath]$CONDA/bin" displayName: Add conda to PATH condition: or(startsWith(variables['PACKAGER'], 'conda'), eq(variables['PACKAGER'], 'pip')) - bash: sudo chown -R $USER $CONDA # On Hosted macOS, the agent user doesn't have ownership of Miniconda's installation directory/ # We need to take ownership if we want to update conda or install packages globally displayName: Take ownership of conda installation condition: eq('${{ parameters.name }}', 'macOS') - script: | conda create -n tmp -y -c conda-forge python black source activate tmp black --check . 
conda deactivate displayName: Lint condition: eq(variables['LINT'], 'true') - script: | continuous_integration/install.sh displayName: 'Install without BLIS' condition: ne(variables['INSTALL_BLIS'], 'true') - script: | continuous_integration/install_with_blis.sh displayName: 'Install with BLIS' condition: eq(variables['INSTALL_BLIS'], 'true') - script: | continuous_integration/test_script.sh displayName: 'Test Library' - task: PublishTestResults@2 inputs: testResultsFiles: '$(JUNITXML)' testRunTitle: ${{ format('{0}-$(Agent.JobName)', parameters.name) }} displayName: 'Publish Test Results' condition: succeededOrFailed() - publish: $(JUNITXML) - script: | bash continuous_integration/upload_codecov.sh displayName: 'Upload to codecov' condition: succeeded() threadpoolctl-3.1.0/continuous_integration/test_script.cmd000066400000000000000000000004161417600640100242200ustar00rootroot00000000000000call activate %VIRTUALENV% # Use the CLI to display the effective runtime environment prior to # launching the tests: python -m threadpoolctl -i numpy scipy.linalg tests._openmp_test_helper.openmp_helpers_inner pytest -vlrxXs --junitxml=%JUNITXML% --cov=threadpoolctl threadpoolctl-3.1.0/continuous_integration/test_script.sh000077500000000000000000000010741417600640100240730ustar00rootroot00000000000000#!/bin/bash set -e if [[ "$PACKAGER" == conda* ]]; then source activate $VIRTUALENV elif [[ "$PACKAGER" == "pip" ]]; then # we actually use conda to install the base environment: source activate $VIRTUALENV elif [[ "$PACKAGER" == "ubuntu" ]]; then source $VIRTUALENV/bin/activate fi set -x # Use the CLI to display the effective runtime environment prior to # launching the tests: python -m threadpoolctl -i numpy scipy.linalg tests._openmp_test_helper.openmp_helpers_inner pytest -vlrxXs -W error -k "$TESTS" --junitxml=$JUNITXML --cov=threadpoolctl 
threadpoolctl-3.1.0/continuous_integration/upload_codecov.sh000077500000000000000000000004571417600640100245220ustar00rootroot00000000000000#!/bin/bash set -e if [[ "$PACKAGER" == "conda" ]]; then source activate $VIRTUALENV elif [[ "$PACKAGER" == "pip" ]]; then source activate $VIRTUALENV elif [[ "$PACKAGER" == "ubuntu" ]]; then source $VIRTUALENV/bin/activate fi pip install codecov codecov || echo "codecov upload failed" threadpoolctl-3.1.0/continuous_integration/windows.yml000066400000000000000000000016401417600640100234050ustar00rootroot00000000000000 parameters: name: '' vmImage: '' matrix: [] jobs: - job: ${{ parameters.name }} pool: vmImage: ${{ parameters.vmImage }} strategy: matrix: ${{ insert }}: ${{ parameters.matrix }} steps: - powershell: Write-Host "##vso[task.prependpath]$env:CONDA\Scripts" displayName: Add conda to PATH - script: | continuous_integration\\install.cmd displayName: 'Install' - script: | continuous_integration\\test_script.cmd displayName: 'Test threadpoolctl' - task: PublishTestResults@2 inputs: testResultsFiles: '$(JUNITXML)' testRunTitle: ${{ format('{0}-$(Agent.JobName)', parameters.name) }} displayName: 'Publish Test Results' condition: succeededOrFailed() - publish: $(JUNITXML) - script: | bash continuous_integration\\upload_codecov.sh displayName: 'Upload to codecov' condition: succeeded() threadpoolctl-3.1.0/dev-requirements.txt000066400000000000000000000000471417600640100204170ustar00rootroot00000000000000flit coverage pytest pytest-cov cython threadpoolctl-3.1.0/multiple_openmp.md000066400000000000000000000073141417600640100201160ustar00rootroot00000000000000# Multiple OpenMP Runtimes ## Context OpenMP is an API specification for parallel programming. There are many implementations of it, tied to a compiler most of the time: - `libgomp` for GCC (GNU C/C++ Compiler), - `libomp` for Clang (LLVM C/C++ Compiler), - `libiomp` for ICC (Intel C/C++ Compiler), - `vcomp` for MSVC (Microsoft Visual Studio C/C++ Compiler). 
In general, it is not advised to have different OpenMP runtime libraries (or even different copies of the same library) loaded at the same time in a program. It's considered an undefined behavior. Fortunately it is not as bad as it sounds in most situations. However this situation is frequent in the Python ecosystem since you can install packages compiled with different compilers (hence linked to different OpenMP implementations) and import them together in a Python program. A typical example is installing NumPy from Anaconda which is linked against MKL (Intel's math library) and another package that uses multi-threading with OpenMP directly in a compiled extension, as is the case in Scikit-learn (via Cython `prange`), LightGBM and XGBoost (via pragmas in the C++ source code). From our experience, **most OpenMP libraries can seamlessly coexist in a same program**. For instance, on Linux, we never observed any issue between `libgomp` and `libiomp`, which is the most common mix (NumPy with MKL + a package compiled with GCC, the most widely used C compiler on that platform). ## Incompatibility between Intel OpenMP and LLVM OpenMP under Linux The only unrecoverable incompatibility we encountered happens when loading a mix of compiled extensions linked with **`libomp` (LLVM/Clang) and `libiomp` (ICC), on Linux**, manifested by crashes or deadlocks. It can happen even with the simplest OpenMP calls like getting the maximum number of threads that will be used in a subsequent parallel region. A possible explanation is that `libomp` is actually a fork of `libiomp` causing name colliding for instance. Using `threadpoolctl` may crash your program in such a setting. **Fortunately this problem is very rare**: at the time of writing, all major binary distributions of Python packages for Linux use either GCC or ICC to build the Python scientific packages. 
Therefore this problem would only happen if some packagers decide to start shipping Python packages built with LLVM/Clang instead of GCC. Surprisingly, we never encountered this kind of issue on macOS, where this mix is the most frequent (Clang being the default C compiler on macOS). ## Workarounds for Intel OpenMP and LLVM OpenMP case As far as we know, the only workaround consists in making sure only of one of the two incompatible OpenMP libraries is loaded. For example: - Tell MKL (used by NumPy) to use the GNU OpenMP runtime instead of the Intel OpenMP runtime by setting the following environment variable: export MKL_THREADING_LAYER=GNU - Install a build of NumPy and SciPy linked against OpenBLAS instead of MKL. This can be done for instance by installing NumPy and SciPy from PyPI: pip install numpy scipy from the conda-forge conda channel: conda install -c conda-forge numpy scipy or from the default conda channel: conda install numpy scipy blas[build=openblas] - Re-build your OpenMP-enabled extensions from source with GCC (or ICC) instead of Clang if you want to keep on using NumPy/SciPy linked against MKL with the default `libiomp`-based threading layer. 
## References The above incompatibility has been reported upstream to the LLVM and Intel developers on the following public issue trackers/forums along with a minimal reproducer written in C: - https://bugs.llvm.org/show_bug.cgi?id=43565 - https://software.intel.com/en-us/forums/intel-c-compiler/topic/827607 threadpoolctl-3.1.0/pyproject.toml000066400000000000000000000015141417600640100172730ustar00rootroot00000000000000[build-system] requires = ["flit_core"] build-backend = "flit_core.buildapi" [tool.flit.metadata] module = "threadpoolctl" author = "Thomas Moreau" author-email = "thomas.moreau.2010@gmail.com" home-page = "https://github.com/joblib/threadpoolctl" description-file = "README.md" requires-python = ">=3.6" license = "BSD-3-Clause" classifiers = [ "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development :: Libraries :: Python Modules", ] [tool.black] line-length = 88 target_version = ['py36', 'py37', 'py38', 'py39'] experimental_string_processing = true threadpoolctl-3.1.0/tests/000077500000000000000000000000001417600640100155205ustar00rootroot00000000000000threadpoolctl-3.1.0/tests/__init__.py000066400000000000000000000000001417600640100176170ustar00rootroot00000000000000threadpoolctl-3.1.0/tests/_openmp_test_helper/000077500000000000000000000000001417600640100215535ustar00rootroot00000000000000threadpoolctl-3.1.0/tests/_openmp_test_helper/__init__.py000066400000000000000000000000001417600640100236520ustar00rootroot00000000000000threadpoolctl-3.1.0/tests/_openmp_test_helper/build_utils.py000066400000000000000000000010741417600640100244460ustar00rootroot00000000000000import os import sys def set_cc_variables(var_name="CC"): cc_var = os.environ.get(var_name) if cc_var is not None: os.environ["CC"] = 
cc_var if sys.platform == "darwin": os.environ["LDSHARED"] = cc_var + " -bundle -undefined dynamic_lookup" else: os.environ["LDSHARED"] = cc_var + " -shared" return cc_var def get_openmp_flag(): if sys.platform == "win32": return ["/openmp"] elif sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""): return [] return ["-fopenmp"] threadpoolctl-3.1.0/tests/_openmp_test_helper/nested_prange_blas.pyx000066400000000000000000000040311417600640100261320ustar00rootroot00000000000000cimport openmp from cython.parallel import parallel, prange import numpy as np IF USE_BLIS: cdef extern from 'cblas.h' nogil: ctypedef enum CBLAS_ORDER: CblasRowMajor=101 CblasColMajor=102 ctypedef enum CBLAS_TRANSPOSE: CblasNoTrans=111 CblasTrans=112 CblasConjTrans=113 void dgemm 'cblas_dgemm' ( CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB, int M, int N, int K, double alpha, double *A, int lda, double *B, int ldb, double beta, double *C, int ldc) ELSE: from scipy.linalg.cython_blas cimport dgemm from threadpoolctl import ThreadpoolController def check_nested_prange_blas(double[:, ::1] A, double[:, ::1] B, int nthreads): """Run multithreaded BLAS calls within OpenMP parallel loop""" cdef: int m = A.shape[0] int n = B.shape[0] int k = A.shape[1] double[:, ::1] C = np.empty((m, n)) int n_chunks = 100 int chunk_size = A.shape[0] // n_chunks char* trans = 't' char* no_trans = 'n' double alpha = 1.0 double beta = 0.0 int i int prange_num_threads int *prange_num_threads_ptr = &prange_num_threads inner_info = [None] with nogil, parallel(num_threads=nthreads): if openmp.omp_get_thread_num() == 0: with gil: inner_info[0] = ThreadpoolController().info() prange_num_threads_ptr[0] = openmp.omp_get_num_threads() for i in prange(n_chunks): IF USE_BLIS: dgemm(CblasRowMajor, CblasNoTrans, CblasTrans, chunk_size, n, k, alpha, &A[i * chunk_size, 0], k, &B[0, 0], k, beta, &C[i * chunk_size, 0], n) ELSE: dgemm(trans, no_trans, &n, &chunk_size, &k, &alpha, &B[0, 0], &k, &A[i * 
chunk_size, 0], &k, &beta, &C[i * chunk_size, 0], &n) return np.asarray(C), prange_num_threads, inner_info[0] threadpoolctl-3.1.0/tests/_openmp_test_helper/openmp_helpers_inner.pxd000066400000000000000000000000461417600640100265030ustar00rootroot00000000000000cdef int inner_openmp_loop(int) nogil threadpoolctl-3.1.0/tests/_openmp_test_helper/openmp_helpers_inner.pyx000066400000000000000000000016261417600640100265350ustar00rootroot00000000000000cimport openmp from cython.parallel import prange def check_openmp_num_threads(int n): """Run a short parallel section with OpenMP Return the number of threads that where effectively used by the OpenMP runtime. """ cdef int num_threads = -1 with nogil: num_threads = inner_openmp_loop(n) return num_threads cdef int inner_openmp_loop(int n) nogil: """Run a short parallel section with OpenMP Return the number of threads that where effectively used by the OpenMP runtime. This function is expected to run without the GIL and can be called by an outer OpenMP / prange loop written in Cython in another file. """ cdef long n_sum = 0 cdef int i, num_threads for i in prange(n): num_threads = openmp.omp_get_num_threads() n_sum += i if n_sum != (n - 1) * n / 2: # error return -1 return num_threads threadpoolctl-3.1.0/tests/_openmp_test_helper/openmp_helpers_outer.pyx000066400000000000000000000013241417600640100265530ustar00rootroot00000000000000cimport openmp from cython.parallel import prange from openmp_helpers_inner cimport inner_openmp_loop def check_nested_openmp_loops(int n, nthreads=None): """Run a short parallel section with OpenMP with nested calls The inner OpenMP loop has not necessarily been built/linked with the same runtime OpenMP runtime. 
""" cdef: int outer_num_threads = -1 int inner_num_threads = -1 int num_threads = nthreads or openmp.omp_get_max_threads() int i for i in prange(n, num_threads=num_threads, nogil=True): inner_num_threads = inner_openmp_loop(n) outer_num_threads = openmp.omp_get_num_threads() return outer_num_threads, inner_num_threads threadpoolctl-3.1.0/tests/_openmp_test_helper/setup_inner.py000066400000000000000000000021421417600640100244570ustar00rootroot00000000000000import os from distutils.core import setup from Cython.Build import cythonize from distutils.extension import Extension from build_utils import set_cc_variables from build_utils import get_openmp_flag original_environ = os.environ.copy() try: # Make it possible to compile the 2 OpenMP enabled Cython extensions # with different compilers and therefore different OpenMP runtimes. inner_loop_cc_var = set_cc_variables("CC_INNER_LOOP") openmp_flag = get_openmp_flag() ext_modules = [ Extension( "openmp_helpers_inner", ["openmp_helpers_inner.pyx"], extra_compile_args=openmp_flag, extra_link_args=openmp_flag, ) ] setup( name="_openmp_test_helper_inner", ext_modules=cythonize( ext_modules, compiler_directives={ "language_level": 3, "boundscheck": False, "wraparound": False, }, compile_time_env={"CC_INNER_LOOP": inner_loop_cc_var or "unknown"}, ), ) finally: os.environ.update(original_environ) threadpoolctl-3.1.0/tests/_openmp_test_helper/setup_nested_prange_blas.py000066400000000000000000000020521417600640100271630ustar00rootroot00000000000000import os from distutils.core import setup from Cython.Build import cythonize from distutils.extension import Extension from build_utils import set_cc_variables from build_utils import get_openmp_flag original_environ = os.environ.copy() try: set_cc_variables("CC_OUTER_LOOP") openmp_flag = get_openmp_flag() use_blis = os.getenv("INSTALL_BLIS", False) libraries = ["blis"] if use_blis else [] ext_modules = [ Extension( "nested_prange_blas", ["nested_prange_blas.pyx"], 
extra_compile_args=openmp_flag, extra_link_args=openmp_flag, libraries=libraries, ) ] setup( name="_openmp_test_helper_nested_prange_blas", ext_modules=cythonize( ext_modules, compile_time_env={"USE_BLIS": use_blis}, compiler_directives={ "language_level": 3, "boundscheck": False, "wraparound": False, }, ), ) finally: os.environ.update(original_environ) threadpoolctl-3.1.0/tests/_openmp_test_helper/setup_outer.py000066400000000000000000000021421417600640100245020ustar00rootroot00000000000000import os from distutils.core import setup from Cython.Build import cythonize from distutils.extension import Extension from build_utils import set_cc_variables from build_utils import get_openmp_flag original_environ = os.environ.copy() try: # Make it possible to compile the 2 OpenMP enabled Cython extensions # with different compilers and therefore different OpenMP runtimes. outer_loop_cc_var = set_cc_variables("CC_OUTER_LOOP") openmp_flag = get_openmp_flag() ext_modules = [ Extension( "openmp_helpers_outer", ["openmp_helpers_outer.pyx"], extra_compile_args=openmp_flag, extra_link_args=openmp_flag, ) ] setup( name="_openmp_test_helper_outer", ext_modules=cythonize( ext_modules, compiler_directives={ "language_level": 3, "boundscheck": False, "wraparound": False, }, compile_time_env={"CC_OUTER_LOOP": outer_loop_cc_var or "unknown"}, ), ) finally: os.environ.update(original_environ) threadpoolctl-3.1.0/tests/test_threadpoolctl.py000066400000000000000000000602011417600640100217740ustar00rootroot00000000000000import json import os import pytest import re import subprocess import sys from threadpoolctl import threadpool_limits, threadpool_info from threadpoolctl import ThreadpoolController from threadpoolctl import _ALL_PREFIXES, _ALL_USER_APIS from .utils import cython_extensions_compiled from .utils import libopenblas_paths from .utils import scipy from .utils import threadpool_info_from_subprocess from .utils import select def is_old_openblas(lib_controller): # Possible bug in 
getting maximum number of threads with OpenBLAS < 0.2.16 # and OpenBLAS does not expose its version before 0.3.4. return lib_controller.internal_api == "openblas" and lib_controller.version is None def effective_num_threads(nthreads, max_threads): if nthreads is None or nthreads > max_threads: return max_threads return nthreads def test_threadpool_info(): # Check consistency between threadpool_info and ThreadpoolController function_info = threadpool_info() object_info = ThreadpoolController().lib_controllers for lib_info, lib_controller in zip(function_info, object_info): assert lib_info == lib_controller.info() def test_threadpool_controller_info(): # Check that all keys expected for the private api are in the dicts # returned by the `info` methods controller = ThreadpoolController() assert threadpool_info() == [ lib_controller.info() for lib_controller in controller.lib_controllers ] assert controller.info() == [ lib_controller.info() for lib_controller in controller.lib_controllers ] for lib_controller_dict in controller.info(): assert "user_api" in lib_controller_dict assert "internal_api" in lib_controller_dict assert "prefix" in lib_controller_dict assert "filepath" in lib_controller_dict assert "version" in lib_controller_dict assert "num_threads" in lib_controller_dict if lib_controller_dict["internal_api"] in ("mkl", "blis", "openblas"): assert "threading_layer" in lib_controller_dict def test_controller_info_actualized(): # Check that the num_threads attribute reflects the actual state of the threadpools controller = ThreadpoolController() original_info = controller.info() with threadpool_limits(limits=1): assert all( lib_controller.num_threads == 1 for lib_controller in controller.lib_controllers ) assert controller.info() == original_info @pytest.mark.parametrize( "kwargs", [ {"user_api": "blas"}, {"prefix": "libgomp"}, {"internal_api": "openblas", "prefix": "libomp"}, {"prefix": ["libgomp", "libomp", "libiomp"]}, ], ) def 
test_threadpool_controller_select(kwargs): # Check the behior of the select method of ThreadpoolController controller = ThreadpoolController().select(**kwargs) if not controller: pytest.skip(f"Requires at least one of {list(kwargs.values())}.") for lib_controller in controller.lib_controllers: assert any( getattr(lib_controller, key) in (val if isinstance(val, list) else [val]) for key, val in kwargs.items() ) @pytest.mark.parametrize("prefix", _ALL_PREFIXES) @pytest.mark.parametrize("limit", [1, 3]) def test_threadpool_limits_by_prefix(prefix, limit): # Check that the maximum number of threads can be set by prefix controller = ThreadpoolController() original_info = controller.info() controller_matching_prefix = controller.select(prefix=prefix) if not controller_matching_prefix: pytest.skip(f"Requires {prefix} runtime") with threadpool_limits(limits={prefix: limit}): for lib_controller in controller_matching_prefix.lib_controllers: if is_old_openblas(lib_controller): continue # threadpool_limits only sets an upper bound on the number of # threads. assert 0 < lib_controller.num_threads <= limit assert ThreadpoolController().info() == original_info @pytest.mark.parametrize("user_api", (None, "blas", "openmp")) @pytest.mark.parametrize("limit", [1, 3]) def test_set_threadpool_limits_by_api(user_api, limit): # Check that the maximum number of threads can be set by user_api controller = ThreadpoolController() original_info = controller.info() if user_api is None: controller_matching_api = controller else: controller_matching_api = controller.select(user_api=user_api) if not controller_matching_api: user_apis = _ALL_USER_APIS if user_api is None else [user_api] pytest.skip(f"Requires a library which api is in {user_apis}") with threadpool_limits(limits=limit, user_api=user_api): for lib_controller in controller_matching_api.lib_controllers: if is_old_openblas(lib_controller): continue # threadpool_limits only sets an upper bound on the number of # threads. 
assert 0 < lib_controller.num_threads <= limit assert ThreadpoolController().info() == original_info def test_threadpool_limits_function_with_side_effect(): # Check that threadpool_limits can be used as a function with # side effects instead of a context manager. original_info = ThreadpoolController().info() threadpool_limits(limits=1) try: for lib_controller in ThreadpoolController().lib_controllers: if is_old_openblas(lib_controller): continue assert lib_controller.num_threads == 1 finally: # Restore the original limits so that this test does not have any # side-effect. threadpool_limits(limits=original_info) assert ThreadpoolController().info() == original_info def test_set_threadpool_limits_no_limit(): # Check that limits=None does nothing. original_info = ThreadpoolController().info() with threadpool_limits(limits=None): assert ThreadpoolController().info() == original_info assert ThreadpoolController().info() == original_info def test_threadpool_limits_manual_restore(): # Check that threadpool_limits can be used as an object which holds the # original state of the threadpools and that can be restored thanks to the # dedicated restore_original_limits method original_info = ThreadpoolController().info() limits = threadpool_limits(limits=1) try: for lib_controller in ThreadpoolController().lib_controllers: if is_old_openblas(lib_controller): continue assert lib_controller.num_threads == 1 finally: # Restore the original limits so that this test does not have any # side-effect. limits.restore_original_limits() assert ThreadpoolController().info() == original_info def test_threadpool_controller_limit(): # Check that using the limit method of ThreadpoolController only impact its # library controllers. 
blas_controller = ThreadpoolController().select(user_api="blas") original_openmp_info = ThreadpoolController().select(user_api="openmp").info() with blas_controller.limit(limits=1): blas_controller = ThreadpoolController().select(user_api="blas") openmp_info = ThreadpoolController().select(user_api="openmp").info() assert all( lib_controller.num_threads == 1 for lib_controller in blas_controller.lib_controllers ) # original_blas_controller contains only blas libraries so no opemp library # should be impacted. This is not True for OpenBLAS with the OpenMP threading # layer. if not any( lib_controller.internal_api == "openblas" and lib_controller.threading_layer == "openmp" for lib_controller in blas_controller.lib_controllers ): assert openmp_info == original_openmp_info def test_get_params_for_sequential_blas_under_openmp(): # Test for the behavior of get_params_for_sequential_blas_under_openmp. controller = ThreadpoolController() original_info = controller.info() params = controller._get_params_for_sequential_blas_under_openmp() if controller.select( internal_api="openblas", threading_layer="openmp" ).lib_controllers: assert params["limits"] is None assert params["user_api"] is None with controller.limit(limits="sequential_blas_under_openmp"): assert controller.info() == original_info else: assert params["limits"] == 1 assert params["user_api"] == "blas" with controller.limit(limits="sequential_blas_under_openmp"): assert all( lib_info["num_threads"] == 1 for lib_info in controller.info() if lib_info["user_api"] == "blas" ) def test_nested_limits(): # Check that exiting the context manager properly restores the original limits even # when nested. 
controller = ThreadpoolController() original_info = controller.info() if any(info["num_threads"] < 2 for info in original_info): pytest.skip("Test requires at least 2 CPUs on host machine") def check_num_threads(expected_num_threads): assert all( lib_controller.num_threads == expected_num_threads for lib_controller in ThreadpoolController().lib_controllers ) with controller.limit(limits=1): check_num_threads(expected_num_threads=1) with controller.limit(limits=2): check_num_threads(expected_num_threads=2) check_num_threads(expected_num_threads=1) assert ThreadpoolController().info() == original_info def test_threadpool_limits_bad_input(): # Check that appropriate errors are raised for invalid arguments match = re.escape(f"user_api must be either in {_ALL_USER_APIS} or None.") with pytest.raises(ValueError, match=match): threadpool_limits(limits=1, user_api="wrong") with pytest.raises( TypeError, match="limits must either be an int, a list, a dict, or" ): threadpool_limits(limits=(1, 2, 3)) @pytest.mark.skipif( not cython_extensions_compiled, reason="Requires cython extensions to be compiled" ) @pytest.mark.parametrize("num_threads", [1, 2, 4]) def test_openmp_limit_num_threads(num_threads): # checks that OpenMP effectively uses the number of threads requested by # the context manager import tests._openmp_test_helper.openmp_helpers_inner as omp_inner check_openmp_num_threads = omp_inner.check_openmp_num_threads old_num_threads = check_openmp_num_threads(100) with threadpool_limits(limits=num_threads): assert check_openmp_num_threads(100) in (num_threads, old_num_threads) assert check_openmp_num_threads(100) == old_num_threads @pytest.mark.skipif( not cython_extensions_compiled, reason="Requires cython extensions to be compiled" ) @pytest.mark.parametrize("nthreads_outer", [None, 1, 2, 4]) def test_openmp_nesting(nthreads_outer): # checks that OpenMP effectively uses the number of threads requested by # the context manager when nested in an outer OpenMP loop. 
import tests._openmp_test_helper.openmp_helpers_outer as omp_outer check_nested_openmp_loops = omp_outer.check_nested_openmp_loops # Find which OpenMP lib is used at runtime for inner loop inner_info = threadpool_info_from_subprocess( "tests._openmp_test_helper.openmp_helpers_inner" ) assert len(inner_info) == 1 inner_omp = inner_info[0]["prefix"] # Find which OpenMP lib is used at runtime for outer loop outer_info = threadpool_info_from_subprocess( "tests._openmp_test_helper.openmp_helpers_outer" ) if len(outer_info) == 1: # Only 1 openmp loaded. It has to be this one. outer_omp = outer_info[0]["prefix"] else: # There are 2 openmp, the one from inner and the one from outer. assert len(outer_info) == 2 # We already know the one from inner. It has to be the other one. prefixes = {lib_info["prefix"] for lib_info in outer_info} outer_omp = prefixes - {inner_omp} outer_num_threads, inner_num_threads = check_nested_openmp_loops(10) original_info = ThreadpoolController().info() if inner_omp == outer_omp: # The OpenMP runtime should be shared by default, meaning that the # inner loop should automatically be run serially by the OpenMP runtime assert inner_num_threads == 1 with threadpool_limits(limits=1) as threadpoolctx: max_threads = threadpoolctx.get_original_num_threads()["openmp"] nthreads = effective_num_threads(nthreads_outer, max_threads) # Ask outer loop to run on nthreads threads and inner loop run on 1 # thread outer_num_threads, inner_num_threads = check_nested_openmp_loops(10, nthreads) # The state of the original state of all threadpools should have been # restored. 
assert ThreadpoolController().info() == original_info # The number of threads available in the outer loop should not have been # decreased: assert outer_num_threads == nthreads # The number of threads available in the inner loop should have been # set to 1 to avoid oversubscription and preserve performance: if inner_omp != outer_omp: if inner_num_threads != 1: # XXX: this does not always work when nesting independent openmp # implementations. See: https://github.com/jeremiedbb/Nested_OpenMP pytest.xfail( f"Inner OpenMP num threads was {inner_num_threads} instead of 1" ) assert inner_num_threads == 1 def test_shipped_openblas(): # checks that OpenBLAS effectively uses the number of threads requested by # the context manager original_info = ThreadpoolController().info() openblas_controller = ThreadpoolController().select(internal_api="openblas") with threadpool_limits(1): for lib_controller in openblas_controller.lib_controllers: assert lib_controller.num_threads == 1 assert ThreadpoolController().info() == original_info @pytest.mark.skipif( len(libopenblas_paths) < 2, reason="need at least 2 shipped openblas library" ) def test_multiple_shipped_openblas(): # This redundant test is meant to make it easier to see if the system # has 2 or more active openblas runtimes available just by reading the # pytest report (whether or not this test has been skipped). test_shipped_openblas() @pytest.mark.skipif(scipy is None, reason="requires scipy") @pytest.mark.skipif( not cython_extensions_compiled, reason="Requires cython extensions to be compiled" ) @pytest.mark.parametrize("nthreads_outer", [None, 1, 2, 4]) def test_nested_prange_blas(nthreads_outer): # Check that the BLAS linked to scipy effectively uses the number of # threads requested by the context manager when nested in an outer OpenMP # loop. 
import numpy as np import tests._openmp_test_helper.nested_prange_blas as prange_blas check_nested_prange_blas = prange_blas.check_nested_prange_blas original_info = ThreadpoolController().info() blas_controller = ThreadpoolController().select(user_api="blas") blis_controller = ThreadpoolController().select(internal_api="blis") # skip if the BLAS used by numpy is an old openblas. OpenBLAS 0.3.3 and # older are known to cause an unrecoverable deadlock at process shutdown # time (after pytest has exited). # numpy can be linked to BLIS for CBLAS and OpenBLAS for LAPACK. In that # case this test will run BLIS gemm so no need to skip. if not blis_controller and any( is_old_openblas(lib_controller) for lib_controller in blas_controller.lib_controllers ): pytest.skip("Old OpenBLAS: skipping test to avoid deadlock") A = np.ones((1000, 10)) B = np.ones((100, 10)) with threadpool_limits(limits=1) as threadpoolctx: max_threads = threadpoolctx.get_original_num_threads()["openmp"] nthreads = effective_num_threads(nthreads_outer, max_threads) result = check_nested_prange_blas(A, B, nthreads) C, prange_num_threads, inner_info = result assert np.allclose(C, np.dot(A, B.T)) assert prange_num_threads == nthreads nested_blas_info = select(inner_info, user_api="blas") assert len(nested_blas_info) == len(blas_controller.lib_controllers) assert all(lib_info["num_threads"] == 1 for lib_info in nested_blas_info) assert ThreadpoolController().info() == original_info # the method `get_original_num_threads` raises a UserWarning due to different # num_threads from libraries with the same `user_api`. It will be raised only # in the CI job with 2 openblas (py37_pip_openblas_gcc_clang). It is expected # so we can safely filter it. 
@pytest.mark.filterwarnings("ignore::UserWarning") @pytest.mark.parametrize("limit", [1, None]) def test_get_original_num_threads(limit): # Tests the method get_original_num_threads of the context manager with threadpool_limits(limits=2, user_api="blas") as ctx: # set different blas num threads to start with (when multiple openblas) if len(ctx._controller.select(user_api="blas")) > 1: ctx._controller.lib_controllers[0].set_num_threads(1) original_info = ThreadpoolController().info() with threadpool_limits(limits=limit, user_api="blas") as threadpoolctx: original_num_threads = threadpoolctx.get_original_num_threads() assert "openmp" not in original_num_threads blas_info = select(original_info, user_api="blas") if blas_info: expected = min(lib_info["num_threads"] for lib_info in blas_info) assert original_num_threads["blas"] == expected else: assert original_num_threads["blas"] is None if len(libopenblas_paths) >= 2: with pytest.warns(None, match="Multiple value possible"): threadpoolctx.get_original_num_threads() def test_mkl_threading_layer(): # Check that threadpool_info correctly recovers the threading layer used # by mkl mkl_controller = ThreadpoolController().select(internal_api="mkl") expected_layer = os.getenv("MKL_THREADING_LAYER") if not (mkl_controller and expected_layer): pytest.skip("requires MKL and the environment variable MKL_THREADING_LAYER set") actual_layer = mkl_controller.lib_controllers[0].threading_layer assert actual_layer == expected_layer.lower() def test_blis_threading_layer(): # Check that threadpool_info correctly recovers the threading layer used # by blis blis_controller = ThreadpoolController().select(internal_api="blis") expected_layer = os.getenv("BLIS_ENABLE_THREADING") if expected_layer == "no": expected_layer = "disabled" if not (blis_controller and expected_layer): pytest.skip( "requires BLIS and the environment variable BLIS_ENABLE_THREADING set" ) actual_layer = blis_controller.lib_controllers[0].threading_layer assert 
actual_layer == expected_layer @pytest.mark.skipif( not cython_extensions_compiled, reason="Requires cython extensions to be compiled" ) def test_libomp_libiomp_warning(recwarn): # Trigger the import of a potentially clang-compiled extension: import tests._openmp_test_helper.openmp_helpers_outer # noqa # Trigger the import of numpy to potentially import Intel OpenMP via MKL pytest.importorskip("numpy.linalg") # Check that a warning is raised when both libomp and libiomp are loaded # It should happen in one CI job (pylatest_conda_mkl_clang_gcc). controller = ThreadpoolController() prefixes = [lib_controller.prefix for lib_controller in controller.lib_controllers] if not ("libomp" in prefixes and "libiomp" in prefixes and sys.platform == "linux"): pytest.skip("Requires both libomp and libiomp loaded, on Linux") assert len(recwarn) == 1 wm = recwarn[0] assert wm.category == RuntimeWarning assert "Found Intel" in str(wm.message) assert "LLVM" in str(wm.message) assert "multiple_openmp.md" in str(wm.message) def test_command_line_empty(): output = subprocess.check_output((sys.executable + " -m threadpoolctl").split()) assert json.loads(output.decode("utf-8")) == [] def test_command_line_command_flag(): pytest.importorskip("numpy") output = subprocess.check_output( [sys.executable, "-m", "threadpoolctl", "-c", "import numpy"] ) cli_info = json.loads(output.decode("utf-8")) this_process_info = threadpool_info() for lib_info in cli_info: assert lib_info in this_process_info @pytest.mark.skipif( sys.version_info < (3, 7), reason="need recent subprocess.run options" ) def test_command_line_import_flag(): result = subprocess.run( [ sys.executable, "-m", "threadpoolctl", "-i", "numpy", "scipy.linalg", "invalid_package", "numpy.invalid_sumodule", ], capture_output=True, check=True, encoding="utf-8", ) cli_info = json.loads(result.stdout) this_process_info = threadpool_info() for lib_info in cli_info: assert lib_info in this_process_info warnings = [w.strip() for w in 
result.stderr.splitlines()] assert "WARNING: could not import invalid_package" in warnings assert "WARNING: could not import numpy.invalid_sumodule" in warnings if scipy is None: assert "WARNING: could not import scipy.linalg" in warnings else: assert "WARNING: could not import scipy.linalg" not in warnings def test_architecture(): expected_openblas_architectures = ( # XXX: add more as needed by CI or developer laptops "armv8", "Haswell", "Prescott", # see: https://github.com/xianyi/OpenBLAS/pull/3485 "SkylakeX", "Sandybridge", "VORTEX", "Zen", ) expected_blis_architectures = ( # XXX: add more as needed by CI or developer laptops "skx", "haswell", ) for lib_info in threadpool_info(): if lib_info["internal_api"] == "openblas": assert lib_info["architecture"] in expected_openblas_architectures elif lib_info["internal_api"] == "blis": assert lib_info["architecture"] in expected_blis_architectures else: # Not supported for other libraries assert "architecture" not in lib_info def test_openblas_threading_layer(): # Check that threadpool_info correctly recovers the threading layer used by openblas openblas_controller = ThreadpoolController().select(internal_api="openblas") if not (openblas_controller): pytest.skip("requires OpenBLAS.") expected_openblas_threading_layers = ("openmp", "pthreads", "disabled") threading_layer = openblas_controller.lib_controllers[0].threading_layer if threading_layer == "unknown": # If we never recover an acceptable value for the threading layer, it will be # always skipped and caught by check_no_test_always_skipped. pytest.skip("Unknown OpenBLAS threading layer.") assert threading_layer in expected_openblas_threading_layers def test_threadpool_controller_as_decorator(): # Check that using the decorator can be nested and is restricted to the scope of # the decorated function. 
controller = ThreadpoolController() original_info = controller.info() if any(info["num_threads"] < 2 for info in original_info): pytest.skip("Test requires at least 2 CPUs on host machine") if not controller.select(user_api="blas"): pytest.skip("Requires a blas runtime.") def check_blas_num_threads(expected_num_threads): blas_controller = ThreadpoolController().select(user_api="blas") assert all( lib_controller.num_threads == expected_num_threads for lib_controller in blas_controller.lib_controllers ) @controller.wrap(limits=1, user_api="blas") def outer_func(): check_blas_num_threads(expected_num_threads=1) inner_func() check_blas_num_threads(expected_num_threads=1) @controller.wrap(limits=2, user_api="blas") def inner_func(): check_blas_num_threads(expected_num_threads=2) outer_func() assert ThreadpoolController().info() == original_info threadpoolctl-3.1.0/tests/utils.py000066400000000000000000000043071417600640100172360ustar00rootroot00000000000000import os import json import sys import threadpoolctl from glob import glob from os.path import dirname, normpath from subprocess import check_output # Path to shipped openblas for libraries such as numpy or scipy libopenblas_patterns = [] try: # make sure the mkl/blas are loaded for test_threadpool_limits import numpy as np np.dot(np.ones(1000), np.ones(1000)) libopenblas_patterns.append(os.path.join(np.__path__[0], ".libs", "libopenblas*")) except ImportError: pass try: import scipy import scipy.linalg # noqa: F401 scipy.linalg.svd([[1, 2], [3, 4]]) libopenblas_patterns.append( os.path.join(scipy.__path__[0], ".libs", "libopenblas*") ) except ImportError: scipy = None libopenblas_paths = set( path for pattern in libopenblas_patterns for path in glob(pattern) ) try: import tests._openmp_test_helper.openmp_helpers_inner # noqa: F401 cython_extensions_compiled = True except ImportError: cython_extensions_compiled = False def threadpool_info_from_subprocess(module): """Utility to call threadpool_info in a subprocess 
`module` is imported before calling threadpool_info """ # set PYTHONPATH to import from non sub-modules path1 = normpath(dirname(threadpoolctl.__file__)) path2 = os.path.join(path1, "tests", "_openmp_test_helper") pythonpath = os.pathsep.join([path1, path2]) env = os.environ.copy() try: env["PYTHONPATH"] = os.pathsep.join([pythonpath, env["PYTHONPATH"]]) except KeyError: env["PYTHONPATH"] = pythonpath cmd = [sys.executable, "-m", "threadpoolctl", "-i", module] out = check_output(cmd, env=env).decode("utf-8") return json.loads(out) def select(info, **kwargs): """Select a subset of the list of library info matching the request""" # It's just a utility function to avoid repeating the pattern # [lib_info for lib_info in info if lib_info[""] == key] for key, vals in kwargs.items(): kwargs[key] = [vals] if not isinstance(vals, list) else vals selected_info = [ lib_info for lib_info in info if any(lib_info.get(key, None) in vals for key, vals in kwargs.items()) ] return selected_info threadpoolctl-3.1.0/threadpoolctl.py000066400000000000000000001202301417600640100175720ustar00rootroot00000000000000"""threadpoolctl This module provides utilities to introspect native libraries that relies on thread pools (notably BLAS and OpenMP implementations) and dynamically set the maximal number of threads they can use. 
""" # License: BSD 3-Clause # The code to introspect dynamically loaded libraries on POSIX systems is # adapted from code by Intel developer @anton-malakhov available at # https://github.com/IntelPython/smp (Copyright (c) 2017, Intel Corporation) # and also published under the BSD 3-Clause license import os import re import sys import ctypes import textwrap import warnings from ctypes.util import find_library from abc import ABC, abstractmethod from functools import lru_cache from contextlib import ContextDecorator __version__ = "3.1.0" __all__ = ["threadpool_limits", "threadpool_info", "ThreadpoolController"] # One can get runtime errors or even segfaults due to multiple OpenMP libraries # loaded simultaneously which can happen easily in Python when importing and # using compiled extensions built with different compilers and therefore # different OpenMP runtimes in the same program. In particular libiomp (used by # Intel ICC) and libomp used by clang/llvm tend to crash. This can happen for # instance when calling BLAS inside a prange. Setting the following environment # variable allows multiple OpenMP libraries to be loaded. It should not degrade # performances since we manually take care of potential over-subscription # performance issues, in sections of the code where nested OpenMP loops can # happen, by dynamically reconfiguring the inner OpenMP runtime to temporarily # disable it while under the scope of the outer OpenMP parallel section. os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True") # Structure to cast the info on dynamically loaded library. See # https://linux.die.net/man/3/dl_iterate_phdr for more details. 
_SYSTEM_UINT = ctypes.c_uint64 if sys.maxsize > 2 ** 32 else ctypes.c_uint32 _SYSTEM_UINT_HALF = ctypes.c_uint32 if sys.maxsize > 2 ** 32 else ctypes.c_uint16 class _dl_phdr_info(ctypes.Structure): _fields_ = [ ("dlpi_addr", _SYSTEM_UINT), # Base address of object ("dlpi_name", ctypes.c_char_p), # path to the library ("dlpi_phdr", ctypes.c_void_p), # pointer on dlpi_headers ("dlpi_phnum", _SYSTEM_UINT_HALF), # number of elements in dlpi_phdr ] # The RTLD_NOLOAD flag for loading shared libraries is not defined on Windows. try: _RTLD_NOLOAD = os.RTLD_NOLOAD except AttributeError: _RTLD_NOLOAD = ctypes.DEFAULT_MODE # List of the supported libraries. The items are indexed by the name of the # class to instantiate to create the library controller objects. The items hold # the possible prefixes of loaded shared objects, the name of the internal_api # to call, the name of the user_api and potentially some symbols that the library is # expected to have (this is necessary to distinguish between the blas implementations # when they are all renamed "libblas.dll" on conda-forge on windows). 
_SUPPORTED_LIBRARIES = { "OpenMPController": { "user_api": "openmp", "internal_api": "openmp", "filename_prefixes": ("libiomp", "libgomp", "libomp", "vcomp"), }, "OpenBLASController": { "user_api": "blas", "internal_api": "openblas", "filename_prefixes": ("libopenblas", "libblas"), "check_symbols": ("openblas_get_num_threads", "openblas_get_num_threads64_"), }, "MKLController": { "user_api": "blas", "internal_api": "mkl", "filename_prefixes": ("libmkl_rt", "mkl_rt", "libblas"), "check_symbols": ("MKL_Get_Max_Threads",), }, "BLISController": { "user_api": "blas", "internal_api": "blis", "filename_prefixes": ("libblis", "libblas"), "check_symbols": ("bli_thread_get_num_threads",), }, } # Helpers for the doc and test names _ALL_USER_APIS = list(set(lib["user_api"] for lib in _SUPPORTED_LIBRARIES.values())) _ALL_INTERNAL_APIS = [lib["internal_api"] for lib in _SUPPORTED_LIBRARIES.values()] _ALL_PREFIXES = list( set( prefix for lib in _SUPPORTED_LIBRARIES.values() for prefix in lib["filename_prefixes"] ) ) _ALL_BLAS_LIBRARIES = [ lib["internal_api"] for lib in _SUPPORTED_LIBRARIES.values() if lib["user_api"] == "blas" ] _ALL_OPENMP_LIBRARIES = list( _SUPPORTED_LIBRARIES["OpenMPController"]["filename_prefixes"] ) def _format_docstring(*args, **kwargs): def decorator(o): if o.__doc__ is not None: o.__doc__ = o.__doc__.format(*args, **kwargs) return o return decorator @lru_cache(maxsize=10000) def _realpath(filepath): """Small caching wrapper around os.path.realpath to limit system calls""" return os.path.realpath(filepath) @_format_docstring(USER_APIS=list(_ALL_USER_APIS), INTERNAL_APIS=_ALL_INTERNAL_APIS) def threadpool_info(): """Return the maximal number of threads for each detected library. Return a list with all the supported libraries that have been found. Each library is represented by a dict with the following information: - "user_api" : user API. Possible values are {USER_APIS}. - "internal_api": internal API. Possible values are {INTERNAL_APIS}. 
- "prefix" : filename prefix of the specific implementation. - "filepath": path to the loaded library. - "version": version of the library (if available). - "num_threads": the current thread limit. In addition, each library may contain internal_api specific entries. """ return ThreadpoolController().info() class _ThreadpoolLimiter: """The guts of ThreadpoolController.limit Refer to the docstring of ThreadpoolController.limit for more details. It will only act on the library controllers held by the provided `controller`. Using the default constructor sets the limits right away such that it can be used as a callable. Setting the limits can be delayed by using the `wrap` class method such that it can be used as a decorator. """ def __init__(self, controller, *, limits=None, user_api=None): self._controller = controller self._limits, self._user_api, self._prefixes = self._check_params( limits, user_api ) self._original_info = self._controller.info() self._set_threadpool_limits() def __enter__(self): return self def __exit__(self, type, value, traceback): self.restore_original_limits() @classmethod def wrap(cls, controller, *, limits=None, user_api=None): """Return an instance of this class that can be used as a decorator""" return _ThreadpoolLimiterDecorator( controller=controller, limits=limits, user_api=user_api ) def restore_original_limits(self): """Set the limits back to their original values""" for lib_controller, original_info in zip( self._controller.lib_controllers, self._original_info ): lib_controller.set_num_threads(original_info["num_threads"]) # Alias of `restore_original_limits` for backward compatibility unregister = restore_original_limits def get_original_num_threads(self): """Original num_threads from before calling threadpool_limits Return a dict `{user_api: num_threads}`. 
""" num_threads = {} warning_apis = [] for user_api in self._user_api: limits = [ lib_info["num_threads"] for lib_info in self._original_info if lib_info["user_api"] == user_api ] limits = set(limits) n_limits = len(limits) if n_limits == 1: limit = limits.pop() elif n_limits == 0: limit = None else: limit = min(limits) warning_apis.append(user_api) num_threads[user_api] = limit if warning_apis: warnings.warn( "Multiple value possible for following user apis: " + ", ".join(warning_apis) + ". Returning the minimum." ) return num_threads def _check_params(self, limits, user_api): """Suitable values for the _limits, _user_api and _prefixes attributes""" if isinstance(limits, str) and limits == "sequential_blas_under_openmp": ( limits, user_api, ) = self._controller._get_params_for_sequential_blas_under_openmp().values() if limits is None or isinstance(limits, int): if user_api is None: user_api = _ALL_USER_APIS elif user_api in _ALL_USER_APIS: user_api = [user_api] else: raise ValueError( f"user_api must be either in {_ALL_USER_APIS} or None. Got " f"{user_api} instead." ) if limits is not None: limits = {api: limits for api in user_api} prefixes = [] else: if isinstance(limits, list): # This should be a list of dicts of library info, for # compatibility with the result from threadpool_info. limits = { lib_info["prefix"]: lib_info["num_threads"] for lib_info in limits } elif isinstance(limits, ThreadpoolController): # To set the limits from the library controllers of a # ThreadpoolController object. limits = { lib_controller.prefix: lib_controller.num_threads for lib_controller in limits.lib_controllers } if not isinstance(limits, dict): raise TypeError( "limits must either be an int, a list, a dict, or " f"'sequential_blas_under_openmp'. Got {type(limits)} instead" ) # With a dictionary, can set both specific limit for given # libraries and global limit for user_api. Fetch each separately. 
prefixes = [prefix for prefix in limits if prefix in _ALL_PREFIXES] user_api = [api for api in limits if api in _ALL_USER_APIS] return limits, user_api, prefixes def _set_threadpool_limits(self): """Change the maximal number of threads in selected thread pools. Return a list with all the supported libraries that have been found matching `self._prefixes` and `self._user_api`. """ if self._limits is None: return for lib_controller in self._controller.lib_controllers: # self._limits is a dict {key: num_threads} where key is either # a prefix or a user_api. If a library matches both, the limit # corresponding to the prefix is chosen. if lib_controller.prefix in self._limits: num_threads = self._limits[lib_controller.prefix] elif lib_controller.user_api in self._limits: num_threads = self._limits[lib_controller.user_api] else: continue if num_threads is not None: lib_controller.set_num_threads(num_threads) class _ThreadpoolLimiterDecorator(_ThreadpoolLimiter, ContextDecorator): """Same as _ThreadpoolLimiter but to be used as a decorator""" def __init__(self, controller, *, limits=None, user_api=None): self._limits, self._user_api, self._prefixes = self._check_params( limits, user_api ) self._controller = controller def __enter__(self): # we need to set the limits here and not in the __init__ because we want the # limits to be set when calling the decorated function, not when creating the # decorator. self._original_info = self._controller.info() self._set_threadpool_limits() return self @_format_docstring( USER_APIS=", ".join(f'"{api}"' for api in _ALL_USER_APIS), BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES), OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES), ) class threadpool_limits(_ThreadpoolLimiter): """Change the maximal number of threads that can be used in thread pools. 
This object can be used either as a callable (the construction of this object limits the number of threads), as a context manager in a `with` block to automatically restore the original state of the controlled libraries when exiting the block, or as a decorator through its `wrap` method. Set the maximal number of threads that can be used in thread pools used in the supported libraries to `limit`. This function works for libraries that are already loaded in the interpreter and can be changed dynamically. This effect is global and impacts the whole Python process. There is no thread level isolation as these libraries do not offer thread-local APIs to configure the number of threads to use in nested parallel calls. Parameters ---------- limits : int, dict, 'sequential_blas_under_openmp' or None (default=None) The maximal number of threads that can be used in thread pools - If int, sets the maximum number of threads to `limits` for each library selected by `user_api`. - If it is a dictionary `{{key: max_threads}}`, this function sets a custom maximum number of threads for each `key` which can be either a `user_api` or a `prefix` for a specific library. - If 'sequential_blas_under_openmp', it will chose the appropriate `limits` and `user_api` parameters for the specific use case of sequential BLAS calls within an OpenMP parallel region. The `user_api` parameter is ignored. - If None, this function does not do anything. user_api : {USER_APIS} or None (default=None) APIs of libraries to limit. Used only if `limits` is an int. - If "blas", it will only limit BLAS supported libraries ({BLAS_LIBS}). - If "openmp", it will only limit OpenMP supported libraries ({OPENMP_LIBS}). Note that it can affect the number of threads used by the BLAS libraries if they rely on OpenMP. - If None, this function will apply to all supported libraries. 
""" def __init__(self, limits=None, user_api=None): super().__init__(ThreadpoolController(), limits=limits, user_api=user_api) @classmethod def wrap(cls, limits=None, user_api=None): return super().wrap(ThreadpoolController(), limits=limits, user_api=user_api) @_format_docstring( PREFIXES=", ".join(f'"{prefix}"' for prefix in _ALL_PREFIXES), USER_APIS=", ".join(f'"{api}"' for api in _ALL_USER_APIS), BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES), OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES), ) class ThreadpoolController: """Collection of LibController objects for all loaded supported libraries Attributes ---------- lib_controllers : list of `LibController` objects The list of library controllers of all loaded supported libraries. """ # Cache for libc under POSIX and a few system libraries under Windows. # We use a class level cache instead of an instance level cache because # it's very unlikely that a shared library will be unloaded and reloaded # during the lifetime of a program. _system_libraries = dict() def __init__(self): self.lib_controllers = [] self._load_libraries() self._warn_if_incompatible_openmp() @classmethod def _from_controllers(cls, lib_controllers): new_controller = cls.__new__(cls) new_controller.lib_controllers = lib_controllers return new_controller def info(self): """Return lib_controllers info as a list of dicts""" return [lib_controller.info() for lib_controller in self.lib_controllers] def select(self, **kwargs): """Return a ThreadpoolController containing a subset of its current library controllers It will select all libraries matching at least one pair (key, value) from kwargs where key is an entry of the library info dict (like "user_api", "internal_api", "prefix", ...) and value is the value or a list of acceptable values for that entry. For instance, `ThreadpoolController().select(internal_api=["blis", "openblas"])` will select all library controllers whose internal_api is either "blis" or "openblas". 
""" for key, vals in kwargs.items(): kwargs[key] = [vals] if not isinstance(vals, list) else vals lib_controllers = [ lib_controller for lib_controller in self.lib_controllers if any( getattr(lib_controller, key, None) in vals for key, vals in kwargs.items() ) ] return ThreadpoolController._from_controllers(lib_controllers) def _get_params_for_sequential_blas_under_openmp(self): """Return appropriate params to use for a sequential BLAS call in an OpenMP loop This function takes into account the unexpected behavior of OpenBLAS with the OpenMP threading layer. """ if self.select( internal_api="openblas", threading_layer="openmp" ).lib_controllers: return {"limits": None, "user_api": None} return {"limits": 1, "user_api": "blas"} @_format_docstring( USER_APIS=", ".join('"{}"'.format(api) for api in _ALL_USER_APIS), BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES), OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES), ) def limit(self, *, limits=None, user_api=None): """Change the maximal number of threads that can be used in thread pools. This function returns an object that can be used either as a callable (the construction of this object limits the number of threads) or as a context manager, in a `with` block to automatically restore the original state of the controlled libraries when exiting the block. Set the maximal number of threads that can be used in thread pools used in the supported libraries to `limits`. This function works for libraries that are already loaded in the interpreter and can be changed dynamically. This effect is global and impacts the whole Python process. There is no thread level isolation as these libraries do not offer thread-local APIs to configure the number of threads to use in nested parallel calls. Parameters ---------- limits : int, dict, 'sequential_blas_under_openmp' or None (default=None) The maximal number of threads that can be used in thread pools - If int, sets the maximum number of threads to `limits` for each library selected by `user_api`. 
        - If it is a dictionary `{{key: max_threads}}`, this function sets a
          custom maximum number of threads for each `key` which can be either a
          `user_api` or a `prefix` for a specific library.

        - If 'sequential_blas_under_openmp', it will choose the appropriate
          `limits` and `user_api` parameters for the specific use case of
          sequential BLAS calls within an OpenMP parallel region. The
          `user_api` parameter is ignored.

        - If None, this function does not do anything.

        user_api : {USER_APIS} or None (default=None)
            APIs of libraries to limit. Used only if `limits` is an int.

            - If "blas", it will only limit BLAS supported libraries
              ({BLAS_LIBS}).

            - If "openmp", it will only limit OpenMP supported libraries
              ({OPENMP_LIBS}). Note that it can affect the number of threads
              used by the BLAS libraries if they rely on OpenMP.

            - If None, this function will apply to all supported libraries.
        """
        return _ThreadpoolLimiter(self, limits=limits, user_api=user_api)

    @_format_docstring(
        USER_APIS=", ".join('"{}"'.format(api) for api in _ALL_USER_APIS),
        BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES),
        OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES),
    )
    def wrap(self, *, limits=None, user_api=None):
        """Change the maximal number of threads that can be used in thread pools.

        This function returns an object that can be used as a decorator.

        Set the maximal number of threads that can be used in thread pools used in
        the supported libraries to `limits`. This function works for libraries that
        are already loaded in the interpreter and can be changed dynamically.

        Parameters
        ----------
        limits : int, dict or None (default=None)
            The maximal number of threads that can be used in thread pools

            - If int, sets the maximum number of threads to `limits` for each
              library selected by `user_api`.

            - If it is a dictionary `{{key: max_threads}}`, this function sets a
              custom maximum number of threads for each `key` which can be either a
              `user_api` or a `prefix` for a specific library.

            - If None, this function does not do anything.

        user_api : {USER_APIS} or None (default=None)
            APIs of libraries to limit. Used only if `limits` is an int.

            - If "blas", it will only limit BLAS supported libraries
              ({BLAS_LIBS}).

            - If "openmp", it will only limit OpenMP supported libraries
              ({OPENMP_LIBS}). Note that it can affect the number of threads
              used by the BLAS libraries if they rely on OpenMP.

            - If None, this function will apply to all supported libraries.
        """
        return _ThreadpoolLimiter.wrap(self, limits=limits, user_api=user_api)

    def __len__(self):
        # Number of supported library controllers currently tracked.
        return len(self.lib_controllers)

    def _load_libraries(self):
        """Loop through loaded shared libraries and store the supported ones.

        Dispatches to the platform-specific discovery mechanism: dyld on macOS,
        EnumProcessModulesEx on Windows, dl_iterate_phdr elsewhere (POSIX).
        """
        if sys.platform == "darwin":
            self._find_libraries_with_dyld()
        elif sys.platform == "win32":
            self._find_libraries_with_enum_process_module_ex()
        else:
            self._find_libraries_with_dl_iterate_phdr()

    def _find_libraries_with_dl_iterate_phdr(self):
        """Loop through loaded libraries and return binders on supported ones

        This function is expected to work on POSIX system only.
        This code is adapted from code by Intel developer @anton-malakhov
        available at https://github.com/IntelPython/smp

        Copyright (c) 2017, Intel Corporation published under the BSD 3-Clause
        license
        """
        libc = self._get_libc()
        # Old or exotic libc without dl_iterate_phdr: nothing we can do.
        if not hasattr(libc, "dl_iterate_phdr"):  # pragma: no cover
            return []

        # Callback function for `dl_iterate_phdr` which is called for every
        # library loaded in the current process until it returns 1.
        def match_library_callback(info, size, data):
            # Get the path of the current library
            filepath = info.contents.dlpi_name
            if filepath:
                filepath = filepath.decode("utf-8")

                # Store the library controller if it is supported and selected
                self._make_controller_from_path(filepath)
            # Returning 0 tells dl_iterate_phdr to keep iterating over the
            # remaining loaded objects (a non-zero value would stop it).
            return 0

        c_func_signature = ctypes.CFUNCTYPE(
            ctypes.c_int,  # Return type
            ctypes.POINTER(_dl_phdr_info),
            ctypes.c_size_t,
            ctypes.c_char_p,
        )
        c_match_library_callback = c_func_signature(match_library_callback)

        # Opaque user data forwarded to the callback; unused here.
        data = ctypes.c_char_p(b"")
        libc.dl_iterate_phdr(c_match_library_callback, data)

    def _find_libraries_with_dyld(self):
        """Loop through loaded libraries and return binders on supported ones

        This function is expected to work on OSX system only
        """
        libc = self._get_libc()
        # libc without the dyld introspection API: nothing we can do.
        if not hasattr(libc, "_dyld_image_count"):  # pragma: no cover
            return []

        n_dyld = libc._dyld_image_count()
        libc._dyld_get_image_name.restype = ctypes.c_char_p

        # Image index 0 is the executable itself; the loop inspects every
        # image currently mapped by dyld.
        for i in range(n_dyld):
            filepath = ctypes.string_at(libc._dyld_get_image_name(i))
            filepath = filepath.decode("utf-8")

            # Store the library controller if it is supported and selected
            self._make_controller_from_path(filepath)

    def _find_libraries_with_enum_process_module_ex(self):
        """Loop through loaded libraries and return binders on supported ones

        This function is expected to work on windows system only.
        This code is adapted from code by Philipp Hagemeister @phihag available
        at https://stackoverflow.com/questions/17474574
        """
        from ctypes.wintypes import DWORD, HMODULE, MAX_PATH

        PROCESS_QUERY_INFORMATION = 0x0400
        PROCESS_VM_READ = 0x0010

        LIST_LIBRARIES_ALL = 0x03

        ps_api = self._get_windll("Psapi")
        kernel_32 = self._get_windll("kernel32")

        h_process = kernel_32.OpenProcess(
            PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, False, os.getpid()
        )
        if not h_process:  # pragma: no cover
            raise OSError(f"Could not open PID {os.getpid()}")

        try:
            buf_count = 256
            needed = DWORD()
            # Grow the buffer until it becomes large enough to hold all the
            # module headers
            while True:
                buf = (HMODULE * buf_count)()
                buf_size = ctypes.sizeof(buf)
                if not ps_api.EnumProcessModulesEx(
                    h_process,
                    ctypes.byref(buf),
                    buf_size,
                    ctypes.byref(needed),
                    LIST_LIBRARIES_ALL,
                ):
                    raise OSError("EnumProcessModulesEx failed")
                if buf_size >= needed.value:
                    break
                # needed.value is in bytes; divide by per-handle size
                # (buf_size // buf_count) to get the required handle count.
                buf_count = needed.value // (buf_size // buf_count)

            count = needed.value // (buf_size // buf_count)
            h_modules = map(HMODULE, buf[:count])

            # Loop through all the module headers and get the library path
            buf = ctypes.create_unicode_buffer(MAX_PATH)
            n_size = DWORD()
            for h_module in h_modules:
                # Get the path of the current module
                # NOTE(review): the Win32 signature documents the 4th argument
                # as nSize (a DWORD passed by value); passing byref(n_size)
                # follows the historical code here but looks unusual — confirm
                # against the GetModuleFileNameExW reference before changing.
                if not ps_api.GetModuleFileNameExW(
                    h_process, h_module, ctypes.byref(buf), ctypes.byref(n_size)
                ):
                    raise OSError("GetModuleFileNameEx failed")
                filepath = buf.value

                # Store the library controller if it is supported and selected
                self._make_controller_from_path(filepath)
        finally:
            # Always release the process handle, even if enumeration failed.
            kernel_32.CloseHandle(h_process)

    def _make_controller_from_path(self, filepath):
        """Store a library controller if it is supported and selected"""
        # Required to resolve symlinks
        filepath = _realpath(filepath)
        # `lower` required to take account of OpenMP dll case on Windows
        # (vcomp, VCOMP, Vcomp, ...)
        filename = os.path.basename(filepath).lower()

        # Loop through supported libraries to find if this filename corresponds
        # to a supported one.
        for controller_class, candidate_lib in _SUPPORTED_LIBRARIES.items():
            # check if filename matches a supported prefix
            prefix = self._check_prefix(filename, candidate_lib["filename_prefixes"])

            # filename does not match any of the prefixes of the candidate
            # library. move to next library.
            if prefix is None:
                continue

            # workaround for BLAS libraries packaged by conda-forge on windows,
            # which are all renamed "libblas.dll". We thus have to check to
            # which BLAS implementation it actually corresponds looking for
            # implementation specific symbols.
            if prefix == "libblas":
                if filename.endswith(".dll"):
                    # _RTLD_NOLOAD: attach to the already-loaded DLL without
                    # loading it a second time.
                    libblas = ctypes.CDLL(filepath, _RTLD_NOLOAD)
                    if not any(
                        hasattr(libblas, func)
                        for func in candidate_lib["check_symbols"]
                    ):
                        continue
                else:
                    # We ignore libblas on other platforms than windows because
                    # there might be a libblas dso coming with openblas for
                    # instance that can't be used to instantiate a pertinent
                    # LibController (many symbols are missing) and would create
                    # confusion by making a duplicate entry in threadpool_info.
                    continue

            # filename matches a prefix. Create and store the library
            # controller.
            user_api = candidate_lib["user_api"]
            internal_api = candidate_lib["internal_api"]

            # Controller classes are looked up by name in this module's
            # globals (e.g. "OpenBLASController").
            lib_controller_class = globals()[controller_class]
            lib_controller = lib_controller_class(
                filepath=filepath,
                prefix=prefix,
                user_api=user_api,
                internal_api=internal_api,
            )
            self.lib_controllers.append(lib_controller)

    def _check_prefix(self, library_basename, filename_prefixes):
        """Return the prefix library_basename starts with

        Return None if none matches.
        """
        for prefix in filename_prefixes:
            if library_basename.startswith(prefix):
                return prefix
        return None

    def _warn_if_incompatible_openmp(self):
        """Raise a warning if llvm-OpenMP and intel-OpenMP are both loaded"""
        if sys.platform != "linux":
            # Only raise the warning on linux
            return

        prefixes = [lib_controller.prefix for lib_controller in self.lib_controllers]
        msg = textwrap.dedent(
            """
            Found Intel OpenMP ('libiomp') and LLVM OpenMP ('libomp') loaded at
            the same time. Both libraries are known to be incompatible and this
            can cause random crashes or deadlocks on Linux when loaded in the
            same Python program.
            Using threadpoolctl may cause crashes or deadlocks. For more
            information and possible workarounds, please see
                https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md
            """
        )
        if "libomp" in prefixes and "libiomp" in prefixes:
            warnings.warn(msg, RuntimeWarning)

    @classmethod
    def _get_libc(cls):
        """Load the lib-C for unix systems."""
        # Cached at class level so the CDLL handle is created only once.
        libc = cls._system_libraries.get("libc")
        if libc is None:
            libc_name = find_library("c")
            if libc_name is None:  # pragma: no cover
                return None
            # mode=_RTLD_NOLOAD: attach to the libc already mapped in this
            # process instead of loading a new copy.
            libc = ctypes.CDLL(libc_name, mode=_RTLD_NOLOAD)
            cls._system_libraries["libc"] = libc
        return libc

    @classmethod
    def _get_windll(cls, dll_name):
        """Load a windows DLL"""
        # Cached at class level so each system DLL is opened only once.
        dll = cls._system_libraries.get(dll_name)
        if dll is None:
            dll = ctypes.WinDLL(f"{dll_name}.dll")
            cls._system_libraries[dll_name] = dll
        return dll


@_format_docstring(
    USER_APIS=", ".join('"{}"'.format(api) for api in _ALL_USER_APIS),
    INTERNAL_APIS=", ".join('"{}"'.format(api) for api in _ALL_INTERNAL_APIS),
)
class LibController(ABC):
    """Abstract base class for the individual library controllers

    A library controller is represented by the following information:
      - "user_api" : user API. Possible values are {USER_APIS}.
      - "internal_api" : internal API. Possible values are {INTERNAL_APIS}.
      - "prefix" : prefix of the shared library's name.
      - "filepath" : path to the loaded library.
      - "version" : version of the library (if available).
      - "num_threads" : the current thread limit.

    In addition, each library controller may contain internal_api specific
    entries.
    """

    def __init__(self, *, filepath=None, prefix=None, user_api=None, internal_api=None):
        self.user_api = user_api
        self.internal_api = internal_api
        self.prefix = prefix
        self.filepath = filepath
        # Attach to the already-loaded shared library (no new load).
        self._dynlib = ctypes.CDLL(filepath, mode=_RTLD_NOLOAD)
        self.version = self.get_version()

    def info(self):
        """Return relevant info wrapped in a dict"""
        # Expose all public attributes plus the live num_threads value;
        # private attributes (leading underscore, e.g. _dynlib) are dropped.
        all_attrs = dict(vars(self), **{"num_threads": self.num_threads})
        return {k: v for k, v in all_attrs.items() if not k.startswith("_")}

    @property
    def num_threads(self):
        # Always query the library so the value reflects the current limit.
        return self.get_num_threads()

    @abstractmethod
    def get_num_threads(self):
        """Return the maximum number of threads available to use"""
        pass  # pragma: no cover

    @abstractmethod
    def set_num_threads(self, num_threads):
        """Set the maximum number of threads to use"""
        pass  # pragma: no cover

    @abstractmethod
    def get_version(self):
        """Return the version of the shared library"""
        pass  # pragma: no cover


class OpenBLASController(LibController):
    """Controller class for OpenBLAS"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.threading_layer = self._get_threading_layer()
        self.architecture = self._get_architecture()

    def get_num_threads(self):
        get_func = getattr(
            self._dynlib,
            "openblas_get_num_threads",
            # Symbols differ when built for 64bit integers in Fortran
            getattr(self._dynlib, "openblas_get_num_threads64_", lambda: None),
        )
        return get_func()

    def set_num_threads(self, num_threads):
        set_func = getattr(
            self._dynlib,
            "openblas_set_num_threads",
            # Symbols differ when built for 64bit integers in Fortran
            getattr(
                self._dynlib, "openblas_set_num_threads64_", lambda num_threads: None
            ),
        )
        return set_func(num_threads)

    def get_version(self):
        # None means OpenBLAS is not loaded or version < 0.3.4, since OpenBLAS
        # did not expose its version before that.
        get_config = getattr(
            self._dynlib,
            "openblas_get_config",
            getattr(self._dynlib, "openblas_get_config64_", None),
        )
        if get_config is None:
            return None

        get_config.restype = ctypes.c_char_p
        # The config string starts with e.g. b"OpenBLAS 0.3.21 ..."; the
        # second token is the version.
        config = get_config().split()
        if config[0] == b"OpenBLAS":
            return config[1].decode("utf-8")
        return None

    def _get_threading_layer(self):
        """Return the threading layer of OpenBLAS"""
        openblas_get_parallel = getattr(
            self._dynlib,
            "openblas_get_parallel",
            getattr(self._dynlib, "openblas_get_parallel64_", None),
        )
        if openblas_get_parallel is None:
            # Symbol absent (old OpenBLAS): threading layer unknown.
            return "unknown"
        threading_layer = openblas_get_parallel()
        # openblas_get_parallel: 2 -> OpenMP, 1 -> pthreads, 0 -> sequential.
        if threading_layer == 2:
            return "openmp"
        elif threading_layer == 1:
            return "pthreads"
        return "disabled"

    def _get_architecture(self):
        """Return the architecture detected by OpenBLAS"""
        get_corename = getattr(
            self._dynlib,
            "openblas_get_corename",
            getattr(self._dynlib, "openblas_get_corename64_", None),
        )
        if get_corename is None:
            return None

        get_corename.restype = ctypes.c_char_p
        return get_corename().decode("utf-8")


class BLISController(LibController):
    """Controller class for BLIS"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.threading_layer = self._get_threading_layer()
        self.architecture = self._get_architecture()

    def get_num_threads(self):
        get_func = getattr(self._dynlib, "bli_thread_get_num_threads", lambda: None)
        num_threads = get_func()
        # by default BLIS is single-threaded and get_num_threads
        # returns -1. We map it to 1 for consistency with other libraries.
        return 1 if num_threads == -1 else num_threads

    def set_num_threads(self, num_threads):
        set_func = getattr(
            self._dynlib, "bli_thread_set_num_threads", lambda num_threads: None
        )
        return set_func(num_threads)

    def get_version(self):
        get_version_ = getattr(self._dynlib, "bli_info_get_version_str", None)
        if get_version_ is None:
            return None

        get_version_.restype = ctypes.c_char_p
        return get_version_().decode("utf-8")

    def _get_threading_layer(self):
        """Return the threading layer of BLIS"""
        # BLIS exposes its compile-time threading choice through these
        # boolean query functions.
        if self._dynlib.bli_info_get_enable_openmp():
            return "openmp"
        elif self._dynlib.bli_info_get_enable_pthreads():
            return "pthreads"
        return "disabled"

    def _get_architecture(self):
        """Return the architecture detected by BLIS"""
        bli_arch_query_id = getattr(self._dynlib, "bli_arch_query_id", None)
        bli_arch_string = getattr(self._dynlib, "bli_arch_string", None)
        if bli_arch_query_id is None or bli_arch_string is None:
            return None

        # the true restype should be BLIS' arch_t (enum) but int should work
        # for us:
        bli_arch_query_id.restype = ctypes.c_int
        bli_arch_string.restype = ctypes.c_char_p
        return bli_arch_string(bli_arch_query_id()).decode("utf-8")


class MKLController(LibController):
    """Controller class for MKL"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.threading_layer = self._get_threading_layer()

    def get_num_threads(self):
        get_func = getattr(self._dynlib, "MKL_Get_Max_Threads", lambda: None)
        return get_func()

    def set_num_threads(self, num_threads):
        set_func = getattr(
            self._dynlib, "MKL_Set_Num_Threads", lambda num_threads: None
        )
        return set_func(num_threads)

    def get_version(self):
        if not hasattr(self._dynlib, "MKL_Get_Version_String"):
            return None

        # MKL writes a human-readable banner into the provided buffer; parse
        # the "Version X.Y.Z " token out of it.
        res = ctypes.create_string_buffer(200)
        self._dynlib.MKL_Get_Version_String(res, 200)

        version = res.value.decode("utf-8")
        group = re.search(r"Version ([^ ]+) ", version)
        if group is not None:
            version = group.groups()[0]
        return version.strip()

    def _get_threading_layer(self):
        """Return the threading layer of MKL"""
        # The function mkl_set_threading_layer returns the current threading
        # layer. Calling it with an invalid threading layer allows us to
        # safely get the threading layer
        set_threading_layer = getattr(
            self._dynlib, "MKL_Set_Threading_Layer", lambda layer: -1
        )
        # Map MKL's numeric return codes to human-readable layer names;
        # -1 covers both "symbol missing" (fallback lambda) and "invalid".
        layer_map = {
            0: "intel",
            1: "sequential",
            2: "pgi",
            3: "gnu",
            4: "tbb",
            -1: "not specified",
        }
        return layer_map[set_threading_layer(-1)]


class OpenMPController(LibController):
    """Controller class for OpenMP"""

    def get_num_threads(self):
        get_func = getattr(self._dynlib, "omp_get_max_threads", lambda: None)
        return get_func()

    def set_num_threads(self, num_threads):
        set_func = getattr(
            self._dynlib, "omp_set_num_threads", lambda num_threads: None
        )
        return set_func(num_threads)

    def get_version(self):
        # There is no way to get the version number programmatically in OpenMP.
        return None


def _main():
    """Commandline interface to display thread-pool information and exit."""
    import argparse
    import importlib
    import json
    import sys

    parser = argparse.ArgumentParser(
        usage="python -m threadpoolctl -i numpy scipy.linalg xgboost",
        description="Display thread-pool information and exit.",
    )

    parser.add_argument(
        "-i",
        "--import",
        dest="modules",
        nargs="*",
        default=(),
        help="Python modules to import before introspecting thread-pools.",
    )

    parser.add_argument(
        "-c",
        "--command",
        help="a Python statement to execute before introspecting thread-pools.",
    )

    options = parser.parse_args(sys.argv[1:])

    # Import requested modules first so the thread-pools they load are
    # visible to the introspection below; a failed import is a warning,
    # not a fatal error.
    for module in options.modules:
        try:
            importlib.import_module(module, package=None)
        except ImportError:
            print("WARNING: could not import", module, file=sys.stderr)

    if options.command:
        exec(options.command)

    print(json.dumps(threadpool_info(), indent=2))


if __name__ == "__main__":
    _main()