===== array-api-compat-1.4/.github/dependabot.yml =====

version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    labels:
      - "github-actions"
      - "dependencies"
    reviewers:
      - "asmeurer"

===== array-api-compat-1.4/.github/workflows/array-api-tests-numpy-1-21.yml =====

name: Array API Tests (NumPy 1.21)

on: [push, pull_request]

jobs:
  array-api-tests-numpy-1-21:
    uses: ./.github/workflows/array-api-tests.yml
    with:
      package-name: numpy
      package-version: '== 1.21.*'
      xfails-file-extra: '-1-21'

===== array-api-compat-1.4/.github/workflows/array-api-tests-numpy.yml =====

name: Array API Tests (NumPy Latest)

on: [push, pull_request]

jobs:
  array-api-tests-numpy-latest:
    uses: ./.github/workflows/array-api-tests.yml
    with:
      package-name: numpy

===== array-api-compat-1.4/.github/workflows/array-api-tests-torch.yml =====

name: Array API Tests (PyTorch Latest)

on: [push, pull_request]

jobs:
  array-api-tests-torch:
    uses: ./.github/workflows/array-api-tests.yml
    with:
      package-name: torch
      # Proper linalg testing will require
      # https://github.com/data-apis/array-api-tests/pull/101
      pytest-extra-args: "--disable-extension linalg"

===== array-api-compat-1.4/.github/workflows/array-api-tests.yml =====

name: Array API Tests

on:
  workflow_call:
    inputs:
      package-name:
        required: true
        type: string
      package-version:
        required: false
        type: string
        default: '>= 0'
      pytest-extra-args:
        required: false
        type: string
      # This is not how I would prefer to implement this but it's the only way
      # that seems possible with GitHub Actions' limited expressions syntax
      xfails-file-extra:
        required: false
        type: string
      skips-file-extra:
        required: false
        type: string

env:
  PYTEST_ARGS: "--max-examples 200 -v -rxXfE --ci ${{ inputs.pytest-extra-args }}"

jobs:
  tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11']

    steps:
      - name: Checkout array-api-compat
        uses: actions/checkout@v3
        with:
          path: array-api-compat
      - name: Checkout array-api-tests
        uses: actions/checkout@v3
        with:
          repository: data-apis/array-api-tests
          submodules: 'true'
          path: array-api-tests
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        # NumPy 1.21 doesn't support Python 3.11. There doesn't seem to be a way
        # to put this in the numpy 1.21 config file.
        if: "! (matrix.python-version == '3.11' && inputs.package-name == 'numpy' && contains(inputs.package-version, '1.21'))"
        run: |
          python -m pip install --upgrade pip
          python -m pip install '${{ inputs.package-name }} ${{ inputs.package-version }}'
          python -m pip install -r ${GITHUB_WORKSPACE}/array-api-tests/requirements.txt
      - name: Run the array API testsuite (${{ inputs.package-name }})
        if: "! (matrix.python-version == '3.11' && inputs.package-name == 'numpy' && contains(inputs.package-version, '1.21'))"
        env:
          ARRAY_API_TESTS_MODULE: array_api_compat.${{ inputs.package-name }}
          # This enables the NEP 50 type promotion behavior (without it a lot of
          # tests fail on bad scalar type promotion behavior)
          NPY_PROMOTION_STATE: weak
        run: |
          export PYTHONPATH="${GITHUB_WORKSPACE}/array-api-compat"
          cd ${GITHUB_WORKSPACE}/array-api-tests
          pytest array_api_tests/ --xfails-file ${GITHUB_WORKSPACE}/array-api-compat/${{ inputs.package-name }}${{ inputs.xfails-file-extra }}-xfails.txt --skips-file ${GITHUB_WORKSPACE}/array-api-compat/${{ inputs.package-name }}${{ inputs.skips-file-extra }}-skips.txt ${PYTEST_ARGS}

===== array-api-compat-1.4/.github/workflows/publish-package.yml =====

name: publish distributions

on:
  push:
    branches:
      - main
    tags:
      - '[0-9]+.[0-9]+'
      - '[0-9]+.[0-9]+.[0-9]+'
  pull_request:
    branches:
      - main
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      publish:
        type: choice
        description: 'Publish to TestPyPI?'
        options:
          - false
          - true

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Build Python distribution
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install python-build and twine
        run: |
          python -m pip install --upgrade pip setuptools
          python -m pip install build twine
          python -m pip list

      - name: Build a wheel and a sdist
        run: |
          PYTHONWARNINGS=error,default::DeprecationWarning python -m build .

      - name: Verify the distribution
        run: twine check --strict dist/*

      - name: List contents of sdist
        run: python -m tarfile --list dist/array_api_compat-*.tar.gz

      - name: List contents of wheel
        run: python -m zipfile --list dist/array_api_compat-*.whl

      - name: Upload distribution artifact
        uses: actions/upload-artifact@v3
        with:
          name: dist-artifact
          path: dist

  publish:
    name: Publish Python distribution to (Test)PyPI
    if: github.event_name != 'pull_request' && github.repository == 'data-apis/array-api-compat'
    needs: build
    runs-on: ubuntu-latest
    # Mandatory for publishing with a trusted publisher
    # c.f. https://docs.pypi.org/trusted-publishers/using-a-publisher/
    permissions:
      id-token: write
      contents: write
    # Restrict to the environment set for the trusted publisher
    environment:
      name: publish-package

    steps:
      - name: Download distribution artifact
        uses: actions/download-artifact@v3
        with:
          name: dist-artifact
          path: dist

      - name: List all files
        run: ls -lh dist

      # - name: Publish distribution 📦 to Test PyPI
      #   # Publish to TestPyPI on tag events or if manually triggered
      #   # Compare to 'true' string as booleans get turned into strings in the console
      #   if: >-
      #     (github.event_name == 'push' && startsWith(github.ref, 'refs/tags'))
      #     || (github.event_name == 'workflow_dispatch' && github.event.inputs.publish == 'true')
      #   uses: pypa/gh-action-pypi-publish@v1.8.10
      #   with:
      #     repository-url: https://test.pypi.org/legacy/
      #     print-hash: true

      - name: Create GitHub Release from a Tag
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: dist/*

      - name: Publish distribution 📦 to PyPI
        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
        uses: pypa/gh-action-pypi-publish@v1.8.10
        with:
          print-hash: true

===== array-api-compat-1.4/.github/workflows/tests.yml =====

name: Tests

on: [push, pull_request]

jobs:
  tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11']
      fail-fast: true
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install pytest numpy torch

      - name: Run Tests
        run: |
          pytest
          # Make sure it installs
          python setup.py install

===== array-api-compat-1.4/.gitignore =====

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

===== array-api-compat-1.4/CHANGELOG.md =====

# 1.4 (2023-09-13)

## Major Changes

- Releases are now made with GitHub Actions (thanks [@matthewfeickert](https://github.com/matthewfeickert)).

## Minor Changes

- Fix `torch.result_type()` cross-kind promotion ([@lucascolley](https://github.com/lucascolley)).

- Fix the `torch.take()` wrapper to make `axis` optional for `ndim = 1`.

- Add `requires-python` metadata to the package ([@matthewfeickert](https://github.com/matthewfeickert)).

# 1.3 (2023-06-20)

## Major Changes

- Add [2022.12](https://data-apis.org/array-api/2022.12/) standard support. This includes things like adding complex dtype support, adding the new `take` function, and various minor changes in the specification.

## Minor Changes

- Support `"cpu"` in CuPy `to_device()`.

- Return a new array in NumPy/CuPy `reshape(copy=False)`.

- Fix signatures for PyTorch `broadcast_to` and `permute_dims`.

# 1.2 (2023-04-03)

## Major Changes

- Support the linalg extension in the `array_api_compat.torch` namespace.

- Add `isdtype()`.

## Minor Changes

- Fix the `k` keyword argument to `tril` and `triu` in `torch`.

# 1.1.1 (2023-03-10)

## Major Changes

- Rename `get_namespace()` to `array_namespace()` (`get_namespace()` is maintained as a backwards compatible alias).

## Minor Changes

- The minimum supported NumPy version is now 1.21. Fixed a few issues with NumPy 1.21 (with `unique_*` and `asarray`), although there are also a few known issues with this version (see the README).

- Add `api_version` to `get_namespace()`.

- `array_namespace()` (*née* `get_namespace()`) now works correctly with `torch` tensors.

- `array_namespace()` (*née* `get_namespace()`) now works correctly with `numpy.array_api` arrays.

- `array_namespace()` (*née* `get_namespace()`) now raises `TypeError` instead of `ValueError`.

- Fix the `torch.std` wrapper.

- Add `torch` wrappers for `ones`, `empty`, and `zeros` so that `shape` can be passed as a keyword argument.

# 1.1 (2023-02-24)

## Major Changes

- Added support for PyTorch.

- Add helper function `size()` (required if torch is used as `torch.Tensor.size` is a method that is incompatible with the array API [`.size`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html#array_api.array.size)).

- All wrapper functions that wrap existing library functions now pass through arbitrary `**kwargs`.

## Minor Changes

- Added CI to run against the [array API testsuite](https://github.com/data-apis/array-api-tests).

- Fix `sort(stable=False)` and `argsort(stable=False)` with CuPy.

# 1.0 (2022-12-05)

## Major Changes

- Initial release. Includes support for NumPy and CuPy.
===== array-api-compat-1.4/LICENSE =====

MIT License

Copyright (c) 2022 Consortium for Python Data API Standards

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

===== array-api-compat-1.4/README.md =====

# Array API compatibility library

This is a small wrapper around common array libraries that is compatible with
the [Array API standard](https://data-apis.org/array-api/latest/). Currently,
NumPy, CuPy, and PyTorch are supported. If you want support for other array
libraries, or if you encounter any issues, please [open an
issue](https://github.com/data-apis/array-api-compat/issues).

Note that some of the functionality in this library is backwards incompatible
with the corresponding wrapped libraries. The end-goal is to eventually make
each array library itself fully compatible with the array API, but this
requires making backwards incompatible changes in many cases, so this will
take some time.

Currently all libraries here are implemented against the [2022.12
version](https://data-apis.org/array-api/2022.12/) of the standard.

## Install

`array-api-compat` is available on both [PyPI](https://pypi.org/project/array-api-compat/)

```
python -m pip install array-api-compat
```

and [Conda-forge](https://anaconda.org/conda-forge/array-api-compat)

```
conda install --channel conda-forge array-api-compat
```

## Usage

The typical usage of this library will be to get the corresponding array API
compliant namespace from the input arrays using `array_namespace()`, like

```py
def your_function(x, y):
    xp = array_api_compat.array_namespace(x, y)
    # Now use xp as the array library namespace
    return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)
```

If you wish to have library-specific code-paths, you can import the
corresponding wrapped namespace for each library, like

```py
import array_api_compat.numpy as np
```

```py
import array_api_compat.cupy as cp
```

```py
import array_api_compat.torch as torch
```

Each will include all the functions from the normal NumPy/CuPy/PyTorch
namespace, except that functions that are part of the array API are wrapped so
that they have the correct array API behavior. In each case, the array object
used will be the same array object from the wrapped library.

## Difference between `array_api_compat` and `numpy.array_api`

`numpy.array_api` is a strict minimal implementation of the Array API (see
[NEP 47](https://numpy.org/neps/nep-0047-array-api-standard.html)). For
example, `numpy.array_api` does not include any functions that are not part of
the array API specification, and will explicitly disallow behaviors that are
not required by the spec (e.g., [cross-kind type
promotions](https://data-apis.org/array-api/latest/API_specification/type_promotion.html)).
(`cupy.array_api` is similar to `numpy.array_api`.)

`array_api_compat`, on the other hand, is just an extension of the
corresponding array library namespaces with changes needed to be compliant
with the array API. It includes all additional library functions not mentioned
in the spec, and allows any library behaviors not explicitly disallowed by it,
such as cross-kind casting.

In particular, unlike `numpy.array_api`, this package does not use a separate
`Array` object, but rather just uses the corresponding array library array
objects (`numpy.ndarray`, `cupy.ndarray`, `torch.Tensor`, etc.) directly. This
is because those are the objects that are going to be passed as inputs to
functions by end users. This does mean that a few behaviors cannot be wrapped
(see below), but most of the array API is functional, so this does not affect
most things.

Array consuming library authors coding against the array API may wish to test
against `numpy.array_api` to ensure they are not using functionality outside
of the standard, but prefer this implementation for the default behavior for
end-users.
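To make that contrast concrete, here is a small illustrative sketch (assuming
a NumPy version that still ships the experimental `numpy.array_api` module):

```py
import numpy.array_api as xp_strict  # emits a UserWarning; experimental module
import array_api_compat.numpy as xp_compat

# array_api_compat allows NumPy's cross-kind promotion between an integer
# array and a Python float:
x = xp_compat.asarray([1, 2, 3])
print(x + 1.0)  # works, promotes to a floating-point array

# numpy.array_api rejects the same operation, since the spec does not
# require cross-kind promotion:
y = xp_strict.asarray([1, 2, 3])
# y + 1.0  # would raise TypeError
```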
## Helper Functions

In addition to the wrapped library namespaces and functions in the array API
specification, there are several helper functions included here that aren't
part of the specification but which are useful for using the array API (a
usage sketch follows the list):

- `is_array_api_obj(x)`: Return `True` if `x` is an array API compatible
  array object.

- `array_namespace(*xs)`: Get the corresponding array API namespace for the
  arrays `xs`. For example, if the arrays are NumPy arrays, the returned
  namespace will be `array_api_compat.numpy`. Note that this function will
  also work for namespaces that aren't supported by this compat library but
  which do support the array API (i.e., arrays that have the
  `__array_namespace__` attribute).

- `device(x)`: Equivalent to
  [`x.device`](https://data-apis.org/array-api/latest/API_specification/generated/signatures.array_object.array.device.html)
  in the array API specification. Included because `numpy.ndarray` does not
  include the `device` attribute and this library does not wrap or extend the
  array object. Note that for NumPy, `device(x)` is always `"cpu"`.

- `to_device(x, device, /, *, stream=None)`: Equivalent to
  [`x.to_device`](https://data-apis.org/array-api/latest/API_specification/generated/signatures.array_object.array.to_device.html).
  Included because neither NumPy's, CuPy's, nor PyTorch's array objects
  include this method. For NumPy, this function effectively does nothing since
  the only supported device is the CPU, but for CuPy, this method supports
  CuPy CUDA
  [Device](https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Device.html)
  and
  [Stream](https://docs.cupy.dev/en/stable/reference/generated/cupy.cuda.Stream.html)
  objects. For PyTorch, this is the same as
  [`x.to(device)`](https://pytorch.org/docs/stable/generated/torch.Tensor.to.html)
  (the `stream` argument is not supported in PyTorch).

- `size(x)`: Equivalent to
  [`x.size`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html#array_api.array.size),
  i.e., the number of elements in the array. Included because PyTorch's
  `Tensor` defines `size` as a method which returns the shape, and this cannot
  be wrapped because this compat library doesn't wrap or extend the array
  objects.
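For instance, a minimal sketch of how these helpers fit together with a NumPy
array (the printed values are illustrative):

```py
import numpy as np
from array_api_compat import (
    is_array_api_obj, array_namespace, device, to_device, size,
)

x = np.asarray([[1.0, 2.0], [3.0, 4.0]])

assert is_array_api_obj(x)
xp = array_namespace(x)  # the array_api_compat.numpy namespace
print(device(x))         # "cpu" (always, for NumPy)
x = to_device(x, "cpu")  # a no-op for NumPy, which is CPU-only
print(size(x))           # 4, the total number of elements
```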
## Known Differences from the Array API Specification

There are some known differences between this library and the array API
specification:

### NumPy and CuPy

- The array methods `__array_namespace__`, `device` (for NumPy), `to_device`,
  and `mT` are not defined. This is because the library reuses `np.ndarray`
  and `cp.ndarray` and we don't want to monkeypatch or wrap them. The helper
  functions `device()` and `to_device()` are provided to work around these
  missing methods (see above). `x.mT` can be replaced with
  `xp.linalg.matrix_transpose(x)`. `array_namespace(x)` should be used instead
  of `x.__array_namespace__`.

- Value-based casting for scalars will be in effect unless explicitly disabled
  with the environment variable `NPY_PROMOTION_STATE=weak` or
  `np._set_promotion_state('weak')` (requires NumPy 1.24 or newer, see [NEP
  50](https://numpy.org/neps/nep-0050-scalar-promotion.html) and
  https://github.com/numpy/numpy/issues/22341); see the example after this
  section.

- `asarray()` does not support `copy=False`.

- Functions which are not wrapped may not have the same type annotations as
  the spec.

- Functions which are not wrapped may not use positional-only arguments.

The minimum supported NumPy version is 1.21. However, this older version of
NumPy has a few issues:

- `unique_*` will not compare nans as unequal.
- `finfo()` has no `smallest_normal`.
- No `from_dlpack` or `__dlpack__`.
- `argmax()` and `argmin()` do not have `keepdims`.
- `qr()` doesn't support matrix stacks.
- `asarray()` doesn't support `copy=True` (as noted above, `copy=False` is not
  supported even in the latest NumPy).
- Type promotion behavior will be value based for 0-D arrays (and there is no
  `NPY_PROMOTION_STATE=weak` to disable this).

If any of these are an issue, it is recommended to bump your minimum NumPy
version.
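To illustrate what value-based casting means in practice, consider this small
sketch (the dtypes shown assume NumPy's legacy promotion rules):

```py
import numpy as np

x = np.asarray([1], dtype=np.int8)

# Under legacy value-based promotion, the result dtype depends on the *value*
# of the Python scalar: 1000 does not fit in int8, so the result is upcast.
print((x + 1000).dtype)  # int16 under value-based casting

# Under NPY_PROMOTION_STATE=weak (NEP 50), the scalar is instead treated as
# having the array's dtype, so no value-based upcasting takes place.
```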
### PyTorch

- Like NumPy/CuPy, we do not wrap the `torch.Tensor` object. It is missing the
  `__array_namespace__` and `to_device` methods, so the corresponding helper
  functions `array_namespace()` and `to_device()` in this library should be
  used instead (see above).

- The `x.size` attribute on `torch.Tensor` is a function that behaves
  differently from
  [`x.size`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html)
  in the spec. Use the `size(x)` helper function as a portable workaround (see
  above).

- PyTorch does not have unsigned integer types other than `uint8`, and no
  attempt is made to implement them here.

- PyTorch has type promotion semantics that differ from the array API
  specification for 0-D tensor objects. The array functions in this wrapper
  library do work around this, but the operators on the Tensor object do not,
  as no operators or methods on the Tensor object are modified. If this is a
  concern, use the functional form instead of the operator form, e.g.,
  `add(x, y)` instead of `x + y`.

- [`unique_all()`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.unique_all.html#array_api.unique_all)
  is not implemented, due to the fact that `torch.unique` does not support
  returning the `indices` array. The other
  [`unique_*`](https://data-apis.org/array-api/latest/API_specification/set_functions.html)
  functions are implemented.

- Slices do not support negative steps.

- [`std()`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.std.html#array_api.std)
  and
  [`var()`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.var.html#array_api.var)
  do not support floating-point `correction`.

- The `stream` argument of the `to_device()` helper (see above) is not
  supported.

- As with NumPy, type annotations and positional-only arguments may not
  exactly match the spec for functions that are not wrapped at all.

The minimum supported PyTorch version is 1.13.

## Vendoring

This library supports vendoring as an installation method. To vendor the
library, simply copy `array_api_compat` into the appropriate place in the
library, like

```
cp -R array_api_compat/ mylib/vendored/array_api_compat
```

You may also rename it to something else if you like (nothing in the code
references the name "array_api_compat").

Alternatively, the library may be installed as a dependency from PyPI.

## Implementation Notes

As noted before, the goal of this library is to reuse the NumPy and CuPy array
objects, rather than wrapping or extending them. This means that the functions
need to accept and return `np.ndarray` for NumPy and `cp.ndarray` for CuPy.

Each namespace (`array_api_compat.numpy`, `array_api_compat.cupy`, and
`array_api_compat.torch`) is populated with the normal library namespace (like
`from numpy import *`). Then specific functions are replaced with wrapped
variants.

Since NumPy and CuPy are nearly identical in behavior, most wrapping logic can
be shared between them. Wrapped functions that have the same logic between
NumPy and CuPy are in `array_api_compat/common/`. These functions are defined
like

```py
# In array_api_compat/common/_aliases.py

def acos(x, /, xp):
    return xp.arccos(x)
```

The `xp` argument refers to the original array namespace (either `numpy` or
`cupy`). Then in the specific `array_api_compat/numpy/` and
`array_api_compat/cupy/` namespaces, the `@get_xp` decorator is applied to
these functions, which automatically removes the `xp` argument from the
function signature and replaces it with the corresponding array library, like

```py
# In array_api_compat/numpy/_aliases.py

from ..common import _aliases

import numpy as np

acos = get_xp(np)(_aliases.acos)
```

This `acos` now has the signature `acos(x, /)` and calls `numpy.arccos`.

Similarly, for CuPy:

```py
# In array_api_compat/cupy/_aliases.py

from ..common import _aliases

import cupy as cp

acos = get_xp(cp)(_aliases.acos)
```

Since NumPy and CuPy are nearly identical in their behaviors, this allows
writing the wrapping logic for both libraries only once.

PyTorch uses a similar layout in `array_api_compat/torch/`, but it differs
enough from NumPy/CuPy that very few common wrappers for those libraries are
reused.

See https://numpy.org/doc/stable/reference/array_api.html for a full list of
changes from the base NumPy (the differences for CuPy are nearly identical). A
corresponding document does not yet exist for PyTorch, but you can examine the
various comments in the
[implementation](https://github.com/data-apis/array-api-compat/blob/main/array_api_compat/torch/_aliases.py)
to see what functions and behaviors have been wrapped.
===== array-api-compat-1.4/array_api_compat/__init__.py =====

"""
NumPy Array API compatibility library

This is a small wrapper around NumPy and CuPy that is compatible with the
Array API standard https://data-apis.org/array-api/latest/. See also NEP 47
https://numpy.org/neps/nep-0047-array-api-standard.html.

Unlike numpy.array_api, this is not a strict minimal implementation of the
Array API, but rather just an extension of the main NumPy namespace with
changes needed to be compliant with the Array API. See
https://numpy.org/doc/stable/reference/array_api.html for a full list of
changes. In particular, unlike numpy.array_api, this package does not use a
separate Array object, but rather just uses numpy.ndarray directly.

Library authors using the Array API may wish to test against numpy.array_api
to ensure they are not using functionality outside of the standard, but prefer
this implementation for the default when working with NumPy arrays.

"""
__version__ = '1.4'

from .common import *

===== array-api-compat-1.4/array_api_compat/_internal.py =====

"""
Internal helpers
"""

from functools import wraps
from inspect import signature

def get_xp(xp):
    """
    Decorator to automatically replace xp with the corresponding array module.

    Use like

    import numpy as np

    @get_xp(np)
    def func(x, /, xp, kwarg=None):
        return xp.func(x, kwarg=kwarg)

    Note that xp must be a keyword argument and come after all non-keyword
    arguments.

    """
    def inner(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            return f(*args, xp=xp, **kwargs)

        sig = signature(f)
        new_sig = sig.replace(parameters=[sig.parameters[i] for i in sig.parameters if i != 'xp'])

        if wrapped_f.__doc__ is None:
            wrapped_f.__doc__ = f"""\
Array API compatibility wrapper for {f.__name__}.

See the corresponding documentation in NumPy/CuPy and/or the array API
specification for more details.

"""
        wrapped_f.__signature__ = new_sig
        return wrapped_f
    return inner

===== array-api-compat-1.4/array_api_compat/common/__init__.py =====

from ._helpers import *

===== array-api-compat-1.4/array_api_compat/common/_aliases.py =====

"""
These are functions that are just aliases of existing functions in NumPy.
"""

from __future__ import annotations

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from typing import Optional, Sequence, Tuple, Union, List
    from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol

from typing import NamedTuple
from types import ModuleType
import inspect

from ._helpers import _check_device, _is_numpy_array, array_namespace

# These functions are modified from the NumPy versions.
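# As an illustrative aside: each wrapper below takes the array module as an
# `xp` argument, and the per-library namespaces bind it with the `get_xp`
# decorator from `_internal.py`. A hypothetical REPL session:
#
#     >>> import numpy as np
#     >>> from array_api_compat._internal import get_xp
#     >>> np_arange = get_xp(np)(arange)  # `xp` is removed from the signature
#     >>> np_arange(3)
#     array([0, 1, 2])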
def arange(
    start: Union[int, float],
    /,
    stop: Optional[Union[int, float]] = None,
    step: Union[int, float] = 1,
    *,
    xp,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs
) -> ndarray:
    _check_device(xp, device)
    return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs)

def empty(
    shape: Union[int, Tuple[int, ...]],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs
) -> ndarray:
    _check_device(xp, device)
    return xp.empty(shape, dtype=dtype, **kwargs)

def empty_like(
    x: ndarray, /, xp, *, dtype: Optional[Dtype] = None,
    device: Optional[Device] = None, **kwargs
) -> ndarray:
    _check_device(xp, device)
    return xp.empty_like(x, dtype=dtype, **kwargs)

def eye(
    n_rows: int,
    n_cols: Optional[int] = None,
    /,
    *,
    xp,
    k: int = 0,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs)

def full(
    shape: Union[int, Tuple[int, ...]],
    fill_value: Union[int, float],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.full(shape, fill_value, dtype=dtype, **kwargs)

def full_like(
    x: ndarray,
    /,
    fill_value: Union[int, float],
    *,
    xp,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.full_like(x, fill_value, dtype=dtype, **kwargs)

def linspace(
    start: Union[int, float],
    stop: Union[int, float],
    /,
    num: int,
    *,
    xp,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    endpoint: bool = True,
    **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs)

def ones(
    shape: Union[int, Tuple[int, ...]],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.ones(shape, dtype=dtype, **kwargs)

def ones_like(
    x: ndarray, /, xp, *, dtype: Optional[Dtype] = None,
    device: Optional[Device] = None, **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.ones_like(x, dtype=dtype, **kwargs)

def zeros(
    shape: Union[int, Tuple[int, ...]],
    xp,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.zeros(shape, dtype=dtype, **kwargs)

def zeros_like(
    x: ndarray, /, xp, *, dtype: Optional[Dtype] = None,
    device: Optional[Device] = None, **kwargs,
) -> ndarray:
    _check_device(xp, device)
    return xp.zeros_like(x, dtype=dtype, **kwargs)

# np.unique() is split into four functions in the array API:
# unique_all, unique_counts, unique_inverse, and unique_values (this is done
# to remove polymorphic return types).

# The functions here return namedtuples (np.unique() returns a normal
# tuple).
class UniqueAllResult(NamedTuple):
    values: ndarray
    indices: ndarray
    inverse_indices: ndarray
    counts: ndarray

class UniqueCountsResult(NamedTuple):
    values: ndarray
    counts: ndarray

class UniqueInverseResult(NamedTuple):
    values: ndarray
    inverse_indices: ndarray

def _unique_kwargs(xp):
    # Older versions of NumPy and CuPy do not have equal_nan. Rather than
    # trying to parse version numbers, just check if equal_nan is in the
    # signature.
    s = inspect.signature(xp.unique)
    if 'equal_nan' in s.parameters:
        return {'equal_nan': False}
    return {}

def unique_all(x: ndarray, /, xp) -> UniqueAllResult:
    kwargs = _unique_kwargs(xp)
    values, indices, inverse_indices, counts = xp.unique(
        x,
        return_counts=True,
        return_index=True,
        return_inverse=True,
        **kwargs,
    )
    # np.unique() flattens inverse indices, but they need to share x's shape
    # See https://github.com/numpy/numpy/issues/20638
    inverse_indices = inverse_indices.reshape(x.shape)
    return UniqueAllResult(
        values,
        indices,
        inverse_indices,
        counts,
    )

def unique_counts(x: ndarray, /, xp) -> UniqueCountsResult:
    kwargs = _unique_kwargs(xp)
    res = xp.unique(
        x,
        return_counts=True,
        return_index=False,
        return_inverse=False,
        **kwargs
    )

    return UniqueCountsResult(*res)

def unique_inverse(x: ndarray, /, xp) -> UniqueInverseResult:
    kwargs = _unique_kwargs(xp)
    values, inverse_indices = xp.unique(
        x,
        return_counts=False,
        return_index=False,
        return_inverse=True,
        **kwargs,
    )
    # xp.unique() flattens inverse indices, but they need to share x's shape
    # See https://github.com/numpy/numpy/issues/20638
    inverse_indices = inverse_indices.reshape(x.shape)
    return UniqueInverseResult(values, inverse_indices)

def unique_values(x: ndarray, /, xp) -> ndarray:
    kwargs = _unique_kwargs(xp)
    return xp.unique(
        x,
        return_counts=False,
        return_index=False,
        return_inverse=False,
        **kwargs,
    )

def astype(x: ndarray, dtype: Dtype, /, *, copy: bool = True) -> ndarray:
    if not copy and dtype == x.dtype:
        return x
    return x.astype(dtype=dtype, copy=copy)

# These functions have different keyword argument names

def std(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,  # correction instead of ddof
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)

def var(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,  # correction instead of ddof
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)

# Unlike transpose(), the axes argument to permute_dims() is required.
def permute_dims(x: ndarray, /, axes: Tuple[int, ...], xp) -> ndarray:
    return xp.transpose(x, axes)

# Creation functions add the device keyword (which does nothing for NumPy)

# asarray also adds the copy keyword
def _asarray(
    obj: Union[
        ndarray,
        bool,
        int,
        float,
        NestedSequence[bool | int | float],
        SupportsBufferProtocol,
    ],
    /,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    copy: "Optional[Union[bool, np._CopyMode]]" = None,
    namespace=None,
    **kwargs,
) -> ndarray:
    """
    Array API compatibility wrapper for asarray().

    See the corresponding documentation in NumPy/CuPy and/or the array API
    specification for more details.
    """
    if namespace is None:
        try:
            xp = array_namespace(obj, _use_compat=False)
        except ValueError:
            # TODO: What about lists of arrays?
raise ValueError("A namespace must be specified for asarray() with non-array input") elif isinstance(namespace, ModuleType): xp = namespace elif namespace == 'numpy': import numpy as xp elif namespace == 'cupy': import cupy as xp else: raise ValueError("Unrecognized namespace argument to asarray()") _check_device(xp, device) if _is_numpy_array(obj): import numpy as np if hasattr(np, '_CopyMode'): # Not present in older NumPys COPY_FALSE = (False, np._CopyMode.IF_NEEDED) COPY_TRUE = (True, np._CopyMode.ALWAYS) else: COPY_FALSE = (False,) COPY_TRUE = (True,) else: COPY_FALSE = (False,) COPY_TRUE = (True,) if copy in COPY_FALSE: # copy=False is not yet implemented in xp.asarray raise NotImplementedError("copy=False is not yet implemented") if isinstance(obj, xp.ndarray): if dtype is not None and obj.dtype != dtype: copy = True if copy in COPY_TRUE: return xp.array(obj, copy=True, dtype=dtype) return obj return xp.asarray(obj, dtype=dtype, **kwargs) # np.reshape calls the keyword argument 'newshape' instead of 'shape' def reshape(x: ndarray, /, shape: Tuple[int, ...], xp, copy: Optional[bool] = None, **kwargs) -> ndarray: if copy is True: x = x.copy() elif copy is False: y = x.view() y.shape = shape return y return xp.reshape(x, shape, **kwargs) # The descending keyword is new in sort and argsort, and 'kind' replaced with # 'stable' def argsort( x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs, ) -> ndarray: # Note: this keyword argument is different, and the default is different. # We set it in kwargs like this because numpy.sort uses kind='quicksort' # as the default whereas cupy.sort uses kind=None. if stable: kwargs['kind'] = "stable" if not descending: res = xp.argsort(x, axis=axis, **kwargs) else: # As NumPy has no native descending sort, we imitate it here. Note that # simply flipping the results of xp.argsort(x, ...) would not # respect the relative order like it would in native descending sorts. res = xp.flip( xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs), axis=axis, ) # Rely on flip()/argsort() to validate axis normalised_axis = axis if axis >= 0 else x.ndim + axis max_i = x.shape[normalised_axis] - 1 res = max_i - res return res def sort( x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs, ) -> ndarray: # Note: this keyword argument is different, and the default is different. # We set it in kwargs like this because numpy.sort uses kind='quicksort' # as the default whereas cupy.sort uses kind=None. 
    if stable:
        kwargs['kind'] = "stable"
    res = xp.sort(x, axis=axis, **kwargs)
    if descending:
        res = xp.flip(res, axis=axis)
    return res

# sum() and prod() should always upcast when dtype=None
def sum(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    dtype: Optional[Dtype] = None,
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    # `xp.sum` already upcasts integers, but not floats or complexes
    if dtype is None:
        if x.dtype == xp.float32:
            dtype = xp.float64
        elif x.dtype == xp.complex64:
            dtype = xp.complex128
    return xp.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs)

def prod(
    x: ndarray,
    /,
    xp,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    dtype: Optional[Dtype] = None,
    keepdims: bool = False,
    **kwargs,
) -> ndarray:
    if dtype is None:
        if x.dtype == xp.float32:
            dtype = xp.float64
        elif x.dtype == xp.complex64:
            dtype = xp.complex128
    return xp.prod(x, dtype=dtype, axis=axis, keepdims=keepdims, **kwargs)

# ceil, floor, and trunc return integers for integer inputs
def ceil(x: ndarray, /, xp, **kwargs) -> ndarray:
    if xp.issubdtype(x.dtype, xp.integer):
        return x
    return xp.ceil(x, **kwargs)

def floor(x: ndarray, /, xp, **kwargs) -> ndarray:
    if xp.issubdtype(x.dtype, xp.integer):
        return x
    return xp.floor(x, **kwargs)

def trunc(x: ndarray, /, xp, **kwargs) -> ndarray:
    if xp.issubdtype(x.dtype, xp.integer):
        return x
    return xp.trunc(x, **kwargs)

# linear algebra functions

def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
    return xp.matmul(x1, x2, **kwargs)

# Unlike transpose, matrix_transpose only transposes the last two axes.
def matrix_transpose(x: ndarray, /, xp) -> ndarray:
    if x.ndim < 2:
        raise ValueError("x must be at least 2-dimensional for matrix_transpose")
    return xp.swapaxes(x, -1, -2)

def tensordot(x1: ndarray,
              x2: ndarray,
              /,
              xp,
              *,
              axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
              **kwargs,
) -> ndarray:
    return xp.tensordot(x1, x2, axes=axes, **kwargs)

def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray:
    ndim = max(x1.ndim, x2.ndim)
    x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape)
    x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape)
    if x1_shape[axis] != x2_shape[axis]:
        raise ValueError("x1 and x2 must have the same size along the given axis")

    if hasattr(xp, 'broadcast_tensors'):
        _broadcast = xp.broadcast_tensors
    else:
        _broadcast = xp.broadcast_arrays

    x1_, x2_ = _broadcast(x1, x2)
    x1_ = xp.moveaxis(x1_, axis, -1)
    x2_ = xp.moveaxis(x2_, axis, -1)

    res = x1_[..., None, :] @ x2_[..., None]
    return res[..., 0, 0]

# isdtype is a new function in the 2022.12 array API specification.

def isdtype(
    dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], xp,
    *, _tuple=True,  # Disallow nested tuples
) -> bool:
    """
    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.

    Note that outside of this function, this compat library does not yet fully
    support complex numbers.
    See https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html for more details
    """
    if isinstance(kind, tuple) and _tuple:
        return any(isdtype(dtype, k, xp, _tuple=False) for k in kind)
    elif isinstance(kind, str):
        if kind == 'bool':
            return dtype == xp.bool_
        elif kind == 'signed integer':
            return xp.issubdtype(dtype, xp.signedinteger)
        elif kind == 'unsigned integer':
            return xp.issubdtype(dtype, xp.unsignedinteger)
        elif kind == 'integral':
            return xp.issubdtype(dtype, xp.integer)
        elif kind == 'real floating':
            return xp.issubdtype(dtype, xp.floating)
        elif kind == 'complex floating':
            return xp.issubdtype(dtype, xp.complexfloating)
        elif kind == 'numeric':
            return xp.issubdtype(dtype, xp.number)
        else:
            raise ValueError(f"Unrecognized data type kind: {kind!r}")
    else:
        # This will allow things that aren't required by the spec, like
        # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
        # more strict here to match the type annotation? Note that the
        # numpy.array_api implementation will be very strict.
        return dtype == kind

__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like',
           'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like',
           'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult',
           'unique_all', 'unique_counts', 'unique_inverse', 'unique_values',
           'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort',
           'sort', 'sum', 'prod', 'ceil', 'floor', 'trunc', 'matmul',
           'matrix_transpose', 'tensordot', 'vecdot', 'isdtype']

===== array-api-compat-1.4/array_api_compat/common/_helpers.py =====

"""
Various helper functions which are not part of the spec.

Functions which start with an underscore are for internal use only but helpers
that are in __all__ are intended as additional helper functions for use by end
users of the compat library.
"""
from __future__ import annotations

import sys
import math

def _is_numpy_array(x):
    # Avoid importing NumPy if it isn't already
    if 'numpy' not in sys.modules:
        return False

    import numpy as np

    # TODO: Should we reject ndarray subclasses?
    return isinstance(x, (np.ndarray, np.generic))

def _is_cupy_array(x):
    # Avoid importing CuPy if it isn't already
    if 'cupy' not in sys.modules:
        return False

    import cupy as cp

    # TODO: Should we reject ndarray subclasses?
    return isinstance(x, (cp.ndarray, cp.generic))

def _is_torch_array(x):
    # Avoid importing torch if it isn't already
    if 'torch' not in sys.modules:
        return False

    import torch

    # TODO: Should we reject Tensor subclasses?
    return isinstance(x, torch.Tensor)

def is_array_api_obj(x):
    """
    Check if x is an array API compatible array object.
    """
    return _is_numpy_array(x) \
        or _is_cupy_array(x) \
        or _is_torch_array(x) \
        or hasattr(x, '__array_namespace__')

def _check_api_version(api_version):
    if api_version is not None and api_version != '2021.12':
        raise ValueError("Only the 2021.12 version of the array API specification is currently supported")

def array_namespace(*xs, api_version=None, _use_compat=True):
    """
    Get the array API compatible namespace for the arrays `xs`.

    `xs` should contain one or more arrays.

    Typical usage is

    def your_function(x, y):
        xp = array_api_compat.array_namespace(x, y)
        # Now use xp as the array library namespace
        return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)

    api_version should be the newest version of the spec that you need support
    for (currently the compat library wrapped APIs only support v2021.12).
""" namespaces = set() for x in xs: if isinstance(x, (tuple, list)): namespaces.add(array_namespace(*x, _use_compat=_use_compat)) elif hasattr(x, '__array_namespace__'): namespaces.add(x.__array_namespace__(api_version=api_version)) elif _is_numpy_array(x): _check_api_version(api_version) if _use_compat: from .. import numpy as numpy_namespace namespaces.add(numpy_namespace) else: import numpy as np namespaces.add(np) elif _is_cupy_array(x): _check_api_version(api_version) if _use_compat: from .. import cupy as cupy_namespace namespaces.add(cupy_namespace) else: import cupy as cp namespaces.add(cp) elif _is_torch_array(x): _check_api_version(api_version) if _use_compat: from .. import torch as torch_namespace namespaces.add(torch_namespace) else: import torch namespaces.add(torch) else: # TODO: Support Python scalars? raise TypeError("The input is not a supported array type") if not namespaces: raise TypeError("Unrecognized array input") if len(namespaces) != 1: raise TypeError(f"Multiple namespaces for array inputs: {namespaces}") xp, = namespaces return xp # backwards compatibility alias get_namespace = array_namespace def _check_device(xp, device): if xp == sys.modules.get('numpy'): if device not in ["cpu", None]: raise ValueError(f"Unsupported device for NumPy: {device!r}") # device() is not on numpy.ndarray and and to_device() is not on numpy.ndarray # or cupy.ndarray. They are not included in array objects of this library # because this library just reuses the respective ndarray classes without # wrapping or subclassing them. These helper functions can be used instead of # the wrapper functions for libraries that need to support both NumPy/CuPy and # other libraries that use devices. def device(x: "Array", /) -> "Device": """ Hardware device the array data resides on. Parameters ---------- x: array array instance from NumPy or an array API compatible library. Returns ------- out: device a ``device`` object (see the "Device Support" section of the array API specification). """ if _is_numpy_array(x): return "cpu" return x.device # Based on cupy.array_api.Array.to_device def _cupy_to_device(x, device, /, stream=None): import cupy as cp from cupy.cuda import Device as _Device from cupy.cuda import stream as stream_module from cupy_backends.cuda.api import runtime if device == x.device: return x elif device == "cpu": # allowing us to use `to_device(x, "cpu")` # is useful for portable test swapping between # host and device backends return x.get() elif not isinstance(device, _Device): raise ValueError(f"Unsupported device {device!r}") else: # see cupy/cupy#5985 for the reason how we handle device/stream here prev_device = runtime.getDevice() prev_stream: stream_module.Stream = None if stream is not None: prev_stream = stream_module.get_current_stream() # stream can be an int as specified in __dlpack__, or a CuPy stream if isinstance(stream, int): stream = cp.cuda.ExternalStream(stream) elif isinstance(stream, cp.cuda.Stream): pass else: raise ValueError('the input stream is not recognized') stream.use() try: runtime.setDevice(device.id) arr = x.copy() finally: runtime.setDevice(prev_device) if stream is not None: prev_stream.use() return arr def _torch_to_device(x, device, /, stream=None): if stream is not None: raise NotImplementedError return x.to(device) def to_device(x: "Array", device: "Device", /, *, stream: "Optional[Union[int, Any]]" = None) -> "Array": """ Copy the array from the device on which it currently resides to the specified ``device``. 
    Parameters
    ----------
    x: array
        array instance from NumPy or an array API compatible library.
    device: device
        a ``device`` object (see the "Device Support" section of the array API
        specification).
    stream: Optional[Union[int, Any]]
        stream object to use during copy. In addition to the types supported
        in ``array.__dlpack__``, implementations may choose to support any
        library-specific stream object with the caveat that any code using
        such an object would not be portable.

    Returns
    -------
    out: array
        an array with the same data and data type as ``x`` and located on the
        specified ``device``.

    .. note::
       If ``stream`` is given, the copy operation should be enqueued on the
       provided ``stream``; otherwise, the copy operation should be enqueued
       on the default stream/queue. Whether the copy is performed synchronously
       or asynchronously is implementation-dependent. Accordingly, if
       synchronization is required to guarantee data safety, this must be
       clearly explained in a conforming library's documentation.
    """
    if _is_numpy_array(x):
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        if device == 'cpu':
            return x
        raise ValueError(f"Unsupported device {device!r}")
    elif _is_cupy_array(x):
        # cupy does not yet have to_device
        return _cupy_to_device(x, device, stream=stream)
    elif _is_torch_array(x):
        return _torch_to_device(x, device, stream=stream)
    return x.to_device(device, stream=stream)

def size(x):
    """
    Return the total number of elements of x
    """
    if None in x.shape:
        return None
    return math.prod(x.shape)

__all__ = ['is_array_api_obj', 'array_namespace', 'get_namespace', 'device',
           'to_device', 'size']

===== array-api-compat-1.4/array_api_compat/common/_linalg.py =====

from __future__ import annotations

from typing import TYPE_CHECKING, NamedTuple
if TYPE_CHECKING:
    from typing import Literal, Optional, Sequence, Tuple, Union
    from ._typing import ndarray

from numpy.core.numeric import normalize_axis_tuple

from ._aliases import matmul, matrix_transpose, tensordot, vecdot
from .._internal import get_xp

# These are in the main NumPy namespace but not in numpy.linalg
def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray:
    return xp.cross(x1, x2, axis=axis, **kwargs)

def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
    return xp.outer(x1, x2, **kwargs)

class EighResult(NamedTuple):
    eigenvalues: ndarray
    eigenvectors: ndarray

class QRResult(NamedTuple):
    Q: ndarray
    R: ndarray

class SlogdetResult(NamedTuple):
    sign: ndarray
    logabsdet: ndarray

class SVDResult(NamedTuple):
    U: ndarray
    S: ndarray
    Vh: ndarray

# These functions are the same as their NumPy counterparts except they return
# a namedtuple.
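# An illustrative sketch of the namedtuple return (assuming NumPy as xp):
#
#     >>> import numpy as np
#     >>> res = eigh(np.eye(2), np)
#     >>> res.eigenvalues, res.eigenvectors  # fields accessible by name
#     (array([1., 1.]), array([[1., 0.], [0., 1.]]))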
def eigh(x: ndarray, /, xp, **kwargs) -> EighResult:
    return EighResult(*xp.linalg.eigh(x, **kwargs))

def qr(x: ndarray, /, xp, *, mode: Literal['reduced', 'complete'] = 'reduced',
       **kwargs) -> QRResult:
    return QRResult(*xp.linalg.qr(x, mode=mode, **kwargs))

def slogdet(x: ndarray, /, xp, **kwargs) -> SlogdetResult:
    return SlogdetResult(*xp.linalg.slogdet(x, **kwargs))

def svd(x: ndarray, /, xp, *, full_matrices: bool = True, **kwargs) -> SVDResult:
    return SVDResult(*xp.linalg.svd(x, full_matrices=full_matrices, **kwargs))

# These functions have additional keyword arguments

# The upper keyword argument is new from NumPy
def cholesky(x: ndarray, /, xp, *, upper: bool = False, **kwargs) -> ndarray:
    L = xp.linalg.cholesky(x, **kwargs)
    if upper:
        return get_xp(xp)(matrix_transpose)(L)
    return L

# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy.
# Note that it has a different semantic meaning from tol and rcond.
def matrix_rank(x: ndarray,
                /,
                xp,
                *,
                rtol: Optional[Union[float, ndarray]] = None,
                **kwargs) -> ndarray:
    # this is different from xp.linalg.matrix_rank, which supports 1
    # dimensional arrays.
    if x.ndim < 2:
        raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
    S = xp.linalg.svd(x, compute_uv=False, **kwargs)
    if rtol is None:
        tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * xp.finfo(S.dtype).eps
    else:
        # this is different from xp.linalg.matrix_rank, which does not
        # multiply the tolerance by the largest singular value.
        tol = S.max(axis=-1, keepdims=True)*xp.asarray(rtol)[..., xp.newaxis]
    return xp.count_nonzero(S > tol, axis=-1)

def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **kwargs) -> ndarray:
    # this is different from xp.linalg.pinv, which does not multiply the
    # default tolerance by max(M, N).
    if rtol is None:
        rtol = max(x.shape[-2:]) * xp.finfo(x.dtype).eps
    return xp.linalg.pinv(x, rcond=rtol, **kwargs)

# These functions are new in the array API spec

def matrix_norm(x: ndarray,
                /,
                xp,
                *,
                keepdims: bool = False,
                ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray:
    return xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord)

# svdvals is not in NumPy (but it is in SciPy). It is equivalent to
# xp.linalg.svd(compute_uv=False).
def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]:
    return xp.linalg.svd(x, compute_uv=False)

def vector_norm(x: ndarray,
                /,
                xp,
                *,
                axis: Optional[Union[int, Tuple[int, ...]]] = None,
                keepdims: bool = False,
                ord: Optional[Union[int, float]] = 2) -> ndarray:
    # xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or
    # when axis=None and the input is 2-D, so to force a vector norm, we make
    # it so the input is 1-D (for axis=None), or reshape so that norm is done
    # on a single dimension.
    if axis is None:
        # Note: xp.linalg.norm() doesn't handle 0-D arrays
        x = x.ravel()
        _axis = 0
    elif isinstance(axis, tuple):
        # Note: The axis argument supports any number of axes, whereas
        # xp.linalg.norm() only supports a single axis for vector norm.
        normalized_axis = normalize_axis_tuple(axis, x.ndim)
        rest = tuple(i for i in range(x.ndim) if i not in normalized_axis)
        newshape = axis + rest
        x = xp.transpose(x, newshape).reshape(
            (xp.prod([x.shape[i] for i in axis], dtype=int), *[x.shape[i] for i in rest]))
        _axis = 0
    else:
        _axis = axis

    res = xp.linalg.norm(x, axis=_axis, ord=ord)

    if keepdims:
        # We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks
        # above to avoid matrix norm logic.
        shape = list(x.shape)
        _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim)
        for i in _axis:
            shape[i] = 1
        res = xp.reshape(res, tuple(shape))

    return res

# xp.diagonal and xp.trace operate on the first two axes whereas these
# operate on the last two
def diagonal(x: ndarray, /, xp, *, offset: int = 0, **kwargs) -> ndarray:
    return xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs)

def trace(x: ndarray, /, xp, *, offset: int = 0, dtype=None, **kwargs) -> ndarray:
    if dtype is None:
        if x.dtype == xp.float32:
            dtype = xp.float64
        elif x.dtype == xp.complex64:
            dtype = xp.complex128
    return xp.asarray(xp.trace(x, offset=offset, dtype=dtype, axis1=-2, axis2=-1, **kwargs))

__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult',
           'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet',
           'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm',
           'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal',
           'trace']

===== array-api-compat-1.4/array_api_compat/common/_typing.py =====

from __future__ import annotations

__all__ = [
    "NestedSequence",
    "SupportsBufferProtocol",
]

from typing import (
    Any,
    TypeVar,
    Protocol,
)

_T_co = TypeVar("_T_co", covariant=True)

class NestedSequence(Protocol[_T_co]):
    def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
    def __len__(self, /) -> int: ...

SupportsBufferProtocol = Any

===== array-api-compat-1.4/array_api_compat/cupy/__init__.py =====

from cupy import *

# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round

# These imports may overwrite names from the import * above.
from ._aliases import *

# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')

from .linalg import matrix_transpose, vecdot

from ..common._helpers import *

__array_api_version__ = '2022.12'

===== array-api-compat-1.4/array_api_compat/cupy/_aliases.py =====

from __future__ import annotations

from functools import partial

from ..common import _aliases
from .._internal import get_xp

asarray = asarray_cupy = partial(_aliases._asarray, namespace='cupy')
asarray.__doc__ = _aliases._asarray.__doc__
del partial

import cupy as cp
bool = cp.bool_

# Basic renames
acos = cp.arccos
acosh = cp.arccosh
asin = cp.arcsin
asinh = cp.arcsinh
atan = cp.arctan
atan2 = cp.arctan2
atanh = cp.arctanh
bitwise_left_shift = cp.left_shift
bitwise_invert = cp.invert
bitwise_right_shift = cp.right_shift
concat = cp.concatenate
pow = cp.power

arange = get_xp(cp)(_aliases.arange)
empty = get_xp(cp)(_aliases.empty)
empty_like = get_xp(cp)(_aliases.empty_like)
eye = get_xp(cp)(_aliases.eye)
full = get_xp(cp)(_aliases.full)
full_like = get_xp(cp)(_aliases.full_like)
linspace = get_xp(cp)(_aliases.linspace)
ones = get_xp(cp)(_aliases.ones)
ones_like = get_xp(cp)(_aliases.ones_like)
zeros = get_xp(cp)(_aliases.zeros)
zeros_like = get_xp(cp)(_aliases.zeros_like)
UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult)
unique_all = get_xp(cp)(_aliases.unique_all)
unique_counts = get_xp(cp)(_aliases.unique_counts)
unique_inverse = get_xp(cp)(_aliases.unique_inverse)
unique_values = get_xp(cp)(_aliases.unique_values)
astype = _aliases.astype
std = get_xp(cp)(_aliases.std)
var = get_xp(cp)(_aliases.var)
permute_dims = get_xp(cp)(_aliases.permute_dims)
reshape = get_xp(cp)(_aliases.reshape)
argsort = get_xp(cp)(_aliases.argsort)
sort = get_xp(cp)(_aliases.sort)
sum = get_xp(cp)(_aliases.sum)
prod = get_xp(cp)(_aliases.prod)
ceil = get_xp(cp)(_aliases.ceil)
floor = get_xp(cp)(_aliases.floor)
trunc = get_xp(cp)(_aliases.trunc)
matmul = get_xp(cp)(_aliases.matmul)
matrix_transpose = get_xp(cp)(_aliases.matrix_transpose)
tensordot = get_xp(cp)(_aliases.tensordot)
vecdot = get_xp(cp)(_aliases.vecdot)
isdtype = get_xp(cp)(_aliases.isdtype)

__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos',
                              'acosh', 'asin', 'asinh', 'atan', 'atan2',
                              'atanh', 'bitwise_left_shift', 'bitwise_invert',
                              'bitwise_right_shift', 'concat', 'pow']

===== array-api-compat-1.4/array_api_compat/cupy/_typing.py =====

from __future__ import annotations

__all__ = [
    "ndarray",
    "Device",
    "Dtype",
]

import sys
from typing import (
    Union,
    TYPE_CHECKING,
)

from cupy import (
    ndarray,
    dtype,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
    float32,
    float64,
)

from cupy.cuda.device import Device

if TYPE_CHECKING or sys.version_info >= (3, 9):
    Dtype = dtype[Union[
        int8,
        int16,
        int32,
        int64,
        uint8,
        uint16,
        uint32,
        uint64,
        float32,
        float64,
    ]]
else:
    Dtype = dtype

===== array-api-compat-1.4/array_api_compat/cupy/linalg.py =====

from cupy.linalg import *
# cupy.linalg doesn't have __all__. If it is added, replace this with
If it is added, replace this with # # from cupy.linalg import __all__ as linalg_all _n = {} exec('from cupy.linalg import *', _n) del _n['__builtins__'] linalg_all = list(_n) del _n from ..common import _linalg from .._internal import get_xp from ._aliases import (matmul, matrix_transpose, tensordot, vecdot) import cupy as cp cross = get_xp(cp)(_linalg.cross) outer = get_xp(cp)(_linalg.outer) EighResult = _linalg.EighResult QRResult = _linalg.QRResult SlogdetResult = _linalg.SlogdetResult SVDResult = _linalg.SVDResult eigh = get_xp(cp)(_linalg.eigh) qr = get_xp(cp)(_linalg.qr) slogdet = get_xp(cp)(_linalg.slogdet) svd = get_xp(cp)(_linalg.svd) cholesky = get_xp(cp)(_linalg.cholesky) matrix_rank = get_xp(cp)(_linalg.matrix_rank) pinv = get_xp(cp)(_linalg.pinv) matrix_norm = get_xp(cp)(_linalg.matrix_norm) svdvals = get_xp(cp)(_linalg.svdvals) vector_norm = get_xp(cp)(_linalg.vector_norm) diagonal = get_xp(cp)(_linalg.diagonal) trace = get_xp(cp)(_linalg.trace) __all__ = linalg_all + _linalg.__all__ del get_xp del cp del linalg_all del _linalg array-api-compat-1.4/array_api_compat/numpy/000077500000000000000000000000001450067300000212015ustar00rootroot00000000000000array-api-compat-1.4/array_api_compat/numpy/__init__.py000066400000000000000000000011241450067300000233100ustar00rootroot00000000000000from numpy import * # from numpy import * doesn't overwrite these builtin names from numpy import abs, max, min, round # These imports may overwrite names from the import * above. from ._aliases import * # Don't know why, but we have to do an absolute import to import linalg. If we # instead do # # from . import linalg # # It doesn't overwrite np.linalg from above. The import is generated # dynamically so that the library can be vendored. __import__(__package__ + '.linalg') from .linalg import matrix_transpose, vecdot from ..common._helpers import * __array_api_version__ = '2022.12' array-api-compat-1.4/array_api_compat/numpy/_aliases.py000066400000000000000000000043751450067300000233440ustar00rootroot00000000000000from __future__ import annotations from functools import partial from ..common import _aliases from .._internal import get_xp asarray = asarray_numpy = partial(_aliases._asarray, namespace='numpy') asarray.__doc__ = _aliases._asarray.__doc__ del partial import numpy as np bool = np.bool_ # Basic renames acos = np.arccos acosh = np.arccosh asin = np.arcsin asinh = np.arcsinh atan = np.arctan atan2 = np.arctan2 atanh = np.arctanh bitwise_left_shift = np.left_shift bitwise_invert = np.invert bitwise_right_shift = np.right_shift concat = np.concatenate pow = np.power arange = get_xp(np)(_aliases.arange) empty = get_xp(np)(_aliases.empty) empty_like = get_xp(np)(_aliases.empty_like) eye = get_xp(np)(_aliases.eye) full = get_xp(np)(_aliases.full) full_like = get_xp(np)(_aliases.full_like) linspace = get_xp(np)(_aliases.linspace) ones = get_xp(np)(_aliases.ones) ones_like = get_xp(np)(_aliases.ones_like) zeros = get_xp(np)(_aliases.zeros) zeros_like = get_xp(np)(_aliases.zeros_like) UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult) UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult) UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult) unique_all = get_xp(np)(_aliases.unique_all) unique_counts = get_xp(np)(_aliases.unique_counts) unique_inverse = get_xp(np)(_aliases.unique_inverse) unique_values = get_xp(np)(_aliases.unique_values) astype = _aliases.astype std = get_xp(np)(_aliases.std) var = get_xp(np)(_aliases.var) permute_dims = get_xp(np)(_aliases.permute_dims) 
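# Illustrative note on the get_xp pattern used for the aliases in this file:
# each generic implementation in ..common._aliases accepts the namespace as
# an ``xp`` keyword, and get_xp(np) binds that keyword. Roughly (a sketch,
# not the actual helper in .._internal):
#
#     def get_xp_sketch(xp):
#         def decorator(f):
#             def wrapped(*args, **kwargs):
#                 return f(*args, xp=xp, **kwargs)
#             return wrapped
#         return decorator
#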
reshape = get_xp(np)(_aliases.reshape) argsort = get_xp(np)(_aliases.argsort) sort = get_xp(np)(_aliases.sort) sum = get_xp(np)(_aliases.sum) prod = get_xp(np)(_aliases.prod) ceil = get_xp(np)(_aliases.ceil) floor = get_xp(np)(_aliases.floor) trunc = get_xp(np)(_aliases.trunc) matmul = get_xp(np)(_aliases.matmul) matrix_transpose = get_xp(np)(_aliases.matrix_transpose) tensordot = get_xp(np)(_aliases.tensordot) vecdot = get_xp(np)(_aliases.vecdot) isdtype = get_xp(np)(_aliases.isdtype) __all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'bitwise_left_shift', 'bitwise_invert', 'bitwise_right_shift', 'concat', 'pow'] array-api-compat-1.4/array_api_compat/numpy/_typing.py000066400000000000000000000011521450067300000232230ustar00rootroot00000000000000from __future__ import annotations __all__ = [ "ndarray", "Device", "Dtype", ] import sys from typing import ( Literal, Union, TYPE_CHECKING, ) from numpy import ( ndarray, dtype, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, ) Device = Literal["cpu"] if TYPE_CHECKING or sys.version_info >= (3, 9): Dtype = dtype[Union[ int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, ]] else: Dtype = dtype array-api-compat-1.4/array_api_compat/numpy/linalg.py000066400000000000000000000016741450067300000230310ustar00rootroot00000000000000from numpy.linalg import * from numpy.linalg import __all__ as linalg_all from ..common import _linalg from .._internal import get_xp from ._aliases import (matmul, matrix_transpose, tensordot, vecdot) import numpy as np cross = get_xp(np)(_linalg.cross) outer = get_xp(np)(_linalg.outer) EighResult = _linalg.EighResult QRResult = _linalg.QRResult SlogdetResult = _linalg.SlogdetResult SVDResult = _linalg.SVDResult eigh = get_xp(np)(_linalg.eigh) qr = get_xp(np)(_linalg.qr) slogdet = get_xp(np)(_linalg.slogdet) svd = get_xp(np)(_linalg.svd) cholesky = get_xp(np)(_linalg.cholesky) matrix_rank = get_xp(np)(_linalg.matrix_rank) pinv = get_xp(np)(_linalg.pinv) matrix_norm = get_xp(np)(_linalg.matrix_norm) svdvals = get_xp(np)(_linalg.svdvals) vector_norm = get_xp(np)(_linalg.vector_norm) diagonal = get_xp(np)(_linalg.diagonal) trace = get_xp(np)(_linalg.trace) __all__ = linalg_all + _linalg.__all__ del get_xp del np del linalg_all del _linalg array-api-compat-1.4/array_api_compat/torch/000077500000000000000000000000001450067300000211505ustar00rootroot00000000000000array-api-compat-1.4/array_api_compat/torch/__init__.py000066400000000000000000000010061450067300000232560ustar00rootroot00000000000000from torch import * # Several names are not included in the above import * import torch for n in dir(torch): if (n.startswith('_') or n.endswith('_') or 'cuda' in n or 'cpu' in n or 'backward' in n): continue exec(n + ' = torch.' + n) # These imports may overwrite names from the import * above. 
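# (Illustrative note: the exec loop above amounts to writing, e.g.,
#
#     add = torch.add
#     float32 = torch.float32
#
# for every public torch name that doesn't look private or device-specific,
# so that those names are re-exported from this module.)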
from ._aliases import * # See the comment in the numpy __init__.py __import__(__package__ + '.linalg') from ..common._helpers import * __array_api_version__ = '2022.12' array-api-compat-1.4/array_api_compat/torch/_aliases.py000066400000000000000000000640531450067300000233120ustar00rootroot00000000000000from __future__ import annotations from functools import wraps from builtins import all as builtin_all, any as builtin_any from ..common._aliases import (UniqueAllResult, UniqueCountsResult, UniqueInverseResult, matrix_transpose as _aliases_matrix_transpose, vecdot as _aliases_vecdot) from .._internal import get_xp import torch from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import List, Optional, Sequence, Tuple, Union from ..common._typing import Device from torch import dtype as Dtype array = torch.Tensor _int_dtypes = { torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64, } _array_api_dtypes = { torch.bool, *_int_dtypes, torch.float32, torch.float64, torch.complex64, torch.complex128, } _promotion_table = { # bool (torch.bool, torch.bool): torch.bool, # ints (torch.int8, torch.int8): torch.int8, (torch.int8, torch.int16): torch.int16, (torch.int8, torch.int32): torch.int32, (torch.int8, torch.int64): torch.int64, (torch.int16, torch.int8): torch.int16, (torch.int16, torch.int16): torch.int16, (torch.int16, torch.int32): torch.int32, (torch.int16, torch.int64): torch.int64, (torch.int32, torch.int8): torch.int32, (torch.int32, torch.int16): torch.int32, (torch.int32, torch.int32): torch.int32, (torch.int32, torch.int64): torch.int64, (torch.int64, torch.int8): torch.int64, (torch.int64, torch.int16): torch.int64, (torch.int64, torch.int32): torch.int64, (torch.int64, torch.int64): torch.int64, # uints (torch.uint8, torch.uint8): torch.uint8, # ints and uints (mixed sign) (torch.int8, torch.uint8): torch.int16, (torch.int16, torch.uint8): torch.int16, (torch.int32, torch.uint8): torch.int32, (torch.int64, torch.uint8): torch.int64, (torch.uint8, torch.int8): torch.int16, (torch.uint8, torch.int16): torch.int16, (torch.uint8, torch.int32): torch.int32, (torch.uint8, torch.int64): torch.int64, # floats (torch.float32, torch.float32): torch.float32, (torch.float32, torch.float64): torch.float64, (torch.float64, torch.float32): torch.float64, (torch.float64, torch.float64): torch.float64, # complexes (torch.complex64, torch.complex64): torch.complex64, (torch.complex64, torch.complex128): torch.complex128, (torch.complex128, torch.complex64): torch.complex128, (torch.complex128, torch.complex128): torch.complex128, # Mixed float and complex (torch.float32, torch.complex64): torch.complex64, (torch.float32, torch.complex128): torch.complex128, (torch.float64, torch.complex64): torch.complex128, (torch.float64, torch.complex128): torch.complex128, } def _two_arg(f): @wraps(f) def _f(x1, x2, /, **kwargs): x1, x2 = _fix_promotion(x1, x2) return f(x1, x2, **kwargs) if _f.__doc__ is None: _f.__doc__ = f"""\ Array API compatibility wrapper for torch.{f.__name__}. See the corresponding PyTorch documentation and/or the array API specification for more details. 
""" return _f def _fix_promotion(x1, x2, only_scalar=True): if x1.dtype not in _array_api_dtypes or x2.dtype not in _array_api_dtypes: return x1, x2 # If an argument is 0-D pytorch downcasts the other argument if not only_scalar or x1.shape == (): dtype = result_type(x1, x2) x2 = x2.to(dtype) if not only_scalar or x2.shape == (): dtype = result_type(x1, x2) x1 = x1.to(dtype) return x1, x2 def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype: if len(arrays_and_dtypes) == 0: raise TypeError("At least one array or dtype must be provided") if len(arrays_and_dtypes) == 1: x = arrays_and_dtypes[0] if isinstance(x, torch.dtype): return x return x.dtype if len(arrays_and_dtypes) > 2: return result_type(arrays_and_dtypes[0], result_type(*arrays_and_dtypes[1:])) x, y = arrays_and_dtypes xdt = x.dtype if not isinstance(x, torch.dtype) else x ydt = y.dtype if not isinstance(y, torch.dtype) else y if (xdt, ydt) in _promotion_table: return _promotion_table[xdt, ydt] # This doesn't result_type(dtype, dtype) for non-array API dtypes # because torch.result_type only accepts tensors. This does however, allow # cross-kind promotion. x = torch.tensor([], dtype=x) if isinstance(x, torch.dtype) else x y = torch.tensor([], dtype=y) if isinstance(y, torch.dtype) else y return torch.result_type(x, y) def can_cast(from_: Union[Dtype, array], to: Dtype, /) -> bool: if not isinstance(from_, torch.dtype): from_ = from_.dtype return torch.can_cast(from_, to) # Basic renames bitwise_invert = torch.bitwise_not # Two-arg elementwise functions # These require a wrapper to do the correct type promotion on 0-D tensors add = _two_arg(torch.add) atan2 = _two_arg(torch.atan2) bitwise_and = _two_arg(torch.bitwise_and) bitwise_left_shift = _two_arg(torch.bitwise_left_shift) bitwise_or = _two_arg(torch.bitwise_or) bitwise_right_shift = _two_arg(torch.bitwise_right_shift) bitwise_xor = _two_arg(torch.bitwise_xor) divide = _two_arg(torch.divide) # Also a rename. torch.equal does not broadcast equal = _two_arg(torch.eq) floor_divide = _two_arg(torch.floor_divide) greater = _two_arg(torch.greater) greater_equal = _two_arg(torch.greater_equal) less = _two_arg(torch.less) less_equal = _two_arg(torch.less_equal) logaddexp = _two_arg(torch.logaddexp) # logical functions are not included here because they only accept bool in the # spec, so type promotion is irrelevant. multiply = _two_arg(torch.multiply) not_equal = _two_arg(torch.not_equal) pow = _two_arg(torch.pow) remainder = _two_arg(torch.remainder) subtract = _two_arg(torch.subtract) # These wrappers are mostly based on the fact that pytorch uses 'dim' instead # of 'axis'. 
# torch.min and torch.max return a tuple and don't support multiple axes https://github.com/pytorch/pytorch/issues/58745 def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: # https://github.com/pytorch/pytorch/issues/29137 if axis == (): return torch.clone(x) return torch.amax(x, axis, keepdims=keepdims) def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array: # https://github.com/pytorch/pytorch/issues/29137 if axis == (): return torch.clone(x) return torch.amin(x, axis, keepdims=keepdims) # torch.sort also returns a tuple # https://github.com/pytorch/pytorch/issues/70921 def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs) -> array: return torch.sort(x, dim=axis, descending=descending, stable=stable, **kwargs).values def _normalize_axes(axis, ndim): axes = [] if ndim == 0 and axis: # Better error message in this case raise IndexError(f"Dimension out of range: {axis[0]}") lower, upper = -ndim, ndim - 1 for a in axis: if a < lower or a > upper: # Match torch error message (e.g., from sum()) raise IndexError(f"Dimension out of range (expected to be in range of [{lower}, {upper}], but got {a}") if a < 0: a = a + ndim if a in axes: # Use IndexError instead of RuntimeError, and "axis" instead of "dim" raise IndexError(f"Axis {a} appears multiple times in the list of axes") axes.append(a) return sorted(axes) def _axis_none_keepdims(x, ndim, keepdims): # Apply keepdims when axis=None # (https://github.com/pytorch/pytorch/issues/71209) # Note that this is only valid for the axis=None case. if keepdims: for i in range(ndim): x = torch.unsqueeze(x, 0) return x def _reduce_multiple_axes(f, x, axis, keepdims=False, **kwargs): # Some reductions don't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). axes = _normalize_axes(axis, x.ndim) for a in reversed(axes): x = torch.movedim(x, a, -1) x = torch.flatten(x, -len(axes)) out = f(x, -1, **kwargs) if keepdims: for a in axes: out = torch.unsqueeze(out, a) return out def prod(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype: Optional[Dtype] = None, keepdims: bool = False, **kwargs) -> array: x = torch.asarray(x) ndim = x.ndim # https://github.com/pytorch/pytorch/issues/29137. Separate from the logic # below because it still needs to upcast. if axis == (): if dtype is None: # We can't upcast uint8 according to the spec because there is no # torch.uint64, so at least upcast to int64 which is what sum does # when axis=None. if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: return x.to(torch.int64) return x.clone() return x.to(dtype) # torch.prod doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). if isinstance(axis, tuple): return _reduce_multiple_axes(torch.prod, x, axis, keepdims=keepdims, dtype=dtype, **kwargs) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.prod(x, dtype=dtype, **kwargs) res = _axis_none_keepdims(res, ndim, keepdims) return res return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) def sum(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype: Optional[Dtype] = None, keepdims: bool = False, **kwargs) -> array: x = torch.asarray(x) ndim = x.ndim # https://github.com/pytorch/pytorch/issues/29137. # Make sure it upcasts. 
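    # For example (illustrative): summing a hypothetical torch.uint8 tensor
    # with axis=() should still come back as int64 below, matching what
    # torch.sum itself produces when axis=None.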
if axis == (): if dtype is None: # We can't upcast uint8 according to the spec because there is no # torch.uint64, so at least upcast to int64 which is what sum does # when axis=None. if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]: return x.to(torch.int64) return x.clone() return x.to(dtype) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.sum(x, dtype=dtype, **kwargs) res = _axis_none_keepdims(res, ndim, keepdims) return res return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs) def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, **kwargs) -> array: x = torch.asarray(x) ndim = x.ndim if axis == (): return x.to(torch.bool) # torch.any doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). if isinstance(axis, tuple): res = _reduce_multiple_axes(torch.any, x, axis, keepdims=keepdims, **kwargs) return res.to(torch.bool) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.any(x, **kwargs) res = _axis_none_keepdims(res, ndim, keepdims) return res.to(torch.bool) # torch.any doesn't return bool for uint8 return torch.any(x, axis, keepdims=keepdims).to(torch.bool) def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, **kwargs) -> array: x = torch.asarray(x) ndim = x.ndim if axis == (): return x.to(torch.bool) # torch.all doesn't support multiple axes # (https://github.com/pytorch/pytorch/issues/56586). if isinstance(axis, tuple): res = _reduce_multiple_axes(torch.all, x, axis, keepdims=keepdims, **kwargs) return res.to(torch.bool) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.all(x, **kwargs) res = _axis_none_keepdims(res, ndim, keepdims) return res.to(torch.bool) # torch.all doesn't return bool for uint8 return torch.all(x, axis, keepdims=keepdims).to(torch.bool) def mean(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, **kwargs) -> array: # https://github.com/pytorch/pytorch/issues/29137 if axis == (): return torch.clone(x) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.mean(x, **kwargs) res = _axis_none_keepdims(res, x.ndim, keepdims) return res return torch.mean(x, axis, keepdims=keepdims, **kwargs) def std(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, correction: Union[int, float] = 0.0, keepdims: bool = False, **kwargs) -> array: # Note, float correction is not supported # https://github.com/pytorch/pytorch/issues/61492. We don't try to # implement it here for now. 
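    # Illustrative consequence of the check below: an integral float such as
    # correction=1.0 is accepted and treated as correction=1, while a value
    # like correction=0.5 raises NotImplementedError.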
if isinstance(correction, float): _correction = int(correction) if correction != _correction: raise NotImplementedError("float correction in torch std() is not yet supported") # https://github.com/pytorch/pytorch/issues/29137 if axis == (): return torch.zeros_like(x) if isinstance(axis, int): axis = (axis,) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.std(x, tuple(range(x.ndim)), correction=_correction, **kwargs) res = _axis_none_keepdims(res, x.ndim, keepdims) return res return torch.std(x, axis, correction=_correction, keepdims=keepdims, **kwargs) def var(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, correction: Union[int, float] = 0.0, keepdims: bool = False, **kwargs) -> array: # Note, float correction is not supported # https://github.com/pytorch/pytorch/issues/61492. We don't try to # implement it here for now. # if isinstance(correction, float): # correction = int(correction) # https://github.com/pytorch/pytorch/issues/29137 if axis == (): return torch.zeros_like(x) if isinstance(axis, int): axis = (axis,) if axis is None: # torch doesn't support keepdims with axis=None # (https://github.com/pytorch/pytorch/issues/71209) res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs) res = _axis_none_keepdims(res, x.ndim, keepdims) return res return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs) # torch.concat doesn't support dim=None # https://github.com/pytorch/pytorch/issues/70925 def concat(arrays: Union[Tuple[array, ...], List[array]], /, *, axis: Optional[int] = 0, **kwargs) -> array: if axis is None: arrays = tuple(ar.flatten() for ar in arrays) axis = 0 return torch.concat(arrays, axis, **kwargs) # torch.squeeze only accepts int dim and doesn't require it # https://github.com/pytorch/pytorch/issues/70924. Support for tuple dim was # added at https://github.com/pytorch/pytorch/pull/89017. def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array: if isinstance(axis, int): axis = (axis,) for a in axis: if x.shape[a] != 1: raise ValueError("squeezed dimensions must be equal to 1") axes = _normalize_axes(axis, x.ndim) # Remove this once pytorch 1.14 is released with the above PR #89017. sequence = [a - i for i, a in enumerate(axes)] for a in sequence: x = torch.squeeze(x, a) return x # torch.broadcast_to uses size instead of shape def broadcast_to(x: array, /, shape: Tuple[int, ...], **kwargs) -> array: return torch.broadcast_to(x, shape, **kwargs) # torch.permute uses dims instead of axes def permute_dims(x: array, /, axes: Tuple[int, ...]) -> array: return torch.permute(x, axes) # The axis parameter doesn't work for flip() and roll() # https://github.com/pytorch/pytorch/issues/71210. 
Also torch.flip() doesn't # accept axis=None def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: if axis is None: axis = tuple(range(x.ndim)) # torch.flip doesn't accept dim as an int but the method does # https://github.com/pytorch/pytorch/issues/18095 return x.flip(axis, **kwargs) def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array: return torch.roll(x, shift, axis, **kwargs) def nonzero(x: array, /, **kwargs) -> Tuple[array, ...]: return torch.nonzero(x, as_tuple=True, **kwargs) def where(condition: array, x1: array, x2: array, /) -> array: x1, x2 = _fix_promotion(x1, x2) return torch.where(condition, x1, x2) # torch.reshape doesn't have the copy keyword def reshape(x: array, /, shape: Tuple[int, ...], copy: Optional[bool] = None, **kwargs) -> array: if copy is not None: raise NotImplementedError("torch.reshape doesn't yet support the copy keyword") return torch.reshape(x, shape, **kwargs) # torch.arange doesn't support returning empty arrays # (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some # keyword argument combinations # (https://github.com/pytorch/pytorch/issues/70914) def arange(start: Union[int, float], /, stop: Optional[Union[int, float]] = None, step: Union[int, float] = 1, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, **kwargs) -> array: if stop is None: start, stop = 0, start if step > 0 and stop <= start or step < 0 and stop >= start: if dtype is None: if builtin_all(isinstance(i, int) for i in [start, stop, step]): dtype = torch.int64 else: dtype = torch.float32 return torch.empty(0, dtype=dtype, device=device, **kwargs) return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs) # torch.eye does not accept None as a default for the second argument and # doesn't support off-diagonals (https://github.com/pytorch/pytorch/issues/70910) def eye(n_rows: int, n_cols: Optional[int] = None, /, *, k: int = 0, dtype: Optional[Dtype] = None, device: Optional[Device] = None, **kwargs) -> array: if n_cols is None: n_cols = n_rows z = torch.zeros(n_rows, n_cols, dtype=dtype, device=device, **kwargs) if abs(k) <= n_rows + n_cols: z.diagonal(k).fill_(1) return z # torch.linspace doesn't have the endpoint parameter def linspace(start: Union[int, float], stop: Union[int, float], /, num: int, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, endpoint: bool = True, **kwargs) -> array: if not endpoint: return torch.linspace(start, stop, num+1, dtype=dtype, device=device, **kwargs)[:-1] return torch.linspace(start, stop, num, dtype=dtype, device=device, **kwargs) # torch.full does not accept an int size # https://github.com/pytorch/pytorch/issues/70906 def full(shape: Union[int, Tuple[int, ...]], fill_value: Union[bool, int, float, complex], *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, **kwargs) -> array: if isinstance(shape, int): shape = (shape,) return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs) # ones, zeros, and empty do not accept shape as a keyword argument def ones(shape: Union[int, Tuple[int, ...]], *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, **kwargs) -> array: return torch.ones(shape, dtype=dtype, device=device, **kwargs) def zeros(shape: Union[int, Tuple[int, ...]], *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, **kwargs) -> array: return torch.zeros(shape, dtype=dtype, device=device, 
**kwargs) def empty(shape: Union[int, Tuple[int, ...]], *, dtype: Optional[Dtype] = None, device: Optional[Device] = None, **kwargs) -> array: return torch.empty(shape, dtype=dtype, device=device, **kwargs) # tril and triu do not call the keyword argument k def tril(x: array, /, *, k: int = 0) -> array: return torch.tril(x, k) def triu(x: array, /, *, k: int = 0) -> array: return torch.triu(x, k) # Functions that aren't in torch https://github.com/pytorch/pytorch/issues/58742 def expand_dims(x: array, /, *, axis: int = 0) -> array: return torch.unsqueeze(x, axis) def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array: return x.to(dtype, copy=copy) def broadcast_arrays(*arrays: array) -> List[array]: shape = torch.broadcast_shapes(*[a.shape for a in arrays]) return [torch.broadcast_to(a, shape) for a in arrays] # https://github.com/pytorch/pytorch/issues/70920 def unique_all(x: array) -> UniqueAllResult: # torch.unique doesn't support returning indices. # https://github.com/pytorch/pytorch/issues/36748. The workaround # suggested in that issue doesn't actually function correctly (it relies # on non-deterministic behavior of scatter()). raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)") # values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True) # # torch.unique incorrectly gives a 0 count for nan values. # # https://github.com/pytorch/pytorch/issues/94106 # counts[torch.isnan(values)] = 1 # return UniqueAllResult(values, indices, inverse_indices, counts) def unique_counts(x: array) -> UniqueCountsResult: values, counts = torch.unique(x, return_counts=True) # torch.unique incorrectly gives a 0 count for nan values. # https://github.com/pytorch/pytorch/issues/94106 counts[torch.isnan(values)] = 1 return UniqueCountsResult(values, counts) def unique_inverse(x: array) -> UniqueInverseResult: values, inverse = torch.unique(x, return_inverse=True) return UniqueInverseResult(values, inverse) def unique_values(x: array) -> array: return torch.unique(x) def matmul(x1: array, x2: array, /, **kwargs) -> array: # torch.matmul doesn't type promote (but differently from _fix_promotion) x1, x2 = _fix_promotion(x1, x2, only_scalar=False) return torch.matmul(x1, x2, **kwargs) matrix_transpose = get_xp(torch)(_aliases_matrix_transpose) _vecdot = get_xp(torch)(_aliases_vecdot) def vecdot(x1: array, x2: array, /, *, axis: int = -1) -> array: x1, x2 = _fix_promotion(x1, x2, only_scalar=False) return _vecdot(x1, x2, axis=axis) # torch.tensordot uses dims instead of axes def tensordot(x1: array, x2: array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2, **kwargs) -> array: # Note: torch.tensordot fails with integer dtypes when there is only 1 # element in the axis (https://github.com/pytorch/pytorch/issues/84530). x1, x2 = _fix_promotion(x1, x2, only_scalar=False) return torch.tensordot(x1, x2, dims=axes, **kwargs) def isdtype( dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], *, _tuple=True, # Disallow nested tuples ) -> bool: """ Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``. Note that outside of this function, this compat library does not yet fully support complex numbers. 
See https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html for more details """ if isinstance(kind, tuple) and _tuple: return builtin_any(isdtype(dtype, k, _tuple=False) for k in kind) elif isinstance(kind, str): if kind == 'bool': return dtype == torch.bool elif kind == 'signed integer': return dtype in _int_dtypes and dtype.is_signed elif kind == 'unsigned integer': return dtype in _int_dtypes and not dtype.is_signed elif kind == 'integral': return dtype in _int_dtypes elif kind == 'real floating': return dtype.is_floating_point elif kind == 'complex floating': return dtype.is_complex elif kind == 'numeric': return isdtype(dtype, ('integral', 'real floating', 'complex floating')) else: raise ValueError(f"Unrecognized data type kind: {kind!r}") else: return dtype == kind def take(x: array, indices: array, /, *, axis: Optional[int] = None, **kwargs) -> array: if axis is None: if x.ndim != 1: raise ValueError("axis must be specified when ndim > 1") axis = 0 return torch.index_select(x, axis, indices, **kwargs) __all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'add', 'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal', 'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal', 'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder', 'subtract', 'max', 'min', 'sort', 'prod', 'sum', 'any', 'all', 'mean', 'std', 'var', 'concat', 'squeeze', 'broadcast_to', 'flip', 'roll', 'nonzero', 'where', 'reshape', 'arange', 'eye', 'linspace', 'full', 'ones', 'zeros', 'empty', 'tril', 'triu', 'expand_dims', 'astype', 'broadcast_arrays', 'unique_all', 'unique_counts', 'unique_inverse', 'unique_values', 'matmul', 'matrix_transpose', 'vecdot', 'tensordot', 'isdtype', 'take'] array-api-compat-1.4/array_api_compat/torch/linalg.py000066400000000000000000000040631450067300000227730ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING if TYPE_CHECKING: import torch array = torch.Tensor from torch.linalg import * # torch.linalg doesn't define __all__ # from torch.linalg import __all__ as linalg_all from torch import linalg as torch_linalg linalg_all = [i for i in dir(torch_linalg) if not i.startswith('_')] # These are implemented in torch but aren't in the linalg namespace from torch import outer, trace from ._aliases import _fix_promotion, matrix_transpose, tensordot # Note: torch.linalg.cross does not default to axis=-1 (it defaults to the # first axis with size 3), see https://github.com/pytorch/pytorch/issues/58743 def cross(x1: array, x2: array, /, *, axis: int = -1) -> array: x1, x2 = _fix_promotion(x1, x2, only_scalar=False) return torch_linalg.cross(x1, x2, dim=axis) def vecdot(x1: array, x2: array, /, *, axis: int = -1, **kwargs) -> array: from ._aliases import isdtype x1, x2 = _fix_promotion(x1, x2, only_scalar=False) # torch.linalg.vecdot doesn't support integer dtypes if isdtype(x1.dtype, 'integral') or isdtype(x2.dtype, 'integral'): if kwargs: raise RuntimeError("vecdot kwargs not supported for integral dtypes") ndim = max(x1.ndim, x2.ndim) x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape) x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape) if x1_shape[axis] != x2_shape[axis]: raise ValueError("x1 and x2 must have the same size along the given axis") x1_, x2_ = torch.broadcast_tensors(x1, x2) x1_ = torch.moveaxis(x1_, axis, -1) x2_ = torch.moveaxis(x2_, axis, -1) res = x1_[..., None, :] @ x2_[..., None] return res[..., 0, 0] return 
torch.linalg.vecdot(x1, x2, dim=axis, **kwargs) def solve(x1: array, x2: array, /, **kwargs) -> array: x1, x2 = _fix_promotion(x1, x2, only_scalar=False) return torch.linalg.solve(x1, x2, **kwargs) __all__ = linalg_all + ['outer', 'trace', 'matrix_transpose', 'tensordot', 'vecdot', 'solve'] del linalg_all array-api-compat-1.4/cupy-skips.txt000066400000000000000000000001411450067300000173630ustar00rootroot00000000000000# Hangs array_api_tests/test_linalg.py::test_qr array_api_tests/test_linalg.py::test_matrix_rank array-api-compat-1.4/cupy-xfails.txt000066400000000000000000000341111450067300000175240ustar00rootroot00000000000000# cupy doesn't have __index__ (and we cannot wrap the ndarray object) array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint8)] array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint16)] array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint32)] array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint64)] array_api_tests/test_array_object.py::test_scalar_casting[__index__(int8)] array_api_tests/test_array_object.py::test_scalar_casting[__index__(int16)] array_api_tests/test_array_object.py::test_scalar_casting[__index__(int32)] array_api_tests/test_array_object.py::test_scalar_casting[__index__(int64)] # testsuite bug (https://github.com/data-apis/array-api-tests/issues/172) array_api_tests/test_array_object.py::test_getitem # copy=False is not yet implemented array_api_tests/test_creation_functions.py::test_asarray_arrays # finfo test is testing that the result is a float instead of float32 (see # also https://github.com/data-apis/array-api/issues/405) array_api_tests/test_data_type_functions.py::test_finfo[float32] # Some array attributes are missing, and we do not wrap the array object array_api_tests/test_has_names.py::test_has_names[array_method-__array_namespace__] array_api_tests/test_has_names.py::test_has_names[array_method-__index__] array_api_tests/test_has_names.py::test_has_names[array_method-to_device] array_api_tests/test_has_names.py::test_has_names[array_attribute-mT] # Some linalg tests depend on .mT instead of matrix_transpose() # and some require https://github.com/data-apis/array-api-tests/pull/101 to array_api_tests/test_linalg.py::test_eigvalsh array_api_tests/test_linalg.py::test_matrix_norm array_api_tests/test_linalg.py::test_matrix_power array_api_tests/test_linalg.py::test_solve array_api_tests/test_linalg.py::test_svd array_api_tests/test_linalg.py::test_svdvals # cupy uses 2023.12 trace() behavior https://github.com/data-apis/array-api/pull/502 array_api_tests/test_linalg.py::test_trace # We cannot modify array methods array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_add[__add__(x, s)] # floating point inaccuracy array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)] # cupy (arg)min/max wrong with infinities # https://github.com/cupy/cupy/issues/7424 
array_api_tests/test_searching_functions.py::test_argmax array_api_tests/test_searching_functions.py::test_argmin array_api_tests/test_statistical_functions.py::test_min array_api_tests/test_statistical_functions.py::test_max # testsuite incorrectly thinks meshgrid doesn't have indexing argument # (https://github.com/data-apis/array-api-tests/issues/171) array_api_tests/test_signatures.py::test_func_signature[meshgrid] # testsuite issue with test_square # https://github.com/data-apis/array-api-tests/issues/190 array_api_tests/test_operators_and_elementwise_functions.py::test_square # We cannot add array attributes array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] array_api_tests/test_signatures.py::test_array_method_signature[__index__] array_api_tests/test_signatures.py::test_array_method_signature[to_device] # We do not attempt to workaround special cases (and the operator method ones array_api_tests/test_special_cases.py::test_unary[abs(x_i is -0) -> +0] array_api_tests/test_special_cases.py::test_unary[__abs__(x_i is -0) -> +0] array_api_tests/test_special_cases.py::test_unary[asin(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[asinh(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[atan(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[atanh(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[ceil(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[cos(x_i is -0) -> 1] array_api_tests/test_special_cases.py::test_unary[cosh(x_i is -0) -> 1] array_api_tests/test_special_cases.py::test_unary[exp(x_i is -0) -> 1] array_api_tests/test_special_cases.py::test_unary[expm1(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[floor(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[log1p(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[round(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[sin(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[sinh(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[sqrt(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[tan(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[tanh(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_unary[trunc(x_i is -0) -> -0] array_api_tests/test_special_cases.py::test_binary[add(x1_i is -0 and x2_i is -0) -> -0] array_api_tests/test_special_cases.py::test_binary[add(x1_i is -0 and x2_i is +0) -> +0] array_api_tests/test_special_cases.py::test_binary[add(x1_i is +0 and x2_i is -0) -> +0] array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -0 and x2_i is -0) -> -0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -0 and x2_i is +0) -> +0] array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +0 and x2_i is -0) -> +0] array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and x2_i is -0) -> roughly +pi/2] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is +0 and x2_i is -0) -> roughly +pi] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i is +0) -> -0] 
array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i is -0) -> roughly -pi] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i is -0 and x2_i < 0) -> roughly -pi] array_api_tests/test_special_cases.py::test_binary[atan2(x1_i < 0 and x2_i is -0) -> roughly -pi/2] array_api_tests/test_special_cases.py::test_binary[divide(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_binary[divide(x1_i is -0 and x2_i < 0) -> +0] array_api_tests/test_special_cases.py::test_binary[divide(x1_i > 0 and x2_i is -0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[divide(x1_i < 0 and x2_i is -0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i < 0) -> +0] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is -0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is -0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -0 and x2_i < 0) -> +0] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i > 0 and x2_i is -0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i < 0 and x2_i is -0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i < 0) -> +0] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is -0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is -0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[pow(x2_i is -0) -> 1] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x2_i is -0) -> 1] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i < 0) -> -0] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i < 0) -> -0] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i > 0 and x2_i is -0) 
-> NaN] array_api_tests/test_special_cases.py::test_binary[remainder(x1_i < 0 and x2_i is -0) -> NaN] array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i] array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i < 0) -> -0] array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i < 0) -> -0] array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i > 0 and x2_i is -0) -> NaN] array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i < 0 and x2_i is -0) -> NaN] array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i] array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] array_api_tests/test_special_cases.py::test_iop[__iadd__(x1_i is -0 and x2_i is -0) -> -0] array_api_tests/test_special_cases.py::test_iop[__iadd__(x1_i is -0 and x2_i is +0) -> +0] array_api_tests/test_special_cases.py::test_iop[__iadd__(x1_i is +0 and x2_i is -0) -> +0] array_api_tests/test_special_cases.py::test_iop[__iadd__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0] array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i is -0 and x2_i < 0) -> +0] array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i > 0 and x2_i is -0) -> -infinity] array_api_tests/test_special_cases.py::test_iop[__itruediv__(x1_i < 0 and x2_i is -0) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -0 and x2_i > 0) -> -0] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -0 and x2_i < 0) -> +0] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i > 0 and x2_i is -0) -> -infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i < 0 and x2_i is -0) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ipow__(x2_i is -0) -> 1] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is +0 and x2_i > 0) -> +0] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is -0 and x2_i 
> 0) -> +0] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is +0 and x2_i < 0) -> -0] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is -0 and x2_i < 0) -> -0] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i > 0 and x2_i is -0) -> NaN] array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i < 0 and x2_i is -0) -> NaN] array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i] array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i] array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i] array_api_tests/test_special_cases.py::test_iop[__imod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i] array-api-compat-1.4/numpy-1-21-xfails.txt000066400000000000000000000507551450067300000203060ustar00rootroot00000000000000# asarray(copy=False) is not yet implemented array_api_tests/test_creation_functions.py::test_asarray_arrays # https://github.com/data-apis/array-api-tests/issues/195 array_api_tests/test_creation_functions.py::test_linspace # finfo(float32).eps returns float32 but should return float array_api_tests/test_data_type_functions.py::test_finfo[float32] # Array methods and attributes not already on np.ndarray cannot be wrapped array_api_tests/test_has_names.py::test_has_names[array_method-__array_namespace__] array_api_tests/test_has_names.py::test_has_names[array_method-to_device] array_api_tests/test_has_names.py::test_has_names[array_attribute-device] array_api_tests/test_has_names.py::test_has_names[array_attribute-mT] # linalg tests require https://github.com/data-apis/array-api-tests/pull/101 # cleanups. Also some tests are using .mT array_api_tests/test_linalg.py::test_eigvalsh array_api_tests/test_linalg.py::test_solve array_api_tests/test_linalg.py::test_trace # Array methods and attributes not already on np.ndarray cannot be wrapped array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] array_api_tests/test_signatures.py::test_array_method_signature[to_device] # NumPy deviates in some special cases for floordiv array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] 
array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] # https://github.com/numpy/numpy/issues/21213 array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] array_api_tests/meta/test_hypothesis_helpers.py::test_symmetric_matrices # testsuite issue with test_square # https://github.com/data-apis/array-api-tests/issues/190 array_api_tests/test_operators_and_elementwise_functions.py::test_square # NumPy 1.21 specific XFAILS ############################ # finfo has no smallest_normal array_api_tests/test_data_type_functions.py::test_finfo[float64] # dlpack stuff array_api_tests/test_has_names.py::test_has_names[creation-from_dlpack] array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack__] array_api_tests/test_has_names.py::test_has_names[array_method-__dlpack_device__] array_api_tests/test_signatures.py::test_func_signature[from_dlpack] array_api_tests/test_signatures.py::test_array_method_signature[__dlpack__] array_api_tests/test_signatures.py::test_array_method_signature[__dlpack_device__] # qr() doesn't support matrix stacks array_api_tests/test_linalg.py::test_qr # argmax and argmin do not support keepdims array_api_tests/test_searching_functions.py::test_argmax array_api_tests/test_searching_functions.py::test_argmin array_api_tests/test_signatures.py::test_func_signature[argmax] array_api_tests/test_signatures.py::test_func_signature[argmin] # unique doesn't support comparing nans as unequal array_api_tests/test_set_functions.py::test_unique_all array_api_tests/test_set_functions.py::test_unique_counts array_api_tests/test_set_functions.py::test_unique_inverse array_api_tests/test_set_functions.py::test_unique_values # The test suite is incorrectly checking sums that have loss of significance # (https://github.com/data-apis/array-api-tests/issues/168) array_api_tests/test_statistical_functions.py::test_sum # NumPy 1.21 doesn't support NPY_PROMOTION_STATE=weak, so many tests fail with # type promotion issues array_api_tests/test_manipulation_functions.py::test_concat 
array_api_tests/test_operators_and_elementwise_functions.py::test_add[__add__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_add[__add__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_add[__iadd__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_add[add(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_atan2 array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[__and__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[__and__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[__iand__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[bitwise_and(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_left_shift[__ilshift__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_left_shift[__lshift__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_left_shift[__lshift__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_left_shift[bitwise_left_shift(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_or[__ior__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_or[__or__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_or[__or__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_or[bitwise_or(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__irshift__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__rshift__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__rshift__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[bitwise_right_shift(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[__ixor__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[__xor__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[__xor__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[bitwise_xor(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_divide[divide(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_equal[equal(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x, s)] array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_greater[__gt__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_greater[greater(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_less[__lt__(x1, x2)] array_api_tests/test_operators_and_elementwise_functions.py::test_less_equal[less_equal(x1, x2)] 
array_api_tests/test_operators_and_elementwise_functions.py::test_logaddexp
array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[__imul__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[__mul__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[__mul__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[multiply(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_pow[__ipow__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_pow[__pow__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_pow[__pow__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_pow[pow(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__isub__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[subtract(x1, x2)]
array_api_tests/test_searching_functions.py::test_where
array_api_tests/test_special_cases.py::test_binary[__add__((x1_i is +0 or x1_i == -0) and isfinite(x2_i) and x2_i != 0) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and (x2_i is +0 or x2_i == -0)) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is +0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is -0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is +0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is -0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i > 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_i is +infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_i is -infinity) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) > 1 and x2_i is +infinity) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) > 1 and x2_i is -infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is NaN and not x2_i == 0) -> NaN]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is +0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is -0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is +0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is -0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i > 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[add((x1_i is +0 or x1_i == -0) and isfinite(x2_i) and x2_i != 0) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and (x2_i is +0 or x2_i == -0)) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[atan2(x1_i < 0 and x2_i is +0) -> roughly -pi/2]
array_api_tests/test_special_cases.py::test_binary[atan2(x1_i < 0 and x2_i is -0) -> roughly -pi/2]
array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and x2_i is +0) -> roughly +pi/2]
array_api_tests/test_special_cases.py::test_binary[atan2(x1_i > 0 and x2_i is -0) -> roughly +pi/2]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i < 0 and x2_i is +0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i < 0 and x2_i is -0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i > 0 and x2_i is +0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i > 0 and x2_i is -0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i is -0 and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[divide(x1_i is -0 and x2_i > 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i < 0 and x2_i is +0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i < 0 and x2_i is -0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i > 0 and x2_i is +0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i > 0 and x2_i is -0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -0 and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -0 and x2_i > 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[pow(abs(x1_i) < 1 and x2_i is +infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[pow(abs(x1_i) < 1 and x2_i is -infinity) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[pow(abs(x1_i) > 1 and x2_i is +infinity) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[pow(abs(x1_i) > 1 and x2_i is -infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is +0 and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is +infinity and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is +infinity and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is -infinity and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[pow(x1_i is NaN and not x2_i == 0) -> NaN]
array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[remainder(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_iop[__iadd__(x1_i is -0 and x2_i is -0) -> -0]

array-api-compat-1.4/numpy-skips.txt000066400000000000000000000020201450067300000175510ustar00rootroot00000000000000
# These tests cause a core dump on CI, so we have to skip them entirely
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)]

array-api-compat-1.4/numpy-xfails.txt000066400000000000000000000105741450067300000177230ustar00rootroot00000000000000
# asarray(copy=False) is not yet implemented
array_api_tests/test_creation_functions.py::test_asarray_arrays
# finfo(float32).eps returns float32 but should return float
array_api_tests/test_data_type_functions.py::test_finfo[float32]

# Array methods and attributes not already on np.ndarray cannot be wrapped
array_api_tests/test_has_names.py::test_has_names[array_method-__array_namespace__]
array_api_tests/test_has_names.py::test_has_names[array_method-to_device]
array_api_tests/test_has_names.py::test_has_names[array_attribute-device]
array_api_tests/test_has_names.py::test_has_names[array_attribute-mT]

# linalg tests require https://github.com/data-apis/array-api-tests/pull/101
# cleanups. Also some tests are using .mT
array_api_tests/test_linalg.py::test_eigvalsh
array_api_tests/test_linalg.py::test_solve
array_api_tests/test_linalg.py::test_trace

# Array methods and attributes not already on np.ndarray cannot be wrapped
array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__]
array_api_tests/test_signatures.py::test_array_method_signature[to_device]

# NumPy deviates in some special cases for floordiv
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]

# testsuite issue with test_square
# https://github.com/data-apis/array-api-tests/issues/190
array_api_tests/test_operators_and_elementwise_functions.py::test_square

# https://github.com/numpy/numpy/issues/21213
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
array_api_tests/meta/test_hypothesis_helpers.py::test_symmetric_matrices

# The test suite is incorrectly checking sums that have loss of significance
# (https://github.com/data-apis/array-api-tests/issues/168)
array_api_tests/test_statistical_functions.py::test_sum
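The "NumPy deviates in some special cases for floordiv" xfails above come from a real divergence between NumPy and the standard's special-case table. A minimal sketch (not a file in this archive; exact warning behavior may vary by NumPy version) of the deviation those entries mark:

# Illustrative only: the standard expects floor_divide(+infinity, finite x2 > 0)
# to return +infinity, but NumPy computes nan for infinite dividends, which is
# why the floor_divide special-case tests are xfailed for NumPy.
import numpy as np

with np.errstate(invalid="ignore"):
    print(np.floor_divide(np.inf, 2.0))  # nan under NumPy; the spec says +inf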
array-api-compat-1.4/setup.py000066400000000000000000000020761450067300000162360ustar00rootroot00000000000000
from setuptools import setup, find_packages

with open("README.md", "r") as fh:
    long_description = fh.read()

import array_api_compat

setup(
    name='array_api_compat',
    version=array_api_compat.__version__,
    packages=find_packages(include=['array_api_compat*']),
    author="Consortium for Python Data API Standards",
    description="A wrapper around NumPy and other array libraries to make them compatible with the Array API standard",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://data-apis.org/array-api-compat/",
    license="MIT",
    python_requires=">=3.8",
    extras_require={
        "numpy": "numpy",
        "cupy": "cupy",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
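A minimal usage sketch (not a file from this archive) of the package that setup.py above builds. array_namespace, used throughout the tests below, is the main entry point; the scale_to_unit helper here is hypothetical and only for illustration:

import numpy as np
from array_api_compat import array_namespace

def scale_to_unit(x):
    # Get an array-API-compatible namespace for whichever library made x;
    # the same code then works for NumPy, CuPy, or PyTorch arrays.
    xp = array_namespace(x)
    return x / xp.max(xp.abs(x))

print(scale_to_unit(np.asarray([1.0, -2.0, 4.0])))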
array-api-compat-1.4/test_cupy.sh000077500000000000000000000016011450067300000170730ustar00rootroot00000000000000
#!/usr/bin/env bash
# We cannot test cupy on CI so this script will test it manually. Assumes it
# is being run in an environment that has cupy and the array-api-tests
# dependencies installed
set -x
set -e

# Run the vendoring tests in this repo
pytest

tmpdir=$(mktemp -d)
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
export PYTHONPATH=$SCRIPT_DIR

PYTEST_ARGS="--max-examples 200 -v -rxXfE --ci"

cd $tmpdir
git clone https://github.com/data-apis/array-api-tests
cd array-api-tests
git submodule update --init

# store the hypothesis examples database in this directory, so that failures
# will be remembered across runs
mkdir -p $SCRIPT_DIR/.hypothesis
ln -s $SCRIPT_DIR/.hypothesis .hypothesis

export ARRAY_API_TESTS_MODULE=array_api_compat.cupy
pytest ${PYTEST_ARGS} --xfails-file $SCRIPT_DIR/cupy-xfails.txt --skips-file $SCRIPT_DIR/cupy-skips.txt "$@"

array-api-compat-1.4/tests/000077500000000000000000000000001450067300000156615ustar00rootroot00000000000000
array-api-compat-1.4/tests/__init__.py000066400000000000000000000003131450067300000177670ustar00rootroot00000000000000
"""
Basic tests for the compat library

This only tests basic things like that vendoring works. The extensive tests
are done by the array API test suite https://github.com/data-apis/array-api-tests
"""

array-api-compat-1.4/tests/_helpers.py000066400000000000000000000002621450067300000200340ustar00rootroot00000000000000
from importlib import import_module

import pytest

def import_(library):
    if 'cupy' in library:
        return pytest.importorskip(library)
    return import_module(library)

array-api-compat-1.4/tests/test_array_namespace.py000066400000000000000000000024521450067300000224270ustar00rootroot00000000000000
import array_api_compat
from array_api_compat import array_namespace

from ._helpers import import_

import pytest

@pytest.mark.parametrize("library", ["cupy", "numpy", "torch"])
@pytest.mark.parametrize("api_version", [None, '2021.12'])
def test_array_namespace(library, api_version):
    lib = import_(library)

    array = lib.asarray([1.0, 2.0, 3.0])
    namespace = array_api_compat.array_namespace(array, api_version=api_version)

    if 'array_api' in library:
        assert namespace == lib
    else:
        assert namespace == getattr(array_api_compat, library)

def test_array_namespace_multiple():
    import numpy as np

    x = np.asarray([1, 2])
    assert array_namespace(x, x) == array_namespace((x, x)) == \
        array_namespace((x, x), x) == array_api_compat.numpy

def test_array_namespace_errors():
    pytest.raises(TypeError, lambda: array_namespace([1]))
    pytest.raises(TypeError, lambda: array_namespace())

    import numpy as np
    import torch

    x = np.asarray([1, 2])
    y = torch.asarray([1, 2])

    pytest.raises(TypeError, lambda: array_namespace(x, y))
    pytest.raises(ValueError, lambda: array_namespace(x, api_version='2022.12'))

def test_get_namespace():
    # Backwards compatible wrapper
    assert array_api_compat.get_namespace is array_api_compat.array_namespace

array-api-compat-1.4/tests/test_common.py000066400000000000000000000016221450067300000205630ustar00rootroot00000000000000
from ._helpers import import_
from array_api_compat import to_device, device

import pytest
import numpy as np
from numpy.testing import assert_allclose

@pytest.mark.parametrize("library", ["cupy", "numpy", "torch"])
def test_to_device_host(library):
    # different libraries have different semantics
    # for DtoH transfers; ensure that we support a portable
    # shim for common array libs
    # see: https://github.com/scipy/scipy/issues/18286#issuecomment-1527552919
    xp = import_('array_api_compat.' + library)

    expected = np.array([1, 2, 3])
    x = xp.asarray([1, 2, 3])
    x = to_device(x, "cpu")
    # torch will return a genuine Device object, but
    # the other libs will do something different with
    # a `device(x)` query; however, what's really important
    # here is that we can test portably after calling
    # to_device(x, "cpu") to return to host
    assert_allclose(x, expected)
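A brief sketch (illustrative only, not a file in this archive) of the portable device idiom that test_to_device_host above exercises. device and to_device are the helpers array_api_compat exports for arrays whose classes cannot be wrapped:

import numpy as np
from array_api_compat import array_namespace, device, to_device

x = np.asarray([1.0, 2.0, 3.0])
xp = array_namespace(x)
d = device(x)                 # query the array's device portably
y = to_device(xp.sqrt(x), d)  # place a result on the same device (a no-op for NumPy)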
array-api-compat-1.4/tests/test_isdtype.py000066400000000000000000000070001450067300000207500ustar00rootroot00000000000000
"""
isdtype is not yet tested in the test suite, and it should extend properly to
non-spec dtypes
"""

from ._helpers import import_

import pytest

# Check the known dtypes by their string names

def _spec_dtypes(library):
    if library == 'torch':
        # torch does not have unsigned integer dtypes
        return {
            'bool',
            'complex64',
            'complex128',
            'uint8',
            'int8',
            'int16',
            'int32',
            'int64',
            'float32',
            'float64',
        }
    else:
        return {
            'bool',
            'complex64',
            'complex128',
            'float32',
            'float64',
            'int16',
            'int32',
            'int64',
            'int8',
            'uint16',
            'uint32',
            'uint64',
            'uint8',
        }

dtype_categories = {
    'bool': lambda d: d == 'bool',
    'signed integer': lambda d: d.startswith('int'),
    'unsigned integer': lambda d: d.startswith('uint'),
    'integral': lambda d: dtype_categories['signed integer'](d) or dtype_categories['unsigned integer'](d),
    'real floating': lambda d: 'float' in d,
    'complex floating': lambda d: d.startswith('complex'),
    'numeric': lambda d: dtype_categories['integral'](d) or dtype_categories['real floating'](d) or dtype_categories['complex floating'](d),
}

def isdtype_(dtype_, kind):
    # Check a dtype_ string against kind. Note that 'bool' technically has two
    # meanings here but they are both the same.
    if kind in dtype_categories:
        res = dtype_categories[kind](dtype_)
    else:
        res = dtype_ == kind
    assert type(res) is bool
    return res

@pytest.mark.parametrize("library", ["cupy", "numpy", "torch"])
def test_isdtype_spec_dtypes(library):
    xp = import_('array_api_compat.' + library)

    isdtype = xp.isdtype

    for dtype_ in _spec_dtypes(library):
        for dtype2_ in _spec_dtypes(library):
            dtype = getattr(xp, dtype_)
            dtype2 = getattr(xp, dtype2_)
            res = isdtype_(dtype_, dtype2_)
            assert isdtype(dtype, dtype2) is res, (dtype_, dtype2_)

        for cat in dtype_categories:
            res = isdtype_(dtype_, cat)
            assert isdtype(dtype, cat) == res, (dtype_, cat)

        # Basic tuple testing (the array-api testsuite will be more complete here)
        for kind1_ in [*_spec_dtypes(library), *dtype_categories]:
            for kind2_ in [*_spec_dtypes(library), *dtype_categories]:
                kind1 = kind1_ if kind1_ in dtype_categories else getattr(xp, kind1_)
                kind2 = kind2_ if kind2_ in dtype_categories else getattr(xp, kind2_)
                kind = (kind1, kind2)

                res = isdtype_(dtype_, kind1_) or isdtype_(dtype_, kind2_)
                assert isdtype(dtype, kind) == res, (dtype_, (kind1_, kind2_))

additional_dtypes = [
    'float16',
    'float128',
    'complex256',
    'bfloat16',
]

@pytest.mark.parametrize("library", ["cupy", "numpy", "torch"])
@pytest.mark.parametrize("dtype_", additional_dtypes)
def test_isdtype_additional_dtypes(library, dtype_):
    xp = import_('array_api_compat.' + library)

    isdtype = xp.isdtype

    if not hasattr(xp, dtype_):
        return  # pytest.skip(f"{library} doesn't have dtype {dtype_}")

    dtype = getattr(xp, dtype_)

    for cat in dtype_categories:
        res = isdtype_(dtype_, cat)
        assert isdtype(dtype, cat) == res, (dtype_, cat)
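What the isdtype checks above look like in user code, as a short sketch (not a file in this archive). The second argument can be a dtype, a category string, or a tuple of either, exactly as the tuple-testing loop exercises:

import array_api_compat.numpy as xp

assert xp.isdtype(xp.float32, 'real floating')
assert xp.isdtype(xp.int64, 'numeric')
assert not xp.isdtype(xp.uint8, 'signed integer')
# A tuple matches if the dtype matches any member of it
assert xp.isdtype(xp.complex128, (xp.float64, 'complex floating'))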
array-api-compat-1.4/tests/test_vendoring.py000066400000000000000000000006241450067300000212670ustar00rootroot00000000000000
from pytest import skip

def test_vendoring_numpy():
    from vendor_test import uses_numpy
    uses_numpy._test_numpy()

def test_vendoring_cupy():
    try:
        import cupy
    except ImportError:
        skip("CuPy is not installed")

    from vendor_test import uses_cupy
    uses_cupy._test_cupy()

def test_vendoring_torch():
    from vendor_test import uses_torch
    uses_torch._test_torch()

array-api-compat-1.4/torch-skips.txt000066400000000000000000000020201450067300000175200ustar00rootroot00000000000000
# These tests cause a core dump on CI, so we have to skip them entirely
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__ifloordiv__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[floor_divide(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x, s)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)]

array-api-compat-1.4/torch-xfails.txt000066400000000000000000000367061450067300000176730ustar00rootroot00000000000000
# Note: see array_api_compat/torch/_aliases.py for links to corresponding
# pytorch issues

# We cannot wrap the array object
# Indexing does not support negative step
array_api_tests/test_array_object.py::test_getitem
array_api_tests/test_array_object.py::test_setitem
# Masking doesn't support 0 dimensions in the mask
array_api_tests/test_array_object.py::test_getitem_masking
# torch doesn't have uint dtypes other than uint8
array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint16)]
array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint32)]
array_api_tests/test_array_object.py::test_scalar_casting[__int__(uint64)]
array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint16)]
array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint32)]
array_api_tests/test_array_object.py::test_scalar_casting[__index__(uint64)]

# Overflow error from large inputs
array_api_tests/test_creation_functions.py::test_arange
# pytorch linspace bug (should be fixed in torch 2.0)
array_api_tests/test_creation_functions.py::test_linspace

# torch doesn't have higher uint dtypes
array_api_tests/test_data_type_functions.py::test_iinfo[uint16]
array_api_tests/test_data_type_functions.py::test_iinfo[uint32]
array_api_tests/test_data_type_functions.py::test_iinfo[uint64]

# We cannot wrap the tensor object
array_api_tests/test_has_names.py::test_has_names[array_method-__array_namespace__]
array_api_tests/test_has_names.py::test_has_names[array_method-to_device]

# tensordot doesn't allow integer dtypes in some corner cases
array_api_tests/test_linalg.py::test_tensordot
# We cannot wrap the tensor object
array_api_tests/test_operators_and_elementwise_functions.py::test_add[__add__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_and[__and__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_left_shift[__lshift__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_or[__or__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__rshift__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[__irshift__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_xor[__xor__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_divide[__truediv__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_equal[__eq__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_not_equal[__ne__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_floor_divide[__floordiv__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_greater[__gt__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_greater_equal[__ge__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_less[__lt__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_less_equal[__le__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_multiply[__mul__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_pow[__pow__(x1, x2)]
# This test is skipped instead of xfailed because it causes core dumps on CI
# array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__mod__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[__imod__(x1, x2)]
array_api_tests/test_operators_and_elementwise_functions.py::test_subtract[__sub__(x1, x2)]

# overflow near float max
array_api_tests/test_operators_and_elementwise_functions.py::test_log1p

# torch doesn't handle shifting by more than the bit size correctly
# https://github.com/pytorch/pytorch/issues/70904
array_api_tests/test_operators_and_elementwise_functions.py::test_bitwise_right_shift[bitwise_right_shift(x1, x2)]

# Torch bug for remainder in some cases with large values
array_api_tests/test_operators_and_elementwise_functions.py::test_remainder[remainder(x1, x2)]

# unique_all cannot be implemented because torch's unique does not support
# returning indices
array_api_tests/test_set_functions.py::test_unique_all
# unique_inverse incorrectly counts nan values
# (https://github.com/pytorch/pytorch/issues/94106)
array_api_tests/test_set_functions.py::test_unique_inverse

# The test suite incorrectly divides by 0 here
# (https://github.com/data-apis/array-api-tests/issues/170)
array_api_tests/test_signatures.py::test_func_signature[floor_divide]
array_api_tests/test_signatures.py::test_func_signature[remainder]
array_api_tests/test_signatures.py::test_array_method_signature[__mod__]

# We cannot add attributes to the tensor object
array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__]
array_api_tests/test_signatures.py::test_array_method_signature[to_device]

# We do not attempt to work around special-case differences (most are on
# tensor methods which we couldn't fix anyway).
array_api_tests/test_special_cases.py::test_binary[add(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0]
array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is +infinity and isfinite(x2_i)) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__add__(x1_i is -infinity and isfinite(x2_i)) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is +infinity) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x2_i is -infinity) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__add__((x1_i is +0 or x1_i == -0) and isfinite(x2_i) and x2_i != 0) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and (x2_i is +0 or x2_i == -0)) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[__add__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i > 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -0 and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is +0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i > 0 and x2_i is -0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is +0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i < 0 and x2_i is -0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[__truediv__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i > 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -0 and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is +0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i > 0 and x2_i is -0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is +0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i < 0 and x2_i is -0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) > 1 and x2_i is +infinity) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) > 1 and x2_i is -infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_i is +infinity) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(abs(x1_i) < 1 and x2_i is -infinity) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +infinity and x2_i < 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is NaN and not x2_i == 0) -> NaN]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -infinity and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is +0 and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and x2_i.is_integer() and x2_i % 2 == 1) -> -infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i < 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0]
array_api_tests/test_special_cases.py::test_binary[__pow__(x1_i < 0 and isfinite(x1_i) and isfinite(x2_i) and not x2_i.is_integer()) -> NaN]
array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is -0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[remainder(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is +0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__mod__(x1_i is -0 and x2_i < 0) -> -0]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is +infinity) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> x2_i]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is -infinity) -> x1_i]
array_api_tests/test_special_cases.py::test_binary[__mod__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> x2_i]
array_api_tests/test_special_cases.py::test_iop[__iadd__(isfinite(x1_i) and x1_i != 0 and x2_i == -x1_i) -> +0]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is -0 and x2_i > 0) -> +0]
array_api_tests/test_special_cases.py::test_iop[__imod__(x1_i is +0 and x2_i < 0) -> -0]

# testsuite issue with test_square
# https://github.com/data-apis/array-api-tests/issues/190
array_api_tests/test_operators_and_elementwise_functions.py::test_square

# Float correction is not supported by pytorch
# (https://github.com/data-apis/array-api-tests/issues/168)
array_api_tests/test_special_cases.py::test_empty_arrays[std]
array_api_tests/test_special_cases.py::test_empty_arrays[var]
array_api_tests/test_special_cases.py::test_nan_propagation[std]
array_api_tests/test_special_cases.py::test_nan_propagation[var]
array_api_tests/test_statistical_functions.py::test_std
array_api_tests/test_statistical_functions.py::test_var

# The test suite is incorrectly checking sums that have loss of significance
# (https://github.com/data-apis/array-api-tests/issues/168)
array_api_tests/test_statistical_functions.py::test_sum

# These functions do not yet support complex numbers
array_api_tests/test_operators_and_elementwise_functions.py::test_sign
array_api_tests/test_operators_and_elementwise_functions.py::test_expm1
array_api_tests/test_operators_and_elementwise_functions.py::test_round
array_api_tests/test_set_functions.py::test_unique_counts
array_api_tests/test_set_functions.py::test_unique_values
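A small probe (illustrative only, not a file in this archive) of the shift problem xfailed above (pytorch/pytorch#70904). Mathematically, right-shifting by at least the bit width should give 0 (floor(1 / 2**33) == 0), but torch's result for an out-of-range shift count is implementation-defined, which is why the bitwise_right_shift test is xfailed:

import torch

x = torch.tensor(1, dtype=torch.int32)
# A conforming result would be 0; torch may return something else here.
print(torch.bitwise_right_shift(x, 33))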
array-api-compat-1.4/vendor_test/000077500000000000000000000000001450067300000170535ustar00rootroot00000000000000
array-api-compat-1.4/vendor_test/__init__.py000066400000000000000000000000001450067300000211520ustar00rootroot00000000000000
array-api-compat-1.4/vendor_test/uses_cupy.py000066400000000000000000000010371450067300000214450ustar00rootroot00000000000000
# Basic test that vendoring works

from .vendored._compat import cupy as cp_compat

import cupy as cp

def _test_cupy():
    a = cp_compat.asarray([1., 2., 3.])
    b = cp_compat.arange(3, dtype=cp_compat.float32)

    # cp.pow does not exist. Update this to use something else if it is added
    res = cp_compat.pow(a, b)
    assert res.dtype == cp_compat.float64 == cp.float64
    assert isinstance(a, cp.ndarray)
    assert isinstance(b, cp.ndarray)
    assert isinstance(res, cp.ndarray)

    cp.testing.assert_allclose(res, [1., 2., 9.])

array-api-compat-1.4/vendor_test/uses_numpy.py000066400000000000000000000010421450067300000216310ustar00rootroot00000000000000
# Basic test that vendoring works

from .vendored._compat import numpy as np_compat

import numpy as np

def _test_numpy():
    a = np_compat.asarray([1., 2., 3.])
    b = np_compat.arange(3, dtype=np_compat.float32)

    # np.pow does not exist. Update this to use something else if it is added
    res = np_compat.pow(a, b)
    assert res.dtype == np_compat.float64 == np.float64
    assert isinstance(a, np.ndarray)
    assert isinstance(b, np.ndarray)
    assert isinstance(res, np.ndarray)

    np.testing.assert_allclose(res, [1., 2., 9.])

array-api-compat-1.4/vendor_test/uses_torch.py000066400000000000000000000014301450067300000216010ustar00rootroot00000000000000
# Basic test that vendoring works

from .vendored._compat import torch as torch_compat

import torch

def _test_torch():
    a = torch_compat.asarray([1., 2., 3.])
    b = torch_compat.arange(3, dtype=torch_compat.float64)
    assert a.dtype == torch_compat.float32 == torch.float32
    assert b.dtype == torch_compat.float64 == torch.float64

    # torch.expand_dims does not exist. Update this to use something else if it is added
    res = torch_compat.expand_dims(a, axis=0)
    assert res.dtype == torch_compat.float32 == torch.float32
    assert res.shape == (1, 3)
    assert isinstance(res.shape, torch.Size)
    assert isinstance(a, torch.Tensor)
    assert isinstance(b, torch.Tensor)
    assert isinstance(res, torch.Tensor)

    torch.testing.assert_allclose(res, [[1., 2., 3.]])

array-api-compat-1.4/vendor_test/vendored/000077500000000000000000000000001450067300000206615ustar00rootroot00000000000000
array-api-compat-1.4/vendor_test/vendored/__init__.py000066400000000000000000000000001450067300000227600ustar00rootroot00000000000000
array-api-compat-1.4/vendor_test/vendored/_compat000077700000000000000000000000001450067300000261632../../array_api_compat/ustar00rootroot00000000000000
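The final archive entry above is a symlink: vendor_test/vendored/_compat points at ../../array_api_compat/, so the vendor_test package exercises the library as if it had been copied into another project. A sketch (hypothetical layout and names, not part of this archive) of what the same pattern looks like in a downstream package that vendors a copy of array_api_compat:

# Hypothetical layout:
#
#   mypkg/
#       __init__.py
#       _vendored/
#           __init__.py
#           _compat/        <- a copy of the array_api_compat/ directory
#
# mypkg/__init__.py then imports the vendored copy relatively, exactly as
# vendor_test/uses_numpy.py does above:
from ._vendored._compat import numpy as np_compat

x = np_compat.asarray([1.0, 2.0, 3.0])
print(np_compat.pow(x, 2))  # uses the vendored wrapper, no installed dependency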