pax_global_header00006660000000000000000000000064131441206420014507gustar00rootroot0000000000000052 comment=4abf0dbed5ff88a6e6c0fe81fade5dfccc5beb53 xtensor-python-0.12.4/000077500000000000000000000000001314412064200146145ustar00rootroot00000000000000xtensor-python-0.12.4/.appveyor.yml000066400000000000000000000022731314412064200172660ustar00rootroot00000000000000build: false os: Visual Studio 2015 platform: - x64 - x86 environment: matrix: - MINICONDA: C:\xtensor-conda init: - "ECHO %MINICONDA%" - C:\"Program Files (x86)"\"Microsoft Visual Studio 14.0"\VC\vcvarsall.bat %PLATFORM% - ps: if($env:Platform -eq "x64"){Start-FileDownload 'http://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe' C:\Miniconda.exe; echo "Done"} - ps: if($env:Platform -eq "x86"){Start-FileDownload 'http://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86.exe' C:\Miniconda.exe; echo "Done"} - cmd: C:\Miniconda.exe /S /D=C:\xtensor-conda - "set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%MINICONDA%\\Library\\bin;%PATH%" install: - conda config --set always_yes yes --set changeps1 no - conda update -q conda - conda info -a - conda install gtest cmake -c conda-forge - conda install xtensor==0.10.9 pytest numpy pybind11==2.1.1 -c conda-forge - "set PYTHONHOME=%MINICONDA%" - cmake -G "NMake Makefiles" -D CMAKE_INSTALL_PREFIX=%MINICONDA%\\Library -D BUILD_TESTS=ON -D PYTHON_EXECUTABLE=%MINICONDA%\\python.exe . - nmake test_xtensor_python - nmake install build_script: - py.test -s - cd test - .\test_xtensor_python xtensor-python-0.12.4/.gitignore000066400000000000000000000011171314412064200166040ustar00rootroot00000000000000# Prerequisites *.d # Compiled Object files *.slo *.lo *.o *.obj # Precompiled Headers *.gch *.pch # Compiled Dynamic libraries *.so *.dylib *.dll # Compiled Static libraries *.lai *.la *.a *.lib # Executables *.exe *.out *.app # Vim tmp files *.swp # Build directory build/ # Test build artefacts test/test_xtensor_python test/CMakeCache.txt test/Makefile test/CMakeFiles/ test/cmake_install.cmake # Documentation build artefacts docs/CMakeCache.txt docs/xml/ docs/build/ # Jupyter artefacts .ipynb_checkpoints/ # Python *.py[cod] __pycache__ build *.egg-info # py.test .cache/ xtensor-python-0.12.4/.travis.yml000066400000000000000000000056501314412064200167330ustar00rootroot00000000000000language: cpp matrix: include: - os: linux addons: apt: sources: - ubuntu-toolchain-r-test packages: - g++-4.9 env: COMPILER=gcc GCC=4.9 PY=3 - os: linux addons: apt: sources: - ubuntu-toolchain-r-test packages: - g++-5 env: COMPILER=gcc GCC=5 PY=3 - os: linux addons: apt: sources: - ubuntu-toolchain-r-test packages: - g++-5 env: COMPILER=gcc GCC=5 PY=2 - os: linux addons: apt: sources: - ubuntu-toolchain-r-test - llvm-toolchain-precise-3.6 packages: - clang-3.6 env: COMPILER=clang CLANG=3.6 PY=3 - os: linux addons: apt: sources: - ubuntu-toolchain-r-test - llvm-toolchain-precise-3.7 packages: - clang-3.7 env: COMPILER=clang CLANG=3.7 PY=3 - os: linux addons: apt: sources: - ubuntu-toolchain-r-test - llvm-toolchain-precise-3.8 packages: - clang-3.8 env: COMPILER=clang CLANG=3.8 PY=3 - os: osx osx_image: xcode8 compiler: clang env: PY=3 env: global: - MINCONDA_VERSION="latest" - MINCONDA_LINUX="Linux-x86_64" - MINCONDA_OSX="MacOSX-x86_64" before_install: - | # Configure build variables if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then if [[ "$COMPILER" == "gcc" ]]; then export CXX=g++-$GCC CC=gcc-$GCC; fi if [[ "$COMPILER" == "clang" ]]; then export CXX=clang++-$CLANG CC=clang-$CLANG; fi elif [[ "$TRAVIS_OS_NAME" 
== "osx" ]]; then export CXX=clang++ CC=clang PYTHONHOME=$HOME/miniconda; fi install: # Define the version of miniconda to download - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then MINCONDA_OS=$MINCONDA_LINUX; elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then MINCONDA_OS=$MINCONDA_OSX; fi - if [[ "$PY" == "3" ]]; then wget "http://repo.continuum.io/miniconda/Miniconda3-$MINCONDA_VERSION-$MINCONDA_OS.sh" -O miniconda.sh; else wget "http://repo.continuum.io/miniconda/Miniconda2-$MINCONDA_VERSION-$MINCONDA_OS.sh" -O miniconda.sh; fi - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no - conda update -q conda # Useful for debugging any issues with conda - conda info -a - conda install xtensor==0.10.9 pytest numpy pybind11==2.1.1 -c conda-forge - conda install cmake gtest -c conda-forge - cmake -D BUILD_TESTS=ON -D CMAKE_INSTALL_PREFIX=$HOME/miniconda . - make -j2 test_xtensor_python - make install script: - py.test -s - cd test - ./test_xtensor_python xtensor-python-0.12.4/CMakeLists.txt000066400000000000000000000076751314412064200173730ustar00rootroot00000000000000############################################################################ # Copyright (c) 2016, Johan Mabille and Sylvain Corlay # # # # Distributed under the terms of the BSD 3-Clause License. # # # # The full license is in the file LICENSE, distributed with this software. # ############################################################################ cmake_minimum_required(VERSION 3.1) project(xtensor-python) set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH}) set(XTENSOR_PYTHON_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) # Versionning # =========== set(XTENSOR_PYTHON_CONFIG_FILE "${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_python_config.hpp") file(STRINGS ${XTENSOR_PYTHON_CONFIG_FILE} xtensor_python_version_defines REGEX "#define XTENSOR_PYTHON_VERSION_(MAJOR|MINOR|PATCH)") foreach(ver ${xtensor_python_version_defines}) if(ver MATCHES "#define XTENSOR_PYTHON_VERSION_(MAJOR|MINOR|PATCH) +([^ ]+)$") set(XTENSOR_PYTHON_VERSION_${CMAKE_MATCH_1} "${CMAKE_MATCH_2}" CACHE INTERNAL "") endif() endforeach() set(${PROJECT_NAME}_VERSION ${XTENSOR_PYTHON_VERSION_MAJOR}.${XTENSOR_PYTHON_VERSION_MINOR}.${XTENSOR_PYTHON_VERSION_PATCH}) message(STATUS "xtensor-python v${${PROJECT_NAME}_VERSION}") # Build # ===== set(XTENSOR_PYTHON_HEADERS ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyarray.hpp ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pycontainer.hpp ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pystrides_adaptor.hpp ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pytensor.hpp ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/pyvectorize.hpp ${XTENSOR_PYTHON_INCLUDE_DIR}/xtensor-python/xtensor_python_config.hpp ) OPTION(BUILD_TESTS "xtensor test suite" OFF) OPTION(DOWNLOAD_GTEST "build gtest from downloaded sources" OFF) if(DOWNLOAD_GTEST OR GTEST_SRC_DIR) set(BUILD_TESTS ON) endif() if(BUILD_TESTS) include_directories(${XTENSOR_PYTHON_INCLUDE_DIR}) find_package(xtensor REQUIRED) include_directories(${xtensor_INCLUDE_DIRS}) find_package(NumPy REQUIRED) include_directories(${NUMPY_INCLUDE_DIRS}) find_package(pybind11 REQUIRED) include_directories(${pybind11_INCLUDE_DIRS}) if(MSVC) set(PYTHON_MODULE_EXTENSION ".pyd") else() set(PYTHON_MODULE_EXTENSION ".so") endif() add_subdirectory(test) add_subdirectory(benchmark) endif() # Installation # ============ include(GNUInstallDirs) include(CMakePackageConfigHelpers) install(FILES 
${XTENSOR_PYTHON_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/xtensor-python) set(XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" CACHE STRING "install path for xtensor-pythonConfig.cmake") configure_package_config_file(${PROJECT_NAME}Config.cmake.in "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" INSTALL_DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR}) # xtensor-python is header-only and does not depend on the architecture. # Remove CMAKE_SIZEOF_VOID_P from xtensor-pythonConfigVersion.cmake so that an xtensor-pythonConfig.cmake # generated for a 64 bit target can be used for 32 bit targets and vice versa. set(_XTENSOR_CMAKE_SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P}) unset(CMAKE_SIZEOF_VOID_P) write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake VERSION ${${PROJECT_NAME}_VERSION} COMPATIBILITY AnyNewerVersion) set(CMAKE_SIZEOF_VOID_P ${_XTENSOR_CMAKE_SIZEOF_VOID_P}) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake DESTINATION ${XTENSOR_PYTHON_CMAKECONFIG_INSTALL_DIR}) xtensor-python-0.12.4/LICENSE000066400000000000000000000027461314412064200156320ustar00rootroot00000000000000Copyright (c) 2016, Johan Mabille and Sylvain Corlay All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
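The install rules above place the xtensor-python headers and the generated xtensor-pythonConfig.cmake / xtensor-pythonConfigVersion.cmake files into the install prefix, so a downstream extension module can locate everything through CMake's find_package. The snippet below is a minimal consumer sketch, not part of this repository: the project name, target name and main.cpp are placeholders, and the xtensor-python_INCLUDE_DIRS variable is assumed to be provided by the installed config file; the other variables follow the conventions already used in this repository's own CMake files.

```cmake
# Minimal downstream CMakeLists.txt sketch (project/target names and main.cpp are placeholders).
cmake_minimum_required(VERSION 3.1)
project(my_xtensor_python_module)

find_package(pybind11 REQUIRED)         # Python binding helpers
find_package(NumPy REQUIRED)            # defines NUMPY_INCLUDE_DIRS (see cmake/FindNumPy.cmake)
find_package(xtensor REQUIRED)          # defines xtensor_INCLUDE_DIRS
find_package(xtensor-python REQUIRED)   # installed by the rules above; include variable assumed

# Build a Python extension module from a single translation unit.
add_library(my_module MODULE main.cpp)
target_include_directories(my_module PRIVATE
    ${pybind11_INCLUDE_DIRS}
    ${NUMPY_INCLUDE_DIRS}
    ${xtensor_INCLUDE_DIRS}
    ${xtensor-python_INCLUDE_DIRS})

# Name the module like benchmark/CMakeLists.txt does: no "lib" prefix, Python-specific suffix.
# PYTHON_MODULE_EXTENSION is assumed to be set, either by pybind11's CMake config or by the
# MSVC/.so logic in the top-level CMakeLists.txt above.
set_target_properties(my_module PROPERTIES PREFIX "" SUFFIX "${PYTHON_MODULE_EXTENSION}")
```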
xtensor-python-0.12.4/README.md000066400000000000000000000143551314412064200161030ustar00rootroot00000000000000# ![xtensor-python](http://quantstack.net/assets/images/xtensor-python.svg) [![Travis](https://travis-ci.org/QuantStack/xtensor-python.svg?branch=master)](https://travis-ci.org/QuantStack/xtensor-python) [![Appveyor](https://ci.appveyor.com/api/projects/status/qx61nsg4ebxnj8s9?svg=true)](https://ci.appveyor.com/project/QuantStack/xtensor-python) [![Documentation](http://readthedocs.org/projects/xtensor-python/badge/?version=latest)](https://xtensor-python.readthedocs.io/en/latest/?badge=latest) [![Join the Gitter Chat](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/QuantStack/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) Python bindings for the [xtensor](https://github.com/QuantStack/xtensor) C++ multi-dimensional array library. - `xtensor` is a C++ library for multi-dimensional arrays enabling numpy-style broadcasting and lazy computing. - `xtensor-python` enables inplace use of numpy arrays in C++ with all the benefits from `xtensor` - C++ universal function and broadcasting - STL - compliant APIs. - A broad coverage of numpy APIs (see [the numpy to xtensor cheat sheet](http://xtensor.readthedocs.io/en/latest/numpy.html)). The Python bindings for `xtensor` are based on the [pybind11](https://github.com/pybind/pybind11/) C++ library, which enables seemless interoperability between C++ and Python. ## Installation `xtensor-python` is a header-only library. We provide a package for the conda package manager. ```bash conda install -c conda-forge xtensor-python ``` ## Usage xtensor-python offers two container types wrapping numpy arrays inplace to provide an xtensor semantics - `pytensor` - `pyarray`. Both containers enable the numpy-style APIs of xtensor (see [the numpy to xtensor cheat sheet](http://xtensor.readthedocs.io/en/latest/numpy.html)). - On the one hand, `pyarray` has a dynamic number of dimensions. Just like numpy arrays, it can be reshaped with a shape of a different length (and the new shape is reflected on the python side). - On the other hand `pytensor` has a compile time number of dimensions, specified with a template parameter. Shapes of `pytensor` instances are stack allocated, making `pytensor` a significantly faster expression than `pyarray`. ### Example 1: Use an algorithm of the C++ standard library on a numpy array inplace. **C++ code** ```cpp #include // Standard library import for std::accumulate #include "pybind11/pybind11.h" // Pybind11 import to define Python bindings #include "xtensor/xmath.hpp" // xtensor import for the C++ universal functions #include "xtensor-python/pyarray.hpp" // Numpy bindings double sum_of_sines(xt::pyarray& m) { auto sines = xt::sin(m); // sines does not actually hold values. 
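    // xt::sin(m) returns a lazy xtensor expression: no temporary array is allocated, and the
    // sine values are computed element by element while std::accumulate iterates over it below.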
return std::accumulate(sines.begin(), sines.end(), 0.0); } PYBIND11_PLUGIN(xtensor_python_test) { pybind11::module m("xtensor_python_test", "Test module for xtensor python bindings"); m.def("sum_of_sines", sum_of_sines, "Sum the sines of the input values"); return m.ptr(); } ``` **Python Code** ```python import numpy as np import xtensor_python_test as xt v = np.arange(15).reshape(3, 5) s = xt.sum_of_sines(v) s ``` **Outputs** ``` 1.2853996391883833 ``` ### Example 2: Create a universal function from a C++ scalar function **C++ code** ```cpp #include "pybind11/pybind11.h" #include "xtensor-python/pyvectorize.hpp" #include #include namespace py = pybind11; double scalar_func(double i, double j) { return std::sin(i) - std::cos(j); } PYBIND11_PLUGIN(xtensor_python_test) { py::module m("xtensor_python_test", "Test module for xtensor python bindings"); m.def("vectorized_func", xt::pyvectorize(scalar_func), ""); return m.ptr(); } ``` **Python Code** ```python import numpy as np import xtensor_python_test as xt x = np.arange(15).reshape(3, 5) y = [1, 2, 3, 4, 5] z = xt.vectorized_func(x, y) z ``` **Outputs** ``` [[-0.540302, 1.257618, 1.89929 , 0.794764, -1.040465], [-1.499227, 0.136731, 1.646979, 1.643002, 0.128456], [-1.084323, -0.583843, 0.45342 , 1.073811, 0.706945]] ``` ## Installation We provide a package for the conda package manager. ```bash conda install -c conda-forge xtensor-python ``` This will pull the dependencies to xtensor-python, that is `pybind11` and `xtensor`. ## Project cookiecutter A template for a project making use of `xtensor-python` is available in the form of a cookiecutter [here](https://github.com/QuantStack/xtensor-python-cookiecutter). This project is meant to help library authors get started with the xtensor python bindings. It produces a project following the best practices for the packaging and distribution of Python extensions based on `xtensor-python`, including a `setup.py` file and a conda recipe. ## Building and Running the Tests Testing `xtensor-python` requires `pytest` ``` bash py.test . ``` To pick up changes in `xtensor-python` while rebuilding, delete the `build/` directory. ## Building the HTML Documentation `xtensor-python`'s documentation is built with three tools - [doxygen](http://www.doxygen.org) - [sphinx](http://www.sphinx-doc.org) - [breathe](https://breathe.readthedocs.io) While doxygen must be installed separately, you can install breathe by typing ```bash pip install breathe ``` Breathe can also be installed with `conda` ```bash conda install -c conda-forge breathe ``` Finally, build the documentation with ```bash make html ``` from the `docs` subdirectory. ## Dependencies on `xtensor` and `pybind11` `xtensor-python` depends on the `xtensor` and `pybind11` libraries | `xtensor-python` | `xtensor` | `pybind11` | |-------------------|------------|-------------| | master | ^0.10.9 | ^2.1.0 | | 0.12.x | ^0.10.2 | ^2.1.0 | | 0.11.x | ^0.10.0 | ^2.1.0 | | 0.10.x | ^0.9.0 | ^2.1.0 | | 0.9.x | ^0.8.1 | ^2.1.0 | These dependencies are automatically resolved when using the conda package manager. ## License We use a shared copyright model that enables all contributors to maintain the copyright on their contributions. This software is licensed under the BSD-3-Clause license. See the [LICENSE](LICENSE) file for details. 
xtensor-python-0.12.4/benchmark/000077500000000000000000000000001314412064200165465ustar00rootroot00000000000000xtensor-python-0.12.4/benchmark/CMakeLists.txt000066400000000000000000000070441314412064200213130ustar00rootroot00000000000000############################################################################ # Copyright (c) 2016, Johan Mabille and Sylvain Corlay # # # # Distributed under the terms of the BSD 3-Clause License. # # # # The full license is in the file LICENSE, distributed with this software. # ############################################################################ message(STATUS "Forcing tests build type to Release") set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE) include(CheckCXXCompilerFlag) string(TOUPPER "${CMAKE_BUILD_TYPE}" U_CMAKE_BUILD_TYPE) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Intel") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -Wunused-parameter -Wextra -Wreorder -Wconversion") CHECK_CXX_COMPILER_FLAG("-std=c++14" HAS_CPP14_FLAG) if (HAS_CPP14_FLAG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") else() message(FATAL_ERROR "Unsupported compiler -- xtensor requires C++14 support!") endif() # Enable link time optimization and set the default symbol # visibility to hidden (very important to obtain small binaries) if (NOT ${U_CMAKE_BUILD_TYPE} MATCHES DEBUG) # Default symbol visibility set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden") # Check for Link Time Optimization support # (GCC/Clang) CHECK_CXX_COMPILER_FLAG("-flto" HAS_LTO_FLAG) if (HAS_LTO_FLAG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto") endif() # Intel equivalent to LTO is called IPO if (CMAKE_CXX_COMPILER_ID MATCHES "Intel") CHECK_CXX_COMPILER_FLAG("-ipo" HAS_IPO_FLAG) if (HAS_IPO_FLAG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ipo") endif() endif() endif() endif() if(MSVC) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP /bigobj") set(CMAKE_EXE_LINKER_FLAGS /MANIFEST:NO) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) string(REPLACE "/MD" "-MT" ${flag_var} "${${flag_var}}") endforeach() endif() set(XTENSOR_PYTHON_BENCHMARK main.cpp ) set(XTENSOR_PYTHON_BENCHMARK_TARGET benchmark_xtensor_python) add_library(${XTENSOR_PYTHON_BENCHMARK_TARGET} MODULE EXCLUDE_FROM_ALL ${XTENSOR_PYTHON_BENCHMARK} ${XTENSOR_PYTHON_HEADERS}) set_target_properties(${XTENSOR_PYTHON_BENCHMARK_TARGET} PROPERTIES PREFIX "") set_target_properties(${XTENSOR_PYTHON_BENCHMARK_TARGET} PROPERTIES SUFFIX "${PYTHON_MODULE_EXTENSION}") if (APPLE) target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} PRIVATE "-undefined dynamic_lookup") elseif (MSVC) target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} ${PYTHON_LIBRARIES}) else () target_link_libraries(${XTENSOR_PYTHON_BENCHMARK_TARGET} "-shared") endif() configure_file(benchmark_pyarray.py benchmark_pyarray.py COPYONLY) configure_file(benchmark_pytensor.py benchmark_pytensor.py COPYONLY) configure_file(benchmark_pybind_array.py benchmark_pybind_array.py COPYONLY) configure_file(benchmark_pyvectorize.py benchmark_pyvectorize.py COPYONLY) configure_file(benchmark_pybind_vectorize.py benchmark_pybind_vectorize.py COPYONLY) add_custom_target(xbenchmark DEPENDS ${XTENSOR_PYTHON_BENCHMARK_TARGET}) xtensor-python-0.12.4/benchmark/benchmark_pyarray.py000066400000000000000000000003271314412064200226230ustar00rootroot00000000000000from benchmark_xtensor_python 
import sum_array import numpy as np u = np.ones(1000000, dtype=float) from timeit import timeit print (timeit ('sum_array(u)', setup='from __main__ import u, sum_array', number=1000)) xtensor-python-0.12.4/benchmark/benchmark_pybind_array.py000066400000000000000000000003541314412064200236170ustar00rootroot00000000000000from benchmark_xtensor_python import pybind_sum_array import numpy as np u = np.ones(1000000, dtype=float) from timeit import timeit print (timeit ('pybind_sum_array(u)', setup='from __main__ import u, pybind_sum_array', number=1000)) xtensor-python-0.12.4/benchmark/benchmark_pybind_vectorize.py000066400000000000000000000003671314412064200245170ustar00rootroot00000000000000from benchmark_xtensor_python import pybind_rect_to_polar import numpy as np from timeit import timeit w = np.ones(100000, dtype=complex) print (timeit('pybind_rect_to_polar(w[::2])', 'from __main__ import w, pybind_rect_to_polar', number=1000)) xtensor-python-0.12.4/benchmark/benchmark_pytensor.py000066400000000000000000000003601314412064200230140ustar00rootroot00000000000000from benchmark_xtensor_python import sum_tensor import numpy as np u = np.ones(1000000, dtype=float) #print(sum_tensor(u)) from timeit import timeit print (timeit ('sum_tensor(u)', setup='from __main__ import u, sum_tensor', number=1000)) xtensor-python-0.12.4/benchmark/benchmark_pyvectorize.py000066400000000000000000000003421314412064200235140ustar00rootroot00000000000000from benchmark_xtensor_python import rect_to_polar import numpy as np from timeit import timeit w = np.ones(100000, dtype=complex) print (timeit('rect_to_polar(w[::2])', 'from __main__ import w, rect_to_polar', number=1000)) xtensor-python-0.12.4/benchmark/main.cpp000066400000000000000000000032421314412064200201770ustar00rootroot00000000000000#include "pybind11/pybind11.h" #include "pybind11/numpy.h" #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include "numpy/arrayobject.h" #include "xtensor/xtensor.hpp" #include "xtensor/xarray.hpp" #include "xtensor-python/pyarray.hpp" #include "xtensor-python/pytensor.hpp" #include "xtensor-python/pyvectorize.hpp" using complex_t = std::complex; namespace py = pybind11; PYBIND11_PLUGIN(benchmark_xtensor_python) { if(_import_array() < 0) { PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return nullptr; } py::module m("benchmark_xtensor_python", "Benchmark module for xtensor python bindings"); m.def("sum_array", [](xt::pyarray const& x) { double sum = 0; for(auto e : x) sum += e; return sum; }); m.def("sum_tensor", [](xt::pytensor const& x) { double sum = 0; for(auto e : x) sum += e; return sum; }); m.def("pybind_sum_array", [](py::array_t const& x) { double sum = 0; size_t size = x.size(); const double* data = x.data(0); for(size_t i = 0; i < size; ++i) sum += data[i]; return sum; }); m.def("rect_to_polar", [](xt::pyarray const& a) { return py::vectorize([](complex_t x) { return std::abs(x); })(a); }); m.def("pybind_rect_to_polar", [](py::array a) { if (py::isinstance>(a)) return py::vectorize([](complex_t x) { return std::abs(x); })(a); else throw py::type_error("rect_to_polar unhandled type"); }); return m.ptr(); } xtensor-python-0.12.4/benchmark/setup.py000066400000000000000000000064731314412064200202720ustar00rootroot00000000000000from setuptools import setup, Extension from setuptools.command.build_ext import build_ext import sys import os import setuptools __version__ = '0.0.1' class get_pybind_include(object): """Helper class to determine the pybind11 include path The purpose of this class 
is to postpone importing pybind11 until it is actually installed, so that the ``get_include()`` method can be invoked. """ def __init__(self, user=False): self.user = user def __str__(self): import pybind11 return pybind11.get_include(self.user) class get_numpy_include(object): """Helper class to determine the numpy include path The purpose of this class is to postpone importing numpy until it is actually installed, so that the ``get_include()`` method can be invoked. """ def __str__(self): import numpy return numpy.get_include() ext_modules = [ Extension( 'benchmark_xtensor_python', ['main.cpp'], include_dirs=[ # Path to pybind11 headers get_pybind_include(), get_pybind_include(user=True), # Path to numpy headers get_numpy_include(), os.path.join(sys.prefix, 'include'), os.path.join(sys.prefix, 'Library', 'include') ], language='c++' ), ] def has_flag(compiler, flagname): """Return a boolean indicating whether a flag name is supported on the specified compiler. """ import tempfile with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f: f.write('int main (int argc, char **argv) { return 0; }') try: compiler.compile([f.name], extra_postargs=[flagname]) except setuptools.distutils.errors.CompileError: return False return True def cpp_flag(compiler): """Return the -std=c++14 compiler flag and errors when the flag is no available. """ if has_flag(compiler, '-std=c++14'): return '-std=c++14' else: raise RuntimeError('C++14 support is required by xtensor!') class BuildExt(build_ext): """A custom build extension for adding compiler-specific options.""" c_opts = { 'msvc': ['/EHsc'], 'unix': [], } if sys.platform == 'darwin': c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7'] def build_extensions(self): ct = self.compiler.compiler_type opts = self.c_opts.get(ct, []) if ct == 'unix': opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version()) opts.append(cpp_flag(self.compiler)) if has_flag(self.compiler, '-fvisibility=hidden'): opts.append('-fvisibility=hidden') elif ct == 'msvc': opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version()) for ext in self.extensions: ext.extra_compile_args = opts build_ext.build_extensions(self) setup( name='benchmark_xtensor_python', version=__version__, author='Sylvain Corlay', author_email='sylvain.corlay@gmail.com', url='https://github.com/pybind/python_example', description='An example project using xtensor-python', long_description='', ext_modules=ext_modules, install_requires=['pybind11==2.0.1'], cmdclass={'build_ext': BuildExt}, zip_safe=False, ) xtensor-python-0.12.4/cmake/000077500000000000000000000000001314412064200156745ustar00rootroot00000000000000xtensor-python-0.12.4/cmake/FindNumPy.cmake000066400000000000000000000070741314412064200205570ustar00rootroot00000000000000# - Find the NumPy libraries # This module finds if NumPy is installed, and sets the following variables # indicating where it is. # # TODO: Update to provide the libraries and paths for linking npymath lib. # # NUMPY_FOUND - was NumPy found # NUMPY_VERSION - the version of NumPy found as a string # NUMPY_VERSION_MAJOR - the major version number of NumPy # NUMPY_VERSION_MINOR - the minor version number of NumPy # NUMPY_VERSION_PATCH - the patch version number of NumPy # NUMPY_VERSION_DECIMAL - e.g. version 1.6.1 is 10601 # NUMPY_INCLUDE_DIRS - path to the NumPy include files #============================================================================ # Copyright 2012 Continuum Analytics, Inc. 
# # MIT License # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # #============================================================================ # Finding NumPy involves calling the Python interpreter if(NumPy_FIND_REQUIRED) find_package(PythonInterp REQUIRED) else() find_package(PythonInterp) endif() if(NOT PYTHONINTERP_FOUND) set(NUMPY_FOUND FALSE) endif() execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" "import numpy as n; print(n.__version__); print(n.get_include());" RESULT_VARIABLE _NUMPY_SEARCH_SUCCESS OUTPUT_VARIABLE _NUMPY_VALUES ERROR_VARIABLE _NUMPY_ERROR_VALUE OUTPUT_STRIP_TRAILING_WHITESPACE) if(NOT _NUMPY_SEARCH_SUCCESS MATCHES 0) if(NumPy_FIND_REQUIRED) message(FATAL_ERROR "NumPy import failure:\n${_NUMPY_ERROR_VALUE}") endif() set(NUMPY_FOUND FALSE) endif() # Convert the process output into a list string(REGEX REPLACE ";" "\\\\;" _NUMPY_VALUES ${_NUMPY_VALUES}) string(REGEX REPLACE "\n" ";" _NUMPY_VALUES ${_NUMPY_VALUES}) list(GET _NUMPY_VALUES 0 NUMPY_VERSION) list(GET _NUMPY_VALUES 1 NUMPY_INCLUDE_DIRS) # Make sure all directory separators are '/' string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIRS ${NUMPY_INCLUDE_DIRS}) # Get the major and minor version numbers string(REGEX REPLACE "\\." ";" _NUMPY_VERSION_LIST ${NUMPY_VERSION}) list(GET _NUMPY_VERSION_LIST 0 NUMPY_VERSION_MAJOR) list(GET _NUMPY_VERSION_LIST 1 NUMPY_VERSION_MINOR) list(GET _NUMPY_VERSION_LIST 2 NUMPY_VERSION_PATCH) string(REGEX MATCH "[0-9]*" NUMPY_VERSION_PATCH ${NUMPY_VERSION_PATCH}) math(EXPR NUMPY_VERSION_DECIMAL "(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}") find_package_message(NUMPY "Found NumPy: version \"${NUMPY_VERSION}\" ${NUMPY_INCLUDE_DIRS}" "${NUMPY_INCLUDE_DIRS}${NUMPY_VERSION}") set(NUMPY_FOUND TRUE) xtensor-python-0.12.4/docs/000077500000000000000000000000001314412064200155445ustar00rootroot00000000000000xtensor-python-0.12.4/docs/Doxyfile000066400000000000000000000004571314412064200172600ustar00rootroot00000000000000PROJECT_NAME = "xtensor-python" XML_OUTPUT = xml INPUT = ../include GENERATE_LATEX = NO GENERATE_MAN = NO GENERATE_RTF = NO CASE_SENSE_NAMES = NO GENERATE_HTML = NO GENERATE_XML = YES RECURSIVE = YES QUIET = YES JAVADOC_AUTOBRIEF = YES xtensor-python-0.12.4/docs/Makefile000066400000000000000000000147421314412064200172140ustar00rootroot00000000000000# You can set these variables from the command line. 
SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext api default: html help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* rm -rf xml html: doxygen $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: doxygen $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: doxygen $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: doxygen $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: doxygen $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: doxygen $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." epub: doxygen $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
latex: doxygen $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: doxygen $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: doxygen $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: doxygen $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: doxygen $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: doxygen $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: doxygen $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: doxygen $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: doxygen $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: doxygen $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: doxygen $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: doxygen $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: doxygen $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: doxygen $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." xtensor-python-0.12.4/docs/environment.yml000066400000000000000000000001201314412064200206240ustar00rootroot00000000000000name: xtensor-python-docs channels: - conda-forge dependencies: - breathe xtensor-python-0.12.4/docs/make.bat000066400000000000000000000161651314412064200171620ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source set I18NSPHINXOPTS=%SPHINXOPTS% source if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. 
html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( doxygen %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\packagename.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\packagename.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. 
goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end xtensor-python-0.12.4/docs/source/000077500000000000000000000000001314412064200170445ustar00rootroot00000000000000xtensor-python-0.12.4/docs/source/api_reference.rst000066400000000000000000000006171314412064200223710ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. API reference ============= Containers ---------- .. toctree:: :maxdepth: 2 pyarray pytensor Numpy universal functions ------------------------- .. toctree:: :maxdepth: 2 pyvectorize xtensor-python-0.12.4/docs/source/array_tensor.rst000066400000000000000000000023031314412064200223040ustar00rootroot00000000000000.. 
Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. Arrays and tensors ================== ``xtensor-python`` provides two container types wrapping numpy arrays: ``pyarray`` and ``pytensor``. They are the counterparts to ``xarray`` and ``xtensor`` containers. pyarray ------- Like ``xarray``, ``pyarray`` has a dynamic shape. This means that you can reshape the numpy array on the C++ side and see this change reflected on the python side. ``pyarray`` doesn't make a copy of the shape or the strides, but reads them each time it is needed. Therefore, if a reference on a ``pyarray`` is kept in the C++ code and the corresponding numpy array is then reshaped in the python code, this modification will reflect in the ``pyarray``. pytensor -------- Like ``xtensor``, ``pytensor`` has a static stack-allocated shape. This means that the shape of the numpy array is copied into the shape of the ``pytensor`` upon creation. As a consequence, reshapes are not reflected across languages. However, this drawback is offset by a more effective computation of shape and broadcast. xtensor-python-0.12.4/docs/source/basic_usage.rst000066400000000000000000000051071314412064200220460ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. Basic Usage =========== Example 1: Use an algorithm of the C++ library on a numpy array inplace ----------------------------------------------------------------------- **C++ code** .. code:: #include // Standard library import for std::accumulate #include "pybind11/pybind11.h" // Pybind11 import to define Python bindings #include "xtensor/xmath.hpp" // xtensor import for the C++ universal functions #define FORCE_IMPORT_ARRAY // numpy C api loading #include "xtensor-python/pyarray.hpp" // Numpy bindings double sum_of_sines(xt::pyarray& m) { auto sines = xt::sin(m); // sines does not actually hold values. return std::accumulate(sines.cbegin(), sines.cend(), 0.0); } PYBIND11_PLUGIN(xtensor_python_test) { xt::import_numpy(); pybind11::module m("xtensor_python_test", "Test module for xtensor python bindings"); m.def("sum_of_sines", sum_of_sines, "Sum the sines of the input values"); return m.ptr(); } **Python code:** .. code:: import numpy as np import xtensor_python_test as xt a = np.arange(15).reshape(3, 5) s = xt.sum_of_sines(v) s **Outputs** .. code:: 1.2853996391883833 Example 2: Create a numpy-style universal function from a C++ scalar function ----------------------------------------------------------------------------- **C++ code** .. code:: #include "pybind11/pybind11.h" #define FORCE_IMPORT_ARRAY #include "xtensor-python/pyvectorize.hpp" #include #include namespace py = pybind11; double scalar_func(double i, double j) { return std::sin(i) - std::cos(j); } PYBIND11_PLUGIN(xtensor_python_test) { xt::import_numpy(); py::module m("xtensor_python_test", "Test module for xtensor python bindings"); m.def("vectorized_func", xt::pyvectorize(scalar_func), ""); return m.ptr(); } **Python code:** .. code:: import numpy as np import xtensor_python_test as xt x = np.arange(15).reshape(3, 5) y = [1, 2, 3, 4, 5] z = xt.vectorized_func(x, y) z **Outputs** .. 
code:: [[-0.540302, 1.257618, 1.89929 , 0.794764, -1.040465], [-1.499227, 0.136731, 1.646979, 1.643002, 0.128456], [-1.084323, -0.583843, 0.45342 , 1.073811, 0.706945]] xtensor-python-0.12.4/docs/source/cmake.svg000066400000000000000000000425311314412064200206520ustar00rootroot00000000000000 image/svg+xml xtensor-python-0.12.4/docs/source/conda.svg000066400000000000000000000034151314412064200206540ustar00rootroot00000000000000xtensor-python-0.12.4/docs/source/conf.py000066400000000000000000000013771314412064200203530ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import subprocess on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if on_rtd: subprocess.call('cd ..; doxygen', shell=True) import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] extensions = ['breathe'] breathe_projects = { 'xtensor-python': '../xml' } templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = 'xtensor-python' copyright = '2016, Johan Mabille and Sylvain Corlay' author = 'Johan Mabille and Sylvain Corlay' html_logo = 'quantstack-white.svg' exclude_patterns = [] highlight_language = 'c++' pygments_style = 'sphinx' todo_include_todos = False htmlhelp_basename = 'xtensorpythondoc' xtensor-python-0.12.4/docs/source/cookiecutter.rst000066400000000000000000000040231314412064200222750ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. Getting started with xtensor-python-cookiecutter ================================================ `xtensor-python-cookiecutter`_ helps extension authors create Python extension modules making use of xtensor. It takes care of the initial work of generating a project skeleton with - A complete ``setup.py`` compiling the extension module - A few examples included in the resulting project including - A universal function defined from C++ - A function making use of an algorithm from the STL on a numpy array - Unit tests - The generation of the HTML documentation with sphinx Usage ----- Install cookiecutter_ .. code:: pip install cookiecutter After installing cookiecutter, use the xtensor_cookiecutter_: .. code:: cookiecutter https://github.com/QuantStack/xtensor-python-cookiecutter.git As xtensor-python-cookiecutter runs, you will be asked for basic information about your custom extension project. You will be prompted for the following information: - ``author_name``: your name or the name of your organization, - ``author_email`` : your project's contact email, - ``github_project_name``: name of the GitHub repository for your project, - ``github_organization_name``: name of the GithHub organization for your project, - ``python_package_name``: name of the Python package created by your extension, - ``cpp_namespace``: name for the cpp namespace holding the implementation of your extension, - ``project_short_description``: a short description for your project. This will produce a directory containing all the required content for a minimal extension project making use of xtensor with all the required boilerplate for package management, together with a few basic examples. .. _xtensor_cookicutter: https://github.com/QuantStack/xtensor-python-cookiecutter .. 
_cookiecutter: https://github.com/audreyr/cookiecutter xtensor-python-0.12.4/docs/source/debian.svg000066400000000000000000000153171314412064200210160ustar00rootroot00000000000000 ]> xtensor-python-0.12.4/docs/source/index.rst000066400000000000000000000055271314412064200207160ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. .. image:: xtensor-python.svg Python bindings for the xtensor_ C++ multi-dimensional array library. Introduction ------------ What are ``xtensor`` and ``xtensor-python``? - ``xtensor`` is a C++ library for multi-dimensional arrays enabling numpy-style broadcasting and lazy computing. - ``xtensor-python`` enables inplace use of numpy arrays with all the benefits from ``xtensor`` - C++ universal functions and broadcasting - STL - compliant APIs. The `numpy to xtensor cheat sheet`_ from the ``xtensor`` documentation shows how numpy APIs translate to C++ with ``xtensor``. The Python bindings for ``xtensor`` are based on the pybind11_ C++ library, which enables seemless interoperability between C++ and Python. Enabling numpy arrays in your C++ libraries ------------------------------------------- Instead of exposing new types to python, ``xtensor-python`` enables the use of NumPy_ data structures from C++ using Python's `Buffer Protocol`_. In addition to the basic accessors and iterators of ``xtensor`` containers, it also enables using numpy arrays with ``xtensor``'s expression system. Besides ``xtensor-python`` provides an API to create *Universal functions* from simple scalar functions from your C++ code. Finally, a cookiecutter template project is provided. It takes care of the initial work of generating a project skeleton for a C++ extension based on ``xtensor-python`` containing a few examples, unit tests and HTML documentation. Find out more about the xtensor-python-cookiecutter_. ``xtensor`` and ``xtensor-python`` require a modern C++ compiler supporting C++14. The following C++ compilers are supported: - On Windows platforms, Visual C++ 2015 Update 2, or more recent - On Unix platforms, gcc 4.9 or a recent version of Clang Licensing --------- We use a shared copyright model that enables all contributors to maintain the copyright on their contributions. This software is licensed under the BSD-3-Clause license. See the LICENSE file for details. .. toctree:: :caption: INSTALLATION :maxdepth: 2 installation .. toctree:: :caption: USAGE :maxdepth: 2 basic_usage array_tensor numpy_capi cookiecutter .. toctree:: :caption: API REFERENCE :maxdepth: 2 api_reference .. toctree:: :caption: DEVELOPER ZONE releasing .. _NumPy: http://www.numpy.org .. _`Buffer Protocol`: https://docs.python.org/3/c-api/buffer.html .. _`numpy to xtensor cheat sheet`: http://xtensor.readthedocs.io/en/latest/numpy.html .. _xtensor: https://github.com/QuantStack/xtensor .. _pybind11: https://github.com/pybind/pybind11 .. _xtensor-python-cookiecutter: https://github.com/QuantStack/xtensor-python-cookiecutter xtensor-python-0.12.4/docs/source/installation.rst000066400000000000000000000033431314412064200223020ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. .. 
raw:: html Installation ============ Although ``xtensor-python`` is a header-only library, we provide standardized means to install it, with package managers or with cmake. Besides the xtendor-python headers, all these methods place the `cmake` project configuration file in the right location so that third-party projects can use cmake's find_package to locate xtensor-python headers. .. image:: conda.svg Using the conda package ----------------------- A package for xtensor-python is available on the conda package manager. .. code:: conda install -c conda-forge xtensor-python .. image:: debian.svg Using the Debian package ------------------------ A package for xtensor-python is available on Debian. .. code:: sudo apt-get install xtensor-python-dev .. image:: cmake.svg From source with cmake ---------------------- You can also install ``xtensor-python`` from source with cmake. On Unix platforms, from the source directory: .. code:: mkdir build cd build cmake -DCMAKE_INSTALL_PREFIX=/path/to/prefix .. make install On Windows platforms, from the source directory: .. code:: mkdir build cd build cmake -G "NMake Makefiles" -DCMAKE_INSTALL_PREFIX=/path/to/prefix .. nmake nmake install See the section of the documentation on :doc:`build-options`, for more details on how to cmake options. xtensor-python-0.12.4/docs/source/numpy_capi.rst000066400000000000000000000040451314412064200217450ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. Importing numpy C API ===================== Importing the C API module of numpy requires more code than just including a header. ``xtensor-python`` simplifies a lot this import, however some actions are still required in the user code. Extension module with a single file ----------------------------------- When writing an extension module that is self-contained in a single file, its author should pay attention to the following points: - ``FORCE_IMPORT_ARRAY`` must be defined before including any header of ``xtensor-python``. - ``xt::import_numpy()`` must be called in the function initializing the module. Thus the basic skeleton of the module looks like: .. code:: #include "pybind11/pybind11.h" #define FORCE_IMPORT_ARRAY #include "xgtensor-python/pyarray.hpp" PYBIND11_PLUGIN(plugin_name) { xt::import_numpy(); pybind11::module m(//... //... return m.ptr(); } Extension module with multiple files ------------------------------------ If the extension module contain many source files that include ``xtensor-python`` header files, the previous points are still required. However, the source files that don't contain the initializing code of the module can directly include ``xtensor-python`` header files. Using other extension modules ----------------------------- Including an header of ``xtensor-python`` actually defines ``PY_ARRAY_UNIQUE_SYMBOL`` to ``xtensor_python_ARRAY_API``. This might be problematic if you import another library that defines its own ``PY_ARRAY_UNIQUE_SYMBOL``, or if you define yours. If so, you can override the behavior of ``xtensor-python`` by explicitly defining ``PY_ARRAY_UNIQUE_SYMBOL`` prior to including any ``stenxor-python`` header: .. code:: // in every source file #define PY_ARRAY_UNIQUE_SYMBOL my_uniqe_array_api #include "xtensor-python/pyarray.hpp" xtensor-python-0.12.4/docs/source/pyarray.rst000066400000000000000000000004331314412064200212650ustar00rootroot00000000000000.. 
Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. pyarray ======= .. doxygenclass:: xt::pyarray :project: xtensor-python :members: xtensor-python-0.12.4/docs/source/pytensor.rst000066400000000000000000000004361314412064200214640ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. pytensor ======== .. doxygenclass:: xt::pytensor :project: xtensor-python :members: xtensor-python-0.12.4/docs/source/pyvectorize.rst000066400000000000000000000004351314412064200221630ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. pyvectorize =========== .. doxygenfunction:: xt::pyvectorize :project: xtensor-python xtensor-python-0.12.4/docs/source/quantstack-white.svg000066400000000000000000000116361314412064200230700ustar00rootroot00000000000000 image/svg+xmlxtensor-python-0.12.4/docs/source/releasing.rst000066400000000000000000000026071314412064200215540ustar00rootroot00000000000000.. Copyright (c) 2016, Johan Mabille and Sylvain Corlay Distributed under the terms of the BSD 3-Clause License. The full license is in the file LICENSE, distributed with this software. Releasing xtensor-python ======================== Releasing a new version ----------------------- From the master branch of xtensor-python - Make sure that you are in sync with the master branch of the upstream remote. - In file ``xtensor_python_config.hpp``, set the macros for ``XTENSOR_PYTHON_VERSION_MAJOR``, ``XTENSOR_PYTHON_VERSION_MINOR`` and ``XTENSOR_PYTHON_VERSION_PATCH`` to the desired values. - Update the readme file w.r.t. dependencies on xtensor and pybind11. - Stage the changes (``git add``), commit the changes (``git commit``) and add a tag of the form ``Major.minor.patch``. It is important to not add any other content to the tag name. - Push the new commit and tag to the main repository. (``git push``, and ``git push --tags``) Updating the conda-forge recipe ------------------------------- xtensor-python has been packaged for the conda package manager. Once the new tag has been pushed on GitHub, edit the conda-forge recipe for xtensor in the following fashion: - Update the version number to the new Major.minor.patch. - Set the build number to 0. - Update the hash of the source tarball. - Check for the versions of the dependencies. - Optionally, rerender the conda-forge feedstock. xtensor-python-0.12.4/docs/source/xtensor-python.svg000066400000000000000000000375551314412064200226250ustar00rootroot00000000000000 image/svg+xmlxtensor-python-0.12.4/include/000077500000000000000000000000001314412064200162375ustar00rootroot00000000000000xtensor-python-0.12.4/include/xtensor-python/000077500000000000000000000000001314412064200212605ustar00rootroot00000000000000xtensor-python-0.12.4/include/xtensor-python/pyarray.hpp000066400000000000000000000410031314412064200234560ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. 
* ****************************************************************************/ #ifndef PY_ARRAY_HPP #define PY_ARRAY_HPP #include #include #include #include "xtensor/xbuffer_adaptor.hpp" #include "xtensor/xiterator.hpp" #include "xtensor/xsemantic.hpp" #include "pycontainer.hpp" #include "pystrides_adaptor.hpp" namespace xt { template class pyarray; } namespace pybind11 { namespace detail { template struct handle_type_name> { static PYBIND11_DESCR name() { return _("numpy.ndarray[") + make_caster::name() + _("]"); } }; template struct pyobject_caster> { using type = xt::pyarray; bool load(handle src, bool convert) { if (!convert) { if (!PyArray_Check(src.ptr())) { return false; } int type_num = xt::detail::numpy_traits::type_num; if (PyArray_TYPE(reinterpret_cast(src.ptr())) != type_num) { return false; } } value = type::ensure(src); return static_cast(value); } static handle cast(const handle& src, return_value_policy, handle) { return src.inc_ref(); } PYBIND11_TYPE_CASTER(type, handle_type_name::name()); }; } } namespace xt { template class pyarray_backstrides { public: using array_type = A; using value_type = typename array_type::size_type; using size_type = typename array_type::size_type; pyarray_backstrides() = default; pyarray_backstrides(const array_type& a); value_type operator[](size_type i) const; size_type size() const; private: const array_type* p_a; }; template struct xiterable_inner_types> : xcontainer_iterable_types> { }; template struct xcontainer_inner_types> { using container_type = xbuffer_adaptor; using shape_type = std::vector; using strides_type = shape_type; using backstrides_type = pyarray_backstrides>; using inner_shape_type = xbuffer_adaptor; using inner_strides_type = pystrides_adaptor; using inner_backstrides_type = backstrides_type; using temporary_type = pyarray; }; /** * @class pyarray * @brief Multidimensional container providing the xtensor container semantics to a numpy array. * * pyarray is similar to the xarray container in that it has a dynamic dimensionality. Reshapes of * a pyarray container are reflected in the underlying numpy array. * * @tparam T The type of the element stored in the pyarray. 
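     *
     * Example (an illustrative sketch, not part of the library: the function
     * name @c sum_of_elements and its use from a pybind11 binding are
     * hypothetical):
     * @code
     * // Receives a numpy array from Python without copying its buffer.
     * double sum_of_elements(const xt::pyarray<double>& a)
     * {
     *     double s = 0.;
     *     for (double v : a)
     *     {
     *         s += v;
     *     }
     *     return s;
     * }
     * @endcode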
* @sa pytensor */ template class pyarray : public pycontainer>, public xcontainer_semantic> { public: using self_type = pyarray; using semantic_base = xcontainer_semantic; using base_type = pycontainer; using container_type = typename base_type::container_type; using value_type = typename base_type::value_type; using reference = typename base_type::reference; using const_reference = typename base_type::const_reference; using pointer = typename base_type::pointer; using size_type = typename base_type::size_type; using shape_type = typename base_type::shape_type; using strides_type = typename base_type::strides_type; using backstrides_type = typename base_type::backstrides_type; using inner_shape_type = typename base_type::inner_shape_type; using inner_strides_type = typename base_type::inner_strides_type; using inner_backstrides_type = typename base_type::inner_backstrides_type; pyarray(); pyarray(const value_type& t); pyarray(nested_initializer_list_t t); pyarray(nested_initializer_list_t t); pyarray(nested_initializer_list_t t); pyarray(nested_initializer_list_t t); pyarray(nested_initializer_list_t t); pyarray(pybind11::handle h, pybind11::object::borrowed_t); pyarray(pybind11::handle h, pybind11::object::stolen_t); pyarray(const pybind11::object& o); explicit pyarray(const shape_type& shape, layout_type l = layout_type::row_major); explicit pyarray(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major); explicit pyarray(const shape_type& shape, const strides_type& strides, const_reference value); explicit pyarray(const shape_type& shape, const strides_type& strides); pyarray(const self_type& rhs); self_type& operator=(const self_type& rhs); pyarray(self_type&&) = default; self_type& operator=(self_type&& e) = default; template pyarray(const xexpression& e); template self_type& operator=(const xexpression& e); using base_type::begin; using base_type::end; static self_type ensure(pybind11::handle h); static bool check_(pybind11::handle h); private: inner_shape_type m_shape; inner_strides_type m_strides; mutable inner_backstrides_type m_backstrides; container_type m_data; void init_array(const shape_type& shape, const strides_type& strides); void init_from_python(); const inner_shape_type& shape_impl() const noexcept; const inner_strides_type& strides_impl() const noexcept; const inner_backstrides_type& backstrides_impl() const noexcept; container_type& data_impl() noexcept; const container_type& data_impl() const noexcept; friend class xcontainer>; }; /************************************** * pyarray_backstrides implementation * **************************************/ template inline pyarray_backstrides::pyarray_backstrides(const array_type& a) : p_a(&a) { } template inline auto pyarray_backstrides::size() const -> size_type { return p_a->dimension(); } template inline auto pyarray_backstrides::operator[](size_type i) const -> value_type { value_type sh = p_a->shape()[i]; value_type res = sh == 1 ? 0 : (sh - 1) * p_a->strides()[i]; return res; } /************************** * pyarray implementation * **************************/ /** * @name Constructors */ //@{ template inline pyarray::pyarray() : base_type() { // TODO: avoid allocation shape_type shape = make_sequence(0, size_type(1)); strides_type strides = make_sequence(0, size_type(0)); init_array(shape, strides); m_data[0] = T(); } /** * Allocates a pyarray with nested initializer lists. 
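     *
     * For instance (values are illustrative, mirroring the usage in the test
     * suite):
     * @code
     * xt::pyarray<int> a {{1, 2, 3}, {4, 5, 6}};  // allocates a 2x3 numpy-backed array
     * @endcode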
*/ template inline pyarray::pyarray(const value_type& t) : base_type() { base_type::reshape(xt::shape(t), layout_type::row_major); nested_copy(m_data.begin(), t); } template inline pyarray::pyarray(nested_initializer_list_t t) : base_type() { base_type::reshape(xt::shape(t), layout_type::row_major); nested_copy(m_data.begin(), t); } template inline pyarray::pyarray(nested_initializer_list_t t) : base_type() { base_type::reshape(xt::shape(t), layout_type::row_major); nested_copy(m_data.begin(), t); } template inline pyarray::pyarray(nested_initializer_list_t t) : base_type() { base_type::reshape(xt::shape(t), layout_type::row_major); nested_copy(m_data.begin(), t); } template inline pyarray::pyarray(nested_initializer_list_t t) : base_type() { base_type::reshape(xt::shape(t), layout_type::row_major); nested_copy(m_data.begin(), t); } template inline pyarray::pyarray(nested_initializer_list_t t) : base_type() { base_type::reshape(xt::shape(t), layout_type::row_major); nested_copy(m_data.begin(), t); } template inline pyarray::pyarray(pybind11::handle h, pybind11::object::borrowed_t b) : base_type(h, b) { init_from_python(); } template inline pyarray::pyarray(pybind11::handle h, pybind11::object::stolen_t s) : base_type(h, s) { init_from_python(); } template inline pyarray::pyarray(const pybind11::object& o) : base_type(o) { init_from_python(); } /** * Allocates an uninitialized pyarray with the specified shape and * layout. * @param shape the shape of the pyarray * @param l the layout of the pyarray */ template inline pyarray::pyarray(const shape_type& shape, layout_type l) : base_type() { strides_type strides(shape.size()); compute_strides(shape, l, strides); init_array(shape, strides); } /** * Allocates a pyarray with the specified shape and layout. Elements * are initialized to the specified value. * @param shape the shape of the pyarray * @param value the value of the elements * @param l the layout of the pyarray */ template inline pyarray::pyarray(const shape_type& shape, const_reference value, layout_type l) : base_type() { strides_type strides(shape.size()); compute_strides(shape, l, strides); init_array(shape, strides); std::fill(m_data.begin(), m_data.end(), value); } /** * Allocates an uninitialized pyarray with the specified shape and strides. * Elements are initialized to the specified value. * @param shape the shape of the pyarray * @param strides the strides of the pyarray * @param value the value of the elements */ template inline pyarray::pyarray(const shape_type& shape, const strides_type& strides, const_reference value) : base_type() { init_array(shape, strides); std::fill(m_data.begin(), m_data.end(), value); } /** * Allocates an uninitialized pyarray with the specified shape and strides. * @param shape the shape of the pyarray * @param strides the strides of the pyarray */ template inline pyarray::pyarray(const shape_type& shape, const strides_type& strides) : base_type() { init_array(shape, strides); } //@} /** * @name Copy semantic */ //@{ /** * The copy constructor. */ template inline pyarray::pyarray(const self_type& rhs) : base_type() { auto tmp = pybind11::reinterpret_steal( PyArray_NewLikeArray(rhs.python_array(), NPY_KEEPORDER, nullptr, 1)); if (!tmp) { throw std::runtime_error("NumPy: unable to create ndarray"); } this->m_ptr = tmp.release().ptr(); init_from_python(); std::copy(rhs.data().cbegin(), rhs.data().cend(), this->data().begin()); } /** * The assignment operator. 
*/ template inline auto pyarray::operator=(const self_type& rhs) -> self_type& { self_type tmp(rhs); *this = std::move(tmp); return *this; } //@} /** * @name Extended copy semantic */ //@{ /** * The extended copy constructor. */ template template inline pyarray::pyarray(const xexpression& e) : base_type() { // TODO: prevent intermediary shape allocation shape_type shape = forward_sequence(e.derived_cast().shape()); strides_type strides = make_sequence(shape.size(), size_type(0)); compute_strides(shape, layout_type::row_major, strides); init_array(shape, strides); semantic_base::assign(e); } /** * The extended assignment operator. */ template template inline auto pyarray::operator=(const xexpression& e) -> self_type& { return semantic_base::operator=(e); } //@} template inline auto pyarray::ensure(pybind11::handle h) -> self_type { return base_type::ensure(h); } template inline bool pyarray::check_(pybind11::handle h) { return base_type::check_(h); } template inline void pyarray::init_array(const shape_type& shape, const strides_type& strides) { strides_type adapted_strides(strides); std::transform(strides.begin(), strides.end(), adapted_strides.begin(), [](auto v) { return sizeof(T) * v; }); int flags = NPY_ARRAY_ALIGNED; if (!std::is_const::value) { flags |= NPY_ARRAY_WRITEABLE; } int type_num = detail::numpy_traits::type_num; npy_intp* shape_data = reinterpret_cast(const_cast(shape.data())); npy_intp* strides_data = reinterpret_cast(adapted_strides.data()); auto tmp = pybind11::reinterpret_steal( PyArray_New(&PyArray_Type, static_cast(shape.size()), shape_data, type_num, strides_data, nullptr, static_cast(sizeof(T)), flags, nullptr)); if (!tmp) { throw std::runtime_error("NumPy: unable to create ndarray"); } this->m_ptr = tmp.release().ptr(); init_from_python(); } template inline void pyarray::init_from_python() { m_shape = inner_shape_type(reinterpret_cast(PyArray_SHAPE(this->python_array())), static_cast(PyArray_NDIM(this->python_array()))); m_strides = inner_strides_type(reinterpret_cast(PyArray_STRIDES(this->python_array())), static_cast(PyArray_NDIM(this->python_array()))); m_backstrides = backstrides_type(*this); m_data = container_type(reinterpret_cast(PyArray_DATA(this->python_array())), this->get_min_stride() * static_cast(PyArray_SIZE(this->python_array()))); } template inline auto pyarray::shape_impl() const noexcept -> const inner_shape_type& { return m_shape; } template inline auto pyarray::strides_impl() const noexcept -> const inner_strides_type& { return m_strides; } template inline auto pyarray::backstrides_impl() const noexcept -> const inner_backstrides_type& { // m_backstrides wraps the numpy array backstrides, which is a raw pointer. // The address of the raw pointer stored in the wrapper would be invalidated when the pyarray is copied. // Hence, we build a new backstrides object (cheap wrapper around the underlying pointer) upon access. m_backstrides = backstrides_type(*this); return m_backstrides; } template inline auto pyarray::data_impl() noexcept -> container_type& { return m_data; } template inline auto pyarray::data_impl() const noexcept -> const container_type& { return m_data; } } #endif xtensor-python-0.12.4/include/xtensor-python/pycontainer.hpp000066400000000000000000000213631314412064200243310ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. 
* * * * The full license is in the file LICENSE, distributed with this software. * ****************************************************************************/ #ifndef PY_CONTAINER_HPP #define PY_CONTAINER_HPP #include #include #include #include "pybind11/common.h" #include "pybind11/complex.h" #include "pybind11/pybind11.h" #ifndef FORCE_IMPORT_ARRAY #define NO_IMPORT_ARRAY #endif #ifndef PY_ARRAY_UNIQUE_SYMBOL #define PY_ARRAY_UNIQUE_SYMBOL xtensor_python_ARRAY_API #endif #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include "numpy/arrayobject.h" #include "xtensor/xcontainer.hpp" namespace xt { inline void import_numpy(); /** * @class pycontainer * @brief Base class for xtensor containers wrapping numpy arryays. * * The pycontainer class should not be instantiated directly. Instead, used should * use pytensor and pyarray instancs. * * @tparam D The derived type, i.e. the inheriting class for which pycontainer * provides the interface. */ template class pycontainer : public pybind11::object, public xcontainer { public: using derived_type = D; using base_type = xcontainer; using inner_types = xcontainer_inner_types; using container_type = typename inner_types::container_type; using value_type = typename container_type::value_type; using reference = typename container_type::reference; using const_reference = typename container_type::const_reference; using pointer = typename container_type::pointer; using const_pointer = typename container_type::const_pointer; using size_type = typename container_type::size_type; using difference_type = typename container_type::difference_type; using shape_type = typename inner_types::shape_type; using strides_type = typename inner_types::strides_type; using backstrides_type = typename inner_types::backstrides_type; using inner_shape_type = typename inner_types::inner_shape_type; using inner_strides_type = typename inner_types::inner_strides_type; using iterable_base = xiterable; using iterator = typename iterable_base::iterator; using const_iterator = typename iterable_base::const_iterator; using stepper = typename iterable_base::stepper; using const_stepper = typename iterable_base::const_stepper; static constexpr layout_type static_layout = layout_type::dynamic; static constexpr bool contiguous_layout = false; void reshape(const shape_type& shape); void reshape(const shape_type& shape, layout_type l); void reshape(const shape_type& shape, const strides_type& strides); layout_type layout() const; using base_type::operator(); using base_type::operator[]; using base_type::begin; using base_type::end; protected: pycontainer(); ~pycontainer() = default; pycontainer(pybind11::handle h, borrowed_t); pycontainer(pybind11::handle h, stolen_t); pycontainer(const pybind11::object& o); pycontainer(const pycontainer&) = default; pycontainer& operator=(const pycontainer&) = default; pycontainer(pycontainer&&) = default; pycontainer& operator=(pycontainer&&) = default; static derived_type ensure(pybind11::handle h); static bool check_(pybind11::handle h); static PyObject* raw_array_t(PyObject* ptr); PyArrayObject* python_array() const; size_type get_min_stride() const; }; namespace detail { template struct numpy_traits { private: constexpr static const int value_list[15] = { NPY_BOOL, NPY_BYTE, NPY_UBYTE, NPY_SHORT, NPY_USHORT, NPY_INT, NPY_UINT, NPY_LONGLONG, NPY_ULONGLONG, NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE}; public: using value_type = std::remove_const_t; static constexpr int type_num = 
value_list[pybind11::detail::is_fmt_numeric::index]; }; } /****************************** * pycontainer implementation * ******************************/ template inline pycontainer::pycontainer() : pybind11::object() { } template inline pycontainer::pycontainer(pybind11::handle h, borrowed_t b) : pybind11::object(h, b) { } template inline pycontainer::pycontainer(pybind11::handle h, stolen_t s) : pybind11::object(h, s) { } template inline pycontainer::pycontainer(const pybind11::object& o) : pybind11::object(raw_array_t(o.ptr()), pybind11::object::stolen_t{}) { if (!this->m_ptr) { throw pybind11::error_already_set(); } } template inline auto pycontainer::ensure(pybind11::handle h) -> derived_type { auto result = pybind11::reinterpret_steal(raw_array_t(h.ptr())); if (result.ptr() == nullptr) { PyErr_Clear(); } return result; } template inline bool pycontainer::check_(pybind11::handle h) { int type_num = detail::numpy_traits::type_num; return PyArray_Check(h.ptr()) && PyArray_EquivTypenums(PyArray_TYPE(reinterpret_cast(h.ptr())), type_num); } template inline PyObject* pycontainer::raw_array_t(PyObject* ptr) { if (ptr == nullptr) { return nullptr; } int type_num = detail::numpy_traits::type_num; auto res = PyArray_FromAny(ptr, PyArray_DescrFromType(type_num), 0, 0, NPY_ARRAY_ENSUREARRAY | NPY_ARRAY_FORCECAST, nullptr); return res; } template inline PyArrayObject* pycontainer::python_array() const { return reinterpret_cast(this->m_ptr); } template inline auto pycontainer::get_min_stride() const -> size_type { const size_type& (*min)(const size_type&, const size_type&) = std::min; return std::max(size_type(1), std::accumulate(this->strides().cbegin(), this->strides().cend(), std::numeric_limits::max(), min)); } /** * Reshapes the container. * @param shape the new shape */ template inline void pycontainer::reshape(const shape_type& shape) { if (shape.size() != this->dimension() || !std::equal(shape.begin(), shape.end(), this->shape().begin())) { reshape(shape, layout_type::row_major); } } /** * Reshapes the container. * @param shape the new shape * @param l the new layout */ template inline void pycontainer::reshape(const shape_type& shape, layout_type l) { strides_type strides = make_sequence(shape.size(), size_type(1)); compute_strides(shape, l, strides); reshape(shape, strides); } /** * Reshapes the container. * @param shape the new shape * @param strides the new strides */ template inline void pycontainer::reshape(const shape_type& shape, const strides_type& strides) { derived_type tmp(shape, strides); *static_cast(this) = std::move(tmp); } /** * Return the layout_type of the container * @return layout_type of the container */ template inline layout_type pycontainer::layout() const { if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_C_CONTIGUOUS)) return layout_type::row_major; else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_F_CONTIGUOUS)) return layout_type::column_major; else return layout_type::dynamic; } /** * Import the numpy Python module. */ inline void import_numpy() { #ifdef FORCE_IMPORT_ARRAY if (_import_array() < 0) { PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } #endif } } #endif xtensor-python-0.12.4/include/xtensor-python/pystrides_adaptor.hpp000066400000000000000000000150041314412064200255310ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. 
* * * * The full license is in the file LICENSE, distributed with this software. * ****************************************************************************/ #ifndef PYSTRIDES_ADAPTOR_HPP #define PYSTRIDES_ADAPTOR_HPP #include #include namespace xt { template class pystrides_iterator; template class pystrides_adaptor { public: using value_type = std::size_t; using const_reference = value_type; using const_pointer = const value_type*; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using const_iterator = pystrides_iterator; pystrides_adaptor() = default; pystrides_adaptor(const_pointer data, size_type size); bool empty() const noexcept; size_type size() const noexcept; const_reference operator[](size_type i) const; const_reference front() const; const_reference back() const; const_iterator begin() const; const_iterator end() const; const_iterator cbegin() const; const_iterator cend() const; private: const_pointer p_data; size_type m_size; }; /************************************* * pystrides_iterator implementation * *************************************/ template class pystrides_iterator { public: using self_type = pystrides_iterator; using value_type = typename pystrides_adaptor::value_type; using pointer = typename pystrides_adaptor::const_pointer; using reference = typename pystrides_adaptor::const_reference; using difference_type = typename pystrides_adaptor::difference_type; using iterator_category = std::random_access_iterator_tag; inline pystrides_iterator(pointer current) : p_current(current) { } inline reference operator*() const { return *p_current / N; } inline pointer operator->() const { return p_current; } inline reference operator[](difference_type n) const { return *(p_current + n) / N; } inline self_type& operator++() { ++p_current; return *this; } inline self_type& operator--() { --p_current; return *this; } inline self_type operator++(int) { self_type tmp(*this); ++p_current; return tmp; } inline self_type operator--(int) { self_type tmp(*this); --p_current; return tmp; } inline self_type& operator+=(difference_type n) { p_current += n; return *this; } inline self_type& operator-=(difference_type n) { p_current -= n; return *this; } inline self_type operator+(difference_type n) const { return self_type(p_current + n); } inline self_type operator-(difference_type n) const { return self_type(p_current - n); } inline self_type operator-(const self_type& rhs) const { self_type tmp(*this); tmp -= (p_current - rhs.p_current); return tmp; } pointer get_pointer() const { return p_current; } private: pointer p_current; }; template inline bool operator==(const pystrides_iterator& lhs, const pystrides_iterator& rhs) { return lhs.get_pointer() == rhs.get_pointer(); } template inline bool operator!=(const pystrides_iterator& lhs, const pystrides_iterator& rhs) { return !(lhs == rhs); } template inline bool operator<(const pystrides_iterator& lhs, const pystrides_iterator& rhs) { return lhs.get_pointer() < rhs.get_pointer(); } template inline bool operator<=(const pystrides_iterator& lhs, const pystrides_iterator& rhs) { return (lhs < rhs) || (lhs == rhs); } template inline bool operator>(const pystrides_iterator& lhs, const pystrides_iterator& rhs) { return !(lhs <= rhs); } template inline bool operator>=(const pystrides_iterator& lhs, const pystrides_iterator& rhs) { return !(lhs < rhs); } /************************************ * pystrides_adaptor implementation * ************************************/ template inline 
pystrides_adaptor::pystrides_adaptor(const_pointer data, size_type size) : p_data(data), m_size(size) { } template inline bool pystrides_adaptor::empty() const noexcept { return m_size == 0; } template inline auto pystrides_adaptor::size() const noexcept -> size_type { return m_size; } template inline auto pystrides_adaptor::operator[](size_type i) const -> const_reference { return p_data[i] / N; } template inline auto pystrides_adaptor::front() const -> const_reference { return p_data[0] / N; } template inline auto pystrides_adaptor::back() const -> const_reference { return p_data[m_size - 1] / N; } template inline auto pystrides_adaptor::begin() const -> const_iterator { return const_iterator(p_data); } template inline auto pystrides_adaptor::end() const -> const_iterator { return const_iterator(p_data + m_size); } template inline auto pystrides_adaptor::cbegin() const -> const_iterator { return begin(); } template inline auto pystrides_adaptor::cend() const -> const_iterator { return end(); } } #endif xtensor-python-0.12.4/include/xtensor-python/pytensor.hpp000066400000000000000000000356631314412064200236710ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. * ****************************************************************************/ #ifndef PY_TENSOR_HPP #define PY_TENSOR_HPP #include #include #include #include "xtensor/xbuffer_adaptor.hpp" #include "xtensor/xiterator.hpp" #include "xtensor/xsemantic.hpp" #include "xtensor/xutils.hpp" #include "pycontainer.hpp" #include "pystrides_adaptor.hpp" namespace xt { template class pytensor; } namespace pybind11 { namespace detail { template struct handle_type_name> { static PYBIND11_DESCR name() { return _("numpy.ndarray[") + make_caster::name() + _("]"); } }; template struct pyobject_caster> { using type = xt::pytensor; bool load(handle src, bool convert) { if (!convert) { if (!PyArray_Check(src.ptr())) { return false; } int type_num = xt::detail::numpy_traits::type_num; if (PyArray_TYPE(reinterpret_cast(src.ptr())) != type_num) { return false; } } value = type::ensure(src); return static_cast(value); } static handle cast(const handle& src, return_value_policy, handle) { return src.inc_ref(); } PYBIND11_TYPE_CASTER(type, handle_type_name::name()); }; } } namespace xt { template struct xiterable_inner_types> : xcontainer_iterable_types> { }; template struct xcontainer_inner_types> { using container_type = xbuffer_adaptor; using shape_type = std::array; using strides_type = shape_type; using backstrides_type = shape_type; using inner_shape_type = shape_type; using inner_strides_type = strides_type; using inner_backstrides_type = backstrides_type; using temporary_type = pytensor; }; /** * @class pytensor * @brief Multidimensional container providing the xtensor container semantics wrapping a numpy array. * * pytensor is similar to the xtensor container in that it has a static dimensionality. * * Unlike with the pyarray container, pytensor cannot be reshaped with a different number of dimensions * and reshapes are not reflected on the Python side. However, pytensor has benefits compared to pyarray * in terms of performances. pytensor shapes are stack-allocated which makes iteration upon pytensor * faster than with pyarray. * * @tparam T The type of the element stored in the pyarray. 
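     *
     * Example (an illustrative sketch; the function name @c scale is
     * hypothetical and not part of the library):
     * @code
     * // The dimensionality (here 2) is fixed at compile time.
     * xt::pytensor<double, 2> scale(const xt::pytensor<double, 2>& m, double factor)
     * {
     *     return factor * m;  // lazy expression, evaluated into a new pytensor
     * }
     * @endcode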
* @sa pyarray */ template class pytensor : public pycontainer>, public xcontainer_semantic> { public: using self_type = pytensor; using semantic_base = xcontainer_semantic; using base_type = pycontainer; using container_type = typename base_type::container_type; using value_type = typename base_type::value_type; using reference = typename base_type::reference; using const_reference = typename base_type::const_reference; using pointer = typename base_type::pointer; using size_type = typename base_type::size_type; using shape_type = typename base_type::shape_type; using strides_type = typename base_type::strides_type; using backstrides_type = typename base_type::backstrides_type; using inner_shape_type = typename base_type::inner_shape_type; using inner_strides_type = typename base_type::inner_strides_type; using inner_backstrides_type = typename base_type::inner_backstrides_type; pytensor(); pytensor(nested_initializer_list_t t); pytensor(pybind11::handle h, pybind11::object::borrowed_t); pytensor(pybind11::handle h, pybind11::object::stolen_t); pytensor(const pybind11::object& o); explicit pytensor(const shape_type& shape, layout_type l = layout_type::row_major); explicit pytensor(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major); explicit pytensor(const shape_type& shape, const strides_type& strides, const_reference value); explicit pytensor(const shape_type& shape, const strides_type& strides); pytensor(const self_type& rhs); self_type& operator=(const self_type& rhs); pytensor(self_type&&) = default; self_type& operator=(self_type&& e) = default; template pytensor(const xexpression& e); template self_type& operator=(const xexpression& e); using base_type::begin; using base_type::end; static self_type ensure(pybind11::handle h); static bool check_(pybind11::handle h); private: inner_shape_type m_shape; inner_strides_type m_strides; inner_backstrides_type m_backstrides; container_type m_data; void init_tensor(const shape_type& shape, const strides_type& strides); void init_from_python(); inner_shape_type& shape_impl() noexcept; const inner_shape_type& shape_impl() const noexcept; inner_strides_type& strides_impl() noexcept; const inner_strides_type& strides_impl() const noexcept; inner_backstrides_type& backstrides_impl() noexcept; const inner_backstrides_type& backstrides_impl() const noexcept; container_type& data_impl() noexcept; const container_type& data_impl() const noexcept; friend class xcontainer>; }; /*************************** * pytensor implementation * ***************************/ /** * @name Constructors */ //@{ /** * Allocates an uninitialized pytensor that holds 1 element. */ template inline pytensor::pytensor() : base_type() { m_shape = make_sequence(N, size_type(1)); m_strides = make_sequence(N, size_type(0)); init_tensor(m_shape, m_strides); m_data[0] = T(); } /** * Allocates a pytensor with a nested initializer list. */ template inline pytensor::pytensor(nested_initializer_list_t t) : base_type() { base_type::reshape(xt::shape(t), layout_type::row_major); nested_copy(m_data.begin(), t); } template inline pytensor::pytensor(pybind11::handle h, pybind11::object::borrowed_t b) : base_type(h, b) { init_from_python(); } template inline pytensor::pytensor(pybind11::handle h, pybind11::object::stolen_t s) : base_type(h, s) { init_from_python(); } template inline pytensor::pytensor(const pybind11::object& o) : base_type(o) { init_from_python(); } /** * Allocates an uninitialized pytensor with the specified shape and * layout. 
* @param shape the shape of the pytensor * @param l the layout_type of the pytensor */ template inline pytensor::pytensor(const shape_type& shape, layout_type l) { compute_strides(shape, l, m_strides); init_tensor(shape, m_strides); } /** * Allocates a pytensor with the specified shape and layout. Elements * are initialized to the specified value. * @param shape the shape of the pytensor * @param value the value of the elements * @param l the layout_type of the pytensor */ template inline pytensor::pytensor(const shape_type& shape, const_reference value, layout_type l) { compute_strides(shape, l, m_strides); init_tensor(shape, m_strides); std::fill(m_data.begin(), m_data.end(), value); } /** * Allocates an uninitialized pytensor with the specified shape and strides. * Elements are initialized to the specified value. * @param shape the shape of the pytensor * @param strides the strides of the pytensor * @param value the value of the elements */ template inline pytensor::pytensor(const shape_type& shape, const strides_type& strides, const_reference value) { init_tensor(shape, strides); std::fill(m_data.begin(), m_data.end(), value); } /** * Allocates an uninitialized pytensor with the specified shape and strides. * @param shape the shape of the pytensor * @param strides the strides of the pytensor */ template inline pytensor::pytensor(const shape_type& shape, const strides_type& strides) { init_tensor(shape, strides); } //@} /** * @name Copy semantic */ //@{ /** * The copy constructor. */ template inline pytensor::pytensor(const self_type& rhs) : base_type() { init_tensor(rhs.shape(), rhs.strides()); std::copy(rhs.data().cbegin(), rhs.data().cend(), this->data().begin()); } /** * The assignment operator. */ template inline auto pytensor::operator=(const self_type& rhs) -> self_type& { self_type tmp(rhs); *this = std::move(tmp); return *this; } //@} /** * @name Extended copy semantic */ //@{ /** * The extended copy constructor. */ template template inline pytensor::pytensor(const xexpression& e) : base_type() { shape_type shape = forward_sequence(e.derived_cast().shape()); strides_type strides = make_sequence(N, size_type(0)); compute_strides(shape, layout_type::row_major, strides); init_tensor(shape, strides); semantic_base::assign(e); } /** * The extended assignment operator. 
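     *
     * For example (shapes and values are illustrative):
     * @code
     * xt::pytensor<double, 1> a({4}, 1.), b({4}, 2.), y({4});
     * y = a + b;  // evaluates the lazy sum into y's numpy buffer
     * @endcode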
*/ template template inline auto pytensor::operator=(const xexpression& e) -> self_type& { return semantic_base::operator=(e); } //@} template inline auto pytensor::ensure(pybind11::handle h) -> self_type { return base_type::ensure(h); } template inline bool pytensor::check_(pybind11::handle h) { return base_type::check_(h); } template inline void pytensor::init_tensor(const shape_type& shape, const strides_type& strides) { npy_intp python_strides[N]; std::transform(strides.begin(), strides.end(), python_strides, [](auto v) { return sizeof(T) * v; }); int flags = NPY_ARRAY_ALIGNED; if (!std::is_const::value) { flags |= NPY_ARRAY_WRITEABLE; } int type_num = detail::numpy_traits::type_num; auto tmp = pybind11::reinterpret_steal( PyArray_New(&PyArray_Type, N, const_cast(shape.data()), type_num, python_strides, nullptr, sizeof(T), flags, nullptr)); if (!tmp) { throw std::runtime_error("NumPy: unable to create ndarray"); } this->m_ptr = tmp.release().ptr(); m_shape = shape; m_strides = strides; adapt_strides(m_shape, m_strides, m_backstrides); m_data = container_type(reinterpret_cast(PyArray_DATA(this->python_array())), static_cast(PyArray_SIZE(this->python_array()))); } template inline void pytensor::init_from_python() { if (PyArray_NDIM(this->python_array()) != N) { throw std::runtime_error("NumPy: ndarray has incorrect number of dimensions"); } std::copy(PyArray_DIMS(this->python_array()), PyArray_DIMS(this->python_array()) + N, m_shape.begin()); std::transform(PyArray_STRIDES(this->python_array()), PyArray_STRIDES(this->python_array()) + N, m_strides.begin(), [](auto v) { return v / sizeof(T); }); adapt_strides(m_shape, m_strides, m_backstrides); m_data = container_type(reinterpret_cast(PyArray_DATA(this->python_array())), this->get_min_stride() * static_cast(PyArray_SIZE(this->python_array()))); } template inline auto pytensor::shape_impl() noexcept -> inner_shape_type& { return m_shape; } template inline auto pytensor::shape_impl() const noexcept -> const inner_shape_type& { return m_shape; } template inline auto pytensor::strides_impl() noexcept -> inner_strides_type& { return m_strides; } template inline auto pytensor::strides_impl() const noexcept -> const inner_strides_type& { return m_strides; } template inline auto pytensor::backstrides_impl() noexcept -> inner_backstrides_type& { return m_backstrides; } template inline auto pytensor::backstrides_impl() const noexcept -> const inner_backstrides_type& { return m_backstrides; } template inline auto pytensor::data_impl() noexcept -> container_type& { return m_data; } template inline auto pytensor::data_impl() const noexcept -> const container_type& { return m_data; } } #endif xtensor-python-0.12.4/include/xtensor-python/pyvectorize.hpp000066400000000000000000000036771314412064200243710ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. * ****************************************************************************/ #ifndef PY_VECTORIZE_HPP #define PY_VECTORIZE_HPP #include #include "pyarray.hpp" #include "xtensor/xvectorize.hpp" namespace xt { template struct pyvectorizer { xvectorizer m_vectorizer; template , pyvectorizer>::value>> pyvectorizer(F&& func) : m_vectorizer(std::forward(func)) { } inline pyarray operator()(const pyarray&... 
args) const { pyarray res = m_vectorizer(args...); return res; } }; /** * @brief Create numpy universal function from scalar function. */ template inline pyvectorizer pyvectorize(R (*f)(Args...)) { return pyvectorizer(f); } /// @cond DOXYGEN_INCLUDE_OVERLOADS template inline pyvectorizer pyvectorize(F&& f, R (*)(Args...)) { return pyvectorizer(std::forward(f)); } template inline auto pyvectorize(F&& f) -> decltype(pyvectorize(std::forward(f), (detail::get_function_type*)nullptr)) { return pyvectorize(std::forward(f), (detail::get_function_type*)nullptr); } /// @endcond } #endif xtensor-python-0.12.4/include/xtensor-python/xtensor_python_config.hpp000066400000000000000000000013401314412064200264170ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. * ****************************************************************************/ #ifndef XTENSOR_PYTHON_CONFIG_HPP #define XTENSOR_PYTHON_CONFIG_HPP #define XTENSOR_PYTHON_VERSION_MAJOR 0 #define XTENSOR_PYTHON_VERSION_MINOR 12 #define XTENSOR_PYTHON_VERSION_PATCH 4 #endif xtensor-python-0.12.4/readthedocs.yml000066400000000000000000000000461314412064200176240ustar00rootroot00000000000000conda: file: docs/environment.yml xtensor-python-0.12.4/test/000077500000000000000000000000001314412064200155735ustar00rootroot00000000000000xtensor-python-0.12.4/test/CMakeLists.txt000066400000000000000000000101421314412064200203310ustar00rootroot00000000000000############################################################################ # Copyright (c) 2016, Johan Mabille and Sylvain Corlay # # # # Distributed under the terms of the BSD 3-Clause License. # # # # The full license is in the file LICENSE, distributed with this software. # ############################################################################ cmake_minimum_required(VERSION 3.1) if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) project(xtensor-python-test) find_package(pybind11 REQUIRED) set(PYBIND11_INCLUDE_DIR ${pybind11_INCLUDE_DIRS}) find_package(xtensor REQUIRED CONFIG) set(XTENSOR_INCLUDE_DIR ${xtensor_INCLUDE_DIRS}) find_package(xtensor-python REQUIRED CONFIG) set(XTENSOR_PYTHON_INCLUDE_DIR ${xtensor-python_INCLUDE_DIRS}) endif () message(STATUS "Forcing tests build type to Release") set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." 
FORCE) include(CheckCXXCompilerFlag) string(TOUPPER "${CMAKE_BUILD_TYPE}" U_CMAKE_BUILD_TYPE) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Intel") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -Wunused-parameter -Wextra -Wreorder -Wconversion") CHECK_CXX_COMPILER_FLAG("-std=c++14" HAS_CPP14_FLAG) if (HAS_CPP14_FLAG) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") else() message(FATAL_ERROR "Unsupported compiler -- xtensor requires C++14 support!") endif() endif() if(MSVC) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP /bigobj") set(CMAKE_EXE_LINKER_FLAGS /MANIFEST:NO) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) string(REPLACE "/MD" "-MT" ${flag_var} "${${flag_var}}") endforeach() endif() if (DOWNLOAD_GTEST OR GTEST_SRC_DIR) if(DOWNLOAD_GTEST) # Download and unpack googletest at configure time configure_file(downloadGTest.cmake.in googletest-download/CMakeLists.txt) else() # Copy local source of googletest at configure time configure_file(copyGTest.cmake.in googletest-download/CMakeLists.txt) endif() execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" . RESULT_VARIABLE result WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download ) if(result) message(FATAL_ERROR "CMake step for googletest failed: ${result}") endif() execute_process(COMMAND ${CMAKE_COMMAND} --build . RESULT_VARIABLE result WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download ) if(result) message(FATAL_ERROR "Build step for googletest failed: ${result}") endif() # Add googletest directly to our build. This defines # the gtest and gtest_main targets. add_subdirectory(${CMAKE_CURRENT_BINARY_DIR}/googletest-src ${CMAKE_CURRENT_BINARY_DIR}/googletest-build) set(GTEST_INCLUDE_DIRS "${gtest_SOURCE_DIR}/include") set(GTEST_BOTH_LIBRARIES gtest_main gtest) else() find_package(GTest REQUIRED) endif() find_package(Threads) include_directories(${XTENSOR_INCLUDE_DIR}) include_directories(${XTENSOR_PYTHON_INCLUDE_DIR}) include_directories(${GTEST_INCLUDE_DIRS}) include_directories(${PYBIND11_INCLUDE_DIR}) set(XTENSOR_PYTHON_TESTS main.cpp test_pyarray.cpp test_pytensor.cpp test_pyvectorize.cpp ) set(XTENSOR_PYTHON_TARGET test_xtensor_python) add_executable(${XTENSOR_PYTHON_TARGET} ${XTENSOR_PYTHON_TESTS} ${XTENSOR_PYTHON_HEADERS} ${XTENSOR_HEADERS}) target_link_libraries(${XTENSOR_PYTHON_TARGET} ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${PYTHON_LIBRARIES}) if(DOWNLOAD_GTEST OR GTEST_SRC_DIR) add_dependencies(${XTENSOR_PYTHON_TARGET} gtest_main) endif() add_custom_target(xtest COMMAND ./test_xtensor_python DEPENDS ${XTENSOR_PYTHON_TARGET}) xtensor-python-0.12.4/test/copyGTest.cmake.in000066400000000000000000000016661314412064200211340ustar00rootroot00000000000000############################################################################ # Copyright (c) 2016, Johan Mabille and Sylvain Corlay # # # # Distributed under the terms of the BSD 3-Clause License. # # # # The full license is in the file LICENSE, distributed with this software. 
# ############################################################################ cmake_minimum_required(VERSION 2.8.2) project(googletest-download NONE) include(ExternalProject) ExternalProject_Add(googletest URL "${GTEST_SRC_DIR}" SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-src" BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-build" CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" TEST_COMMAND "" ) xtensor-python-0.12.4/test/downloadGTest.cmake.in000066400000000000000000000017601314412064200217640ustar00rootroot00000000000000############################################################################ # Copyright (c) 2016, Johan Mabille and Sylvain Corlay # # # # Distributed under the terms of the BSD 3-Clause License. # # # # The full license is in the file LICENSE, distributed with this software. # ############################################################################ cmake_minimum_required(VERSION 2.8.2) project(googletest-download NONE) include(ExternalProject) ExternalProject_Add(googletest GIT_REPOSITORY https://github.com/google/googletest.git GIT_TAG release-1.8.0 SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-src" BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-build" CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" TEST_COMMAND "" ) xtensor-python-0.12.4/test/main.cpp000066400000000000000000000020071314412064200172220ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. * ****************************************************************************/ #include #include "pybind11/numpy.h" #define FORCE_IMPORT_ARRAY #include "xtensor-python/pyarray.hpp" #include "gtest/gtest.h" #include int main(int argc, char* argv[]) { // Initialize all the things (google-test and Python interpreter) Py_Initialize(); xt::import_numpy(); ::testing::InitGoogleTest(&argc, argv); // Run test suite int ret = RUN_ALL_TESTS(); // Closure of the Python interpreter Py_Finalize(); return ret; } xtensor-python-0.12.4/test/test_common.hpp000066400000000000000000000441141314412064200206370ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. 
* ****************************************************************************/ #ifndef TEST_COMMON_HPP #define TEST_COMMON_HPP #include #include #include "xtensor/xstrides.hpp" #include "xtensor/xcontainer.hpp" #include "xtensor/xstorage.hpp" namespace xt { template bool operator==(const uvector& lhs, const std::vector& rhs) { return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); } template bool operator==(const std::vector& lhs, const uvector& rhs) { return rhs == lhs; } template > struct layout_result { using vector_type = uvector; using size_type = typename C::value_type; using shape_type = C; using strides_type = C; using assigner_type = std::vector>; inline layout_result() { m_shape = { 3, 2, 4 }; m_assigner.resize(m_shape[0]); for (std::size_t i = 0; i < std::size_t(m_shape[0]); ++i) { m_assigner[i].resize(m_shape[1]); } m_assigner[0][0] = { -1, 1, 2, 3 }; m_assigner[0][1] = { 4, 5, 6, 7 }; m_assigner[1][0] = { 8, 9, 10, 11 }; m_assigner[1][1] = { 12, 13, 14, 15 }; m_assigner[2][0] = { 16, 17, 18, 19 }; m_assigner[2][1] = { 20, 21, 22, 23 }; } shape_type m_shape; strides_type m_strides; strides_type m_backstrides; vector_type m_data; assigner_type m_assigner; inline size_type size() const { return m_data.size(); } inline const shape_type& shape() const { return m_shape; } inline const strides_type& strides() const { return m_strides; } inline const strides_type& backstrides() const { return m_backstrides; } inline const vector_type& data() const { return m_data; } }; template > struct row_major_result : layout_result { inline row_major_result() { this->m_strides = { 8, 4, 1 }; this->m_backstrides = {16, 4, 3}; this->m_data = { -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 }; } }; template > struct column_major_result : layout_result { inline column_major_result() { this->m_strides = { 1, 3, 6 }; this->m_backstrides = { 2, 3, 18 }; this->m_data = { -1, 8, 16, 4, 12, 20, 1, 9, 17, 5, 13, 21, 2, 10, 18, 6, 14, 22, 3, 11, 19, 7, 15, 23 }; } }; template > struct central_major_result : layout_result { inline central_major_result() { this->m_strides = { 8, 1, 2 }; this->m_backstrides = { 16, 1, 6}; this->m_data = { -1, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15, 16, 20, 17, 21, 18, 22, 19, 23 }; } }; template > struct unit_shape_result { using vector_type = std::vector; using size_type = typename C::value_type; using shape_type = C; using strides_type = C; using assigner_type = std::vector>; inline unit_shape_result() { m_shape = { 3, 1, 4 }; m_strides = { 4, 0, 1 }; m_backstrides = { 8, 0, 3 }; m_data = { -1, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19 }; m_assigner.resize(m_shape[0]); for (std::size_t i = 0; i < std::size_t(m_shape[0]); ++i) { m_assigner[i].resize(m_shape[1]); } m_assigner[0][0] = { -1, 1, 2, 3 }; m_assigner[1][0] = { 8, 9, 10, 11 }; m_assigner[2][0] = { 16, 17, 18, 19 }; } shape_type m_shape; strides_type m_strides; strides_type m_backstrides; vector_type m_data; assigner_type m_assigner; inline size_type size() const { return m_data.size(); } inline const shape_type& shape() const { return m_shape; } inline const strides_type& strides() const { return m_strides; } inline const strides_type& backstrides() const { return m_backstrides; } inline const vector_type& data() const { return m_data; } }; template void compare_shape(V& vec, const R& result) { EXPECT_TRUE(std::equal(vec.shape().cbegin(), vec.shape().cend(), result.shape().cbegin())); EXPECT_TRUE(std::equal(vec.strides().cbegin(), 
vec.strides().cend(), result.strides().cbegin())); EXPECT_EQ(vec.size(), result.size()); } template > void test_reshape(V& vec) { { SCOPED_TRACE("row_major reshape"); row_major_result rm; vec.reshape(rm.m_shape, layout_type::row_major); compare_shape(vec, rm); } { SCOPED_TRACE("column_major reshape"); column_major_result cm; vec.reshape(cm.m_shape, layout_type::column_major); compare_shape(vec, cm); } { SCOPED_TRACE("central_major reshape"); central_major_result cem; vec.reshape(cem.m_shape, cem.m_strides); compare_shape(vec, cem); } { SCOPED_TRACE("unit_shape reshape"); unit_shape_result usr; vec.reshape(usr.m_shape, layout_type::row_major); compare_shape(vec, usr); } } // TODO: add back when in place transpose methods have been added // into xtensor /*template > void test_transpose(V& vec) { using inner_shape_type = typename V::inner_shape_type; using shape_type = typename V::shape_type; using strides_type = typename V::strides_type; { SCOPED_TRACE("transpose"); inner_shape_type shape_new = vec.shape(); vec.transpose(); std::reverse(shape_new.begin(), shape_new.end()); EXPECT_EQ(vec.shape(), shape_new); } { SCOPED_TRACE("transpose with data"); row_major_result rm; vec.reshape(rm.shape(), layout::row_major); assign_array(vec, rm.m_assigner); EXPECT_TRUE(std::equal(vec.data().begin(), vec.data().end(), rm.m_data.begin())); auto vec_copy = vec; shape_type shape_new(rm.shape()); vec.transpose(); std::reverse(shape_new.begin(), shape_new.end()); EXPECT_EQ(vec.shape(), shape_new); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), rm.m_data.cbegin())); strides_type new_strides = {rm.m_strides[2], rm.m_strides[1], rm.m_strides[0]}; EXPECT_EQ(vec.strides(), new_strides); strides_type new_backstrides = {rm.m_backstrides[2], rm.m_backstrides[1], rm.m_backstrides[0]}; EXPECT_EQ(vec.backstrides(), new_backstrides); EXPECT_EQ(vec_copy(0, 0, 0), vec(0, 0, 0)); EXPECT_EQ(vec_copy(0, 1, 0), vec(0, 1, 0)); EXPECT_EQ(vec_copy(1, 1, 0), vec(0, 1, 1)); EXPECT_EQ(vec_copy(1, 1, 2), vec(2, 1, 1)); } { SCOPED_TRACE("transpose with permutation"); row_major_result rm; vec.reshape(rm.shape(), layout::row_major); assign_array(vec, rm.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), rm.m_data.cbegin())); auto vec_copy = vec; inner_shape_type a = vec.shape(); vec.transpose({1, 0, 2}); shape_type shape_new = {a[1], a[0], a[2]}; EXPECT_TRUE(std::equal(vec.shape().cbegin(), vec.shape().cend(), shape_new.begin())); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), rm.m_data.cbegin())); strides_type new_strides = {rm.m_strides[1], rm.m_strides[0], rm.m_strides[2]}; EXPECT_EQ(vec.strides(), new_strides); strides_type new_backstrides = {rm.m_backstrides[1], rm.m_backstrides[0], rm.m_backstrides[2]}; EXPECT_EQ(vec.backstrides(), new_backstrides); EXPECT_EQ(vec_copy(0, 0, 0), vec(0, 0, 0)); EXPECT_EQ(vec_copy(0, 1, 0), vec(1, 0, 0)); EXPECT_EQ(vec_copy(1, 1, 0), vec(1, 1, 0)); EXPECT_EQ(vec_copy(1, 1, 2), vec(1, 1, 2)); // Compilation check only std::vector perm = { 1, 0, 2 }; vec.transpose(perm); } { SCOPED_TRACE("transpose permutation throws"); row_major_result rm; vec.reshape(rm.shape(), layout::row_major); EXPECT_THROW(vec.transpose({1, 1, 0}, check_policy::full()), transpose_error); EXPECT_THROW(vec.transpose({1, 0, 2, 3}, check_policy::full()), transpose_error); EXPECT_THROW(vec.transpose({1, 2}, check_policy::full()), transpose_error); EXPECT_THROW(vec.transpose({3, 0, 1}, check_policy::full()), transpose_error); } }*/ template void assign_array(V1& dst, const V2& 
src) { for (std::size_t i = 0; i < std::size_t(dst.shape()[0]); ++i) { for (std::size_t j = 0; j < std::size_t(dst.shape()[1]); ++j) { for (std::size_t k = 0; k < std::size_t(dst.shape()[2]); ++k) { dst(i, j, k) = src[i][j][k]; } } } } template void test_bound_check(V& vec) { #ifdef XTENSOR_ENABLE_ASSERT EXPECT_ANY_THROW(vec(10,10,10)); #else (void)vec; #endif } template > void test_access(V& vec) { { SCOPED_TRACE("row_major access"); row_major_result rm; vec.reshape(rm.m_shape, layout_type::row_major); assign_array(vec, rm.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), rm.m_data.cbegin())); EXPECT_EQ(vec(0, 1, 1), vec(1, 1)); EXPECT_EQ(vec(2, 1, 3), vec(2, 2, 2, 1, 3)); test_bound_check(vec); } { SCOPED_TRACE("column_major access"); column_major_result cm; vec.reshape(cm.m_shape, layout_type::column_major); assign_array(vec, cm.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), cm.m_data.cbegin())); EXPECT_EQ(vec(0, 1, 1), vec(1, 1)); EXPECT_EQ(vec(2, 1, 3), vec(2, 2, 2, 1, 3)); test_bound_check(vec); } { SCOPED_TRACE("central_major access"); central_major_result cem; vec.reshape(cem.m_shape, cem.m_strides); assign_array(vec, cem.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), cem.m_data.cbegin())); EXPECT_EQ(vec(0, 1, 1), vec(1, 1)); EXPECT_EQ(vec(2, 1, 3), vec(2, 2, 2, 1, 3)); test_bound_check(vec); } { SCOPED_TRACE("unit_shape access"); unit_shape_result usr; vec.reshape(usr.m_shape, layout_type::row_major); assign_array(vec, usr.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), usr.m_data.cbegin())); EXPECT_EQ(vec(0, 1, 0), vec(1, 0)); EXPECT_EQ(vec(2, 0, 3), vec(2, 2, 2, 0, 3)); test_bound_check(vec); } } template void indexed_assign_array(V1& dst, const V2& src) { xindex index(dst.dimension()); for (std::size_t i = 0; i < std::size_t(dst.shape()[0]); ++i) { index[0] = i; for (std::size_t j = 0; j < std::size_t(dst.shape()[1]); ++j) { index[1] = j; for (std::size_t k = 0; k < std::size_t(dst.shape()[2]); ++k) { index[2] = k; dst[index] = src[i][j][k]; } } } } template > void test_indexed_access(V& vec) { xindex index1 = {1, 1}; xindex index2 = {2, 2, 2, 1, 3}; { SCOPED_TRACE("row_major access"); row_major_result rm; vec.reshape(rm.m_shape, layout_type::row_major); indexed_assign_array(vec, rm.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), rm.m_data.cbegin())); EXPECT_EQ(vec(0, 1, 1), vec[index1]); EXPECT_EQ(vec(2, 1, 3), vec[index2]); } { SCOPED_TRACE("column_major access"); column_major_result cm; vec.reshape(cm.m_shape, layout_type::column_major); indexed_assign_array(vec, cm.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), cm.m_data.cbegin())); EXPECT_EQ(vec(0, 1, 1), vec[index1]); EXPECT_EQ(vec(2, 1, 3), vec[index2]); } { SCOPED_TRACE("central_major access"); central_major_result cem; vec.reshape(cem.m_shape, cem.m_strides); indexed_assign_array(vec, cem.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), cem.m_data.cbegin())); EXPECT_EQ(vec(0, 1, 1), vec[index1]); EXPECT_EQ(vec(2, 1, 3), vec[index2]); } { SCOPED_TRACE("unit_shape access"); unit_shape_result usr; vec.reshape(usr.m_shape, layout_type::row_major); indexed_assign_array(vec, usr.m_assigner); EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), usr.m_data.cbegin())); xindex id1 = { 1, 0 }; xindex id2 = { 2, 2, 2, 0, 3 }; EXPECT_EQ(vec(0, 1, 0), vec[id1]); EXPECT_EQ(vec(2, 0, 3), vec[id2]); } } template void 
test_broadcast(V& vec) { using shape_type = typename V::shape_type; shape_type s = { 3, 1, 4, 2 }; vec.reshape(s); { SCOPED_TRACE("same shape"); shape_type s1 = s; bool res = vec.broadcast_shape(s1); EXPECT_EQ(s1, s); EXPECT_TRUE(res); } { SCOPED_TRACE("different shape"); shape_type s2 = { 3, 5, 1, 2 }; shape_type s2r = {3, 5, 4, 2}; bool res = vec.broadcast_shape(s2); EXPECT_EQ(s2, s2r); EXPECT_FALSE(res); } { SCOPED_TRACE("incompatible shapes"); shape_type s4 = { 2, 1, 3, 2 }; bool wit = false; try { vec.broadcast_shape(s4); } catch(broadcast_error&) { wit = true; } EXPECT_TRUE(wit); } } template void test_broadcast2(V& vec) { using shape_type = typename V::shape_type; shape_type s = { 3, 1, 4, 2 }; vec.reshape(s); { SCOPED_TRACE("different dimensions"); shape_type s3 = {5, 3, 1, 4, 2}; shape_type s3r = s3; bool res = vec.broadcast_shape(s3); EXPECT_EQ(s3, s3r); EXPECT_FALSE(res); } } template > void test_iterator(V& vec) { { SCOPED_TRACE("row_major storage iterator"); row_major_result rm; vec.reshape(rm.m_shape, layout_type::row_major); std::copy(rm.data().cbegin(), rm.data().cend(), vec.begin()); EXPECT_TRUE(std::equal(rm.data().cbegin(), rm.data().cend(), vec.data().cbegin())); } { SCOPED_TRACE("column_major storage iterator"); column_major_result cm; vec.reshape(cm.m_shape, layout_type::column_major); std::copy(cm.data().cbegin(), cm.data().cend(), vec.begin()); EXPECT_TRUE(std::equal(cm.data().cbegin(), cm.data().cend(), vec.data().cbegin())); } { SCOPED_TRACE("central_major storage iterator"); central_major_result cem; vec.reshape(cem.m_shape, cem.m_strides); std::copy(cem.data().cbegin(), cem.data().cend(), vec.begin()); EXPECT_TRUE(std::equal(cem.data().cbegin(), cem.data().cend(), vec.data().cbegin())); } { SCOPED_TRACE("unit_shape storage iterator"); unit_shape_result usr; vec.reshape(usr.m_shape, layout_type::row_major); std::copy(usr.data().cbegin(), usr.data().cend(), vec.begin()); EXPECT_TRUE(std::equal(usr.data().cbegin(), usr.data().cend(), vec.data().cbegin())); } } } #endif xtensor-python-0.12.4/test/test_pyarray.cpp000066400000000000000000000132461314412064200210330ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. 
* ****************************************************************************/ #include "gtest/gtest.h" #include "test_common.hpp" #include "xtensor-python/pyarray.hpp" #include "xtensor/xarray.hpp" namespace xt { using container_type = std::vector; TEST(pyarray, initializer_constructor) { pyarray t {{{ 0, 1, 2}, { 3, 4, 5}, { 6, 7, 8}}, {{ 9, 10, 11}, {12, 13, 14}, {15, 16, 17}}}; EXPECT_EQ(t.dimension(), 3); EXPECT_EQ(t(0, 0, 1), 1); EXPECT_EQ(t.shape()[0], 2); } TEST(pyarray, shaped_constructor) { { SCOPED_TRACE("row_major constructor"); row_major_result<> rm; pyarray ra(rm.m_shape); compare_shape(ra, rm); EXPECT_EQ(layout_type::row_major, ra.layout()); } { SCOPED_TRACE("column_major constructor"); column_major_result<> cm; pyarray ca(cm.m_shape, layout_type::column_major); compare_shape(ca, cm); EXPECT_EQ(layout_type::column_major, ca.layout()); } } TEST(pyarray, strided_constructor) { central_major_result<> cmr; pyarray cma(cmr.m_shape, cmr.m_strides); compare_shape(cma, cmr); } TEST(pyarray, valued_constructor) { { SCOPED_TRACE("row_major valued constructor"); row_major_result<> rm; int value = 2; pyarray ra(rm.m_shape, value); compare_shape(ra, rm); std::vector vec(ra.size(), value); EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ra.data().cbegin())); } { SCOPED_TRACE("column_major valued constructor"); column_major_result<> cm; int value = 2; pyarray ca(cm.m_shape, value, layout_type::column_major); compare_shape(ca, cm); std::vector vec(ca.size(), value); EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ca.data().cbegin())); } } TEST(pyarray, strided_valued_constructor) { central_major_result<> cmr; int value = 2; pyarray cma(cmr.m_shape, cmr.m_strides, value); compare_shape(cma, cmr); std::vector vec(cma.size(), value); EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), cma.data().cbegin())); } TEST(pyarray, copy_semantic) { central_major_result<> res; int value = 2; pyarray a(res.m_shape, res.m_strides, value); { SCOPED_TRACE("copy constructor"); pyarray b(a); compare_shape(a, b); EXPECT_EQ(a.data(), b.data()); a.data()[0] += 1; EXPECT_NE(a.data()[0], b.data()[0]); } { SCOPED_TRACE("assignment operator"); row_major_result<> r; pyarray c(r.m_shape, 0); EXPECT_NE(a.data(), c.data()); c = a; compare_shape(a, c); EXPECT_EQ(a.data(), c.data()); a.data()[0] += 1; EXPECT_NE(a.data()[0], c.data()[0]); } } TEST(pyarray, move_semantic) { central_major_result<> res; int value = 2; pyarray a(res.m_shape, res.m_strides, value); { SCOPED_TRACE("move constructor"); pyarray tmp(a); pyarray b(std::move(tmp)); compare_shape(a, b); EXPECT_EQ(a.data(), b.data()); } { SCOPED_TRACE("move assignment"); row_major_result<> r; pyarray c(r.m_shape, 0); EXPECT_NE(a.data(), c.data()); pyarray tmp(a); c = std::move(tmp); compare_shape(a, c); EXPECT_EQ(a.data(), c.data()); } } TEST(pyarray, extended_constructor) { xt::xarray a1 = { { 1, 2 },{ 3, 4 } }; xt::xarray a2 = { { 1, 2 },{ 3, 4 } }; pyarray c = a1 + a2; EXPECT_EQ(c(0, 0), a1(0, 0) + a2(0, 0)); EXPECT_EQ(c(0, 1), a1(0, 1) + a2(0, 1)); EXPECT_EQ(c(1, 0), a1(1, 0) + a2(1, 0)); EXPECT_EQ(c(1, 1), a1(1, 1) + a2(1, 1)); } TEST(pyarray, reshape) { pyarray a; test_reshape(a); } /* TEST(pyarray, transpose) { pyarray a; test_transpose(a); } */ TEST(pyarray, access) { pyarray a; test_access(a); } TEST(pyarray, indexed_access) { pyarray a; test_indexed_access(a); } TEST(pyarray, broadcast_shape) { pyarray a; test_broadcast(a); test_broadcast2(a); } TEST(pyarray, iterator) { pyarray a; test_iterator(a); } TEST(pyarray, initializer_list) { pyarray a0(1); pyarray 
a1({1, 2}); pyarray a2({{1, 2}, {2, 4}, {5, 6}}); EXPECT_EQ(1, a0()); EXPECT_EQ(2, a1(1)); EXPECT_EQ(4, a2(1, 1)); } TEST(pyarray, zerod) { pyarray a; EXPECT_EQ(0, a()); } } xtensor-python-0.12.4/test/test_pytensor.cpp000066400000000000000000000134461314412064200212310ustar00rootroot00000000000000/*************************************************************************** * Copyright (c) 2016, Johan Mabille and Sylvain Corlay * * * * Distributed under the terms of the BSD 3-Clause License. * * * * The full license is in the file LICENSE, distributed with this software. * ****************************************************************************/ #include "gtest/gtest.h" #include "test_common.hpp" #include "xtensor-python/pytensor.hpp" #include "xtensor/xtensor.hpp" namespace xt { using container_type = std::array; TEST(pytensor, initializer_constructor) { pytensor t {{{ 0, 1, 2}, { 3, 4, 5}, { 6, 7, 8}}, {{ 9, 10, 11}, {12, 13, 14}, {15, 16, 17}}}; EXPECT_EQ(t.dimension(), 3); EXPECT_EQ(t(0, 0, 1), 1); EXPECT_EQ(t.shape()[0], 2); } TEST(pytensor, shaped_constructor) { { SCOPED_TRACE("row_major constructor"); row_major_result rm; pytensor ra(rm.m_shape); compare_shape(ra, rm); EXPECT_EQ(layout_type::row_major, ra.layout()); } { SCOPED_TRACE("column_major constructor"); column_major_result cm; pytensor ca(cm.m_shape, layout_type::column_major); compare_shape(ca, cm); EXPECT_EQ(layout_type::column_major, ca.layout()); } } TEST(pytensor, strided_constructor) { central_major_result cmr; pytensor cma(cmr.m_shape, cmr.m_strides); compare_shape(cma, cmr); } TEST(pytensor, valued_constructor) { { SCOPED_TRACE("row_major valued constructor"); row_major_result rm; int value = 2; pytensor ra(rm.m_shape, value); compare_shape(ra, rm); std::vector vec(ra.size(), value); EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ra.data().cbegin())); } { SCOPED_TRACE("column_major valued constructor"); column_major_result cm; int value = 2; pytensor ca(cm.m_shape, value, layout_type::column_major); compare_shape(ca, cm); std::vector vec(ca.size(), value); EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ca.data().cbegin())); } } TEST(pytensor, strided_valued_constructor) { central_major_result cmr; int value = 2; pytensor cma(cmr.m_shape, cmr.m_strides, value); compare_shape(cma, cmr); std::vector vec(cma.size(), value); EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), cma.data().cbegin())); } TEST(pytensor, copy_semantic) { central_major_result res; int value = 2; pytensor a(res.m_shape, res.m_strides, value); { SCOPED_TRACE("copy constructor"); pytensor b(a); compare_shape(a, b); EXPECT_EQ(a.data(), b.data()); a.data()[0] += 1; EXPECT_NE(a.data()[0], b.data()[0]); } { SCOPED_TRACE("assignment operator"); row_major_result r; pytensor c(r.m_shape, 0); EXPECT_NE(a.data(), c.data()); c = a; compare_shape(a, c); EXPECT_EQ(a.data(), c.data()); a.data()[0] += 1; EXPECT_NE(a.data()[0], c.data()[0]); } } TEST(pytensor, move_semantic) { central_major_result res; int value = 2; pytensor a(res.m_shape, res.m_strides, value); { SCOPED_TRACE("move constructor"); pytensor tmp(a); pytensor b(std::move(tmp)); compare_shape(a, b); EXPECT_EQ(a.data(), b.data()); } { SCOPED_TRACE("move assignment"); row_major_result r; pytensor c(r.m_shape, 0); EXPECT_NE(a.data(), c.data()); pytensor tmp(a); c = std::move(tmp); compare_shape(a, c); EXPECT_EQ(a.data(), c.data()); } } TEST(pytensor, extended_constructor) { xt::xtensor a1 = { {1, 2}, {3, 4} }; xt::xtensor a2 = { {1, 2}, {3, 4} }; pytensor c = a1 + a2; EXPECT_EQ(c(0, 0), a1(0, 0) + 
                  a2(0, 0));
        EXPECT_EQ(c(0, 1), a1(0, 1) + a2(0, 1));
        EXPECT_EQ(c(1, 0), a1(1, 0) + a2(1, 0));
        EXPECT_EQ(c(1, 1), a1(1, 1) + a2(1, 1));
    }

    TEST(pytensor, reshape)
    {
        pytensor<int, 3> a;
        test_reshape<pytensor<int, 3>, container_type>(a);
    }

    /*TEST(pytensor, transpose)
    {
        pytensor<int, 3> a;
        test_transpose<pytensor<int, 3>, container_type>(a);
    }*/

    TEST(pytensor, access)
    {
        pytensor<int, 3> a;
        test_access<pytensor<int, 3>, container_type>(a);
    }

    TEST(pytensor, indexed_access)
    {
        pytensor<int, 3> a;
        test_indexed_access<pytensor<int, 3>, container_type>(a);
    }

    TEST(pytensor, broadcast_shape)
    {
        pytensor<int, 4> a;
        test_broadcast(a);
    }

    TEST(pytensor, iterator)
    {
        pytensor<int, 3> a;
        test_iterator<pytensor<int, 3>, container_type>(a);
    }

    TEST(pytensor, zerod)
    {
        pytensor<int, 3> a;
        EXPECT_EQ(0, a());
    }
}
xtensor-python-0.12.4/test/test_pyvectorize.cpp
/***************************************************************************
* Copyright (c) 2016, Johan Mabille and Sylvain Corlay                     *
*                                                                          *
* Distributed under the terms of the BSD 3-Clause License.                 *
*                                                                          *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/

#include "gtest/gtest.h"

#include "test_common.hpp"

#include "xtensor-python/pytensor.hpp"
#include "xtensor-python/pyvectorize.hpp"

#include "pybind11/pybind11.h"
#include "pybind11/numpy.h"

namespace xt
{
    double f1(double a, double b)
    {
        return a + b;
    }

    using shape_type = std::vector<std::size_t>;

    TEST(pyvectorize, function)
    {
        auto vecf1 = pyvectorize(f1);
        shape_type shape = { 3, 2 };
        pyarray<double> a(shape, 1.5);
        pyarray<double> b(shape, 2.3);
        pyarray<double> c = vecf1(a, b);
        EXPECT_EQ(a(0, 0) + b(0, 0), c(0, 0));
    }

    TEST(pyvectorize, lambda)
    {
        auto vecf1 = pyvectorize([](double a, double b) { return a + b; });
        shape_type shape = { 3, 2 };
        pyarray<double> a(shape, 1.5);
        pyarray<double> b(shape, 2.3);
        pyarray<double> c = vecf1(a, b);
        EXPECT_EQ(a(0, 0) + b(0, 0), c(0, 0));
    }

    TEST(pyvectorize, complex)
    {
        using complex_t = std::complex<double>;
        shape_type shape = { 3, 2 };
        pyarray<complex_t> a(shape, complex_t(1.2, 2.5));
        auto f = pyvectorize([](complex_t x) { return std::abs(x); });
        auto res = f(a);
        double exp = std::abs(a(1, 1));
        EXPECT_EQ(exp, res(1, 1));
    }
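
    // Illustrative sketch, not part of the original suite: the functor
    // returned by pyvectorize yields a lazy xtensor expression, so it can be
    // composed with further expressions before assignment materializes the
    // result into a pyarray. The test name and values are hypothetical.
    TEST(pyvectorize, composition_sketch)
    {
        auto vecf = pyvectorize([](double a, double b) { return a * b; });
        shape_type shape = { 3, 2 };
        pyarray<double> a(shape, 1.5);
        pyarray<double> b(shape, 2.0);
        // lazy element-wise product, then element-wise sum, evaluated on assignment
        pyarray<double> c = vecf(a, b) + a;
        EXPECT_EQ(a(0, 0) * b(0, 0) + a(0, 0), c(0, 0));
    }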
}
xtensor-python-0.12.4/test_python/
xtensor-python-0.12.4/test_python/main.cpp
/***************************************************************************
* Copyright (c) 2016, Johan Mabille and Sylvain Corlay                     *
*                                                                          *
* Distributed under the terms of the BSD 3-Clause License.                 *
*                                                                          *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/

#include <numeric>

#include "xtensor/xmath.hpp"
#include "xtensor/xarray.hpp"

#define FORCE_IMPORT_ARRAY
#include "xtensor-python/pyarray.hpp"
#include "xtensor-python/pyvectorize.hpp"

namespace py = pybind11;
using complex_t = std::complex<double>;

// Examples

double example1(xt::pyarray<double>& m)
{
    return m(0);
}

xt::pyarray<double> example2(xt::pyarray<double>& m)
{
    return m + 2;
}

// Readme Examples

double readme_example1(xt::pyarray<double>& m)
{
    auto sines = xt::sin(m);
    return std::accumulate(sines.cbegin(), sines.cend(), 0.0);
}

double readme_example2(double i, double j)
{
    return std::sin(i) - std::cos(j);
}

auto complex_overload(const xt::pyarray<std::complex<double>>& a)
{
    return a;
}

auto no_complex_overload(const xt::pyarray<double>& a)
{
    return a;
}

auto complex_overload_reg(const std::complex<double>& a)
{
    return a;
}

auto no_complex_overload_reg(const double& a)
{
    return a;
}

// Vectorize Examples

int add(int i, int j)
{
    return i + j;
}

PYBIND11_PLUGIN(xtensor_python_test)
{
    xt::import_numpy();

    py::module m("xtensor_python_test", "Test module for xtensor python bindings");

    m.def("example1", example1);
    m.def("example2", example2);

    m.def("complex_overload", no_complex_overload);
    m.def("complex_overload", complex_overload);
    m.def("complex_overload_reg", no_complex_overload_reg);
    m.def("complex_overload_reg", complex_overload_reg);

    m.def("readme_example1", readme_example1);
    m.def("readme_example2", xt::pyvectorize(readme_example2));

    m.def("vectorize_example1", xt::pyvectorize(add));

    m.def("rect_to_polar", xt::pyvectorize([](complex_t x) { return std::abs(x); }));

    m.def("compare_shapes", [](const xt::pyarray<double>& a, const xt::pyarray<double>& b) {
        return a.shape() == b.shape();
    });

    return m.ptr();
}
xtensor-python-0.12.4/test_python/setup.py
############################################################################
# Copyright (c) 2016, Johan Mabille and Sylvain Corlay                     #
#                                                                          #
# Distributed under the terms of the BSD 3-Clause License.                 #
#                                                                          #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import os
import setuptools

__version__ = '0.0.1'


class get_pybind_include(object):
    """Helper class to determine the pybind11 include path

    The purpose of this class is to postpone importing pybind11
    until it is actually installed, so that the ``get_include()``
    method can be invoked.
    """

    def __init__(self, user=False):
        self.user = user

    def __str__(self):
        import pybind11
        return pybind11.get_include(self.user)


class get_numpy_include(object):
    """Helper class to determine the numpy include path

    The purpose of this class is to postpone importing numpy
    until it is actually installed, so that the ``get_include()``
    method can be invoked.
    """

    def __str__(self):
        import numpy
        return numpy.get_include()


ext_modules = [
    Extension(
        'xtensor_python_test',
        ['main.cpp'],
        include_dirs=[
            # Path to pybind11 headers
            get_pybind_include(),
            get_pybind_include(user=True),
            # Path to numpy headers
            get_numpy_include(),
            os.path.join(sys.prefix, 'include'),
            os.path.join(sys.prefix, 'Library', 'include')
        ],
        language='c++'
    ),
]
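
# Illustrative sketch, not part of the original script: the two helper
# classes above postpone the pybind11/numpy imports until setuptools turns
# the Extension's include_dirs entries into strings, i.e. only after the
# build requirements are installed. The helper below is hypothetical and is
# never called during a normal build; it only shows that str() is what
# triggers the deferred import.
def _debug_include_dirs():
    return [str(p) for p in (get_pybind_include(), get_numpy_include())]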
""" import tempfile with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f: f.write('int main (int argc, char **argv) { return 0; }') try: compiler.compile([f.name], extra_postargs=[flagname]) except setuptools.distutils.errors.CompileError: return False return True def cpp_flag(compiler): """Return the -std=c++14 compiler flag and errors when the flag is no available. """ if has_flag(compiler, '-std=c++14'): return '-std=c++14' else: raise RuntimeError('C++14 support is required by xtensor!') class BuildExt(build_ext): """A custom build extension for adding compiler-specific options.""" c_opts = { 'msvc': ['/EHsc'], 'unix': [], } if sys.platform == 'darwin': c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7'] def build_extensions(self): ct = self.compiler.compiler_type opts = self.c_opts.get(ct, []) if ct == 'unix': opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version()) opts.append(cpp_flag(self.compiler)) if has_flag(self.compiler, '-fvisibility=hidden'): opts.append('-fvisibility=hidden') elif ct == 'msvc': opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version()) for ext in self.extensions: ext.extra_compile_args = opts build_ext.build_extensions(self) setup( name='xtensor_python_test', version=__version__, author='Sylvain Corlay', author_email='sylvain.corlay@gmail.com', url='https://github.com/pybind/python_example', description='An example project using xtensor-python', long_description='', ext_modules=ext_modules, install_requires=['pybind11>=2.0.1'], cmdclass={'build_ext': BuildExt}, zip_safe=False, ) xtensor-python-0.12.4/test_python/test_pyarray.py000066400000000000000000000056321314412064200223020ustar00rootroot00000000000000############################################################################ # Copyright (c) 2016, Johan Mabille and Sylvain Corlay # # # # Distributed under the terms of the BSD 3-Clause License. # # # # The full license is in the file LICENSE, distributed with this software. # ############################################################################ import os import sys import subprocess # Build the test extension here = os.path.abspath(os.path.dirname(__file__)) subprocess.check_call([sys.executable, os.path.join(here, 'setup.py'), 'build_ext', '--inplace'], cwd=here) # Test it! 
from unittest import TestCase

import xtensor_python_test as xt
import numpy as np


class ExampleTest(TestCase):

    def test_example1(self):
        self.assertEqual(4, xt.example1([4, 5, 6]))

    def test_example2(self):
        x = np.array([[0., 1.], [2., 3.]])
        res = np.array([[2., 3.], [4., 5.]])
        y = xt.example2(x)
        np.testing.assert_allclose(y, res, 1e-12)

    def test_vectorize(self):
        x1 = np.array([[0, 1], [2, 3]])
        x2 = np.array([0, 1])
        res = np.array([[0, 2], [2, 4]])
        y = xt.vectorize_example1(x1, x2)
        np.testing.assert_array_equal(y, res)

    def test_readme_example1(self):
        v = np.arange(15).reshape(3, 5)
        y = xt.readme_example1(v)
        np.testing.assert_allclose(y, 1.2853996391883833, 1e-12)

    def test_complex_overload_reg(self):
        a = 23.23
        c = 2.0 + 3.1j
        self.assertEqual(xt.complex_overload_reg(a), a)
        self.assertEqual(xt.complex_overload_reg(c), c)

    def test_complex_overload(self):
        a = np.random.rand(3, 3)
        b = np.random.rand(3, 3)
        c = a + b * 1j
        y = xt.complex_overload(c)
        np.testing.assert_allclose(np.imag(y), np.imag(c))
        np.testing.assert_allclose(np.real(y), np.real(c))
        x = xt.complex_overload(b)
        self.assertEqual(x.dtype, b.dtype)
        np.testing.assert_allclose(x, b)

    def test_readme_example2(self):
        x = np.arange(15).reshape(3, 5)
        y = [1, 2, 3, 4, 5]
        z = xt.readme_example2(x, y)
        np.testing.assert_allclose(z,
            [[-0.540302,  1.257618,  1.89929 ,  0.794764, -1.040465],
             [-1.499227,  0.136731,  1.646979,  1.643002,  0.128456],
             [-1.084323, -0.583843,  0.45342 ,  1.073811,  0.706945]], 1e-5)

    def test_rect_to_polar(self):
        x = np.ones(10, dtype=complex)
        z = xt.rect_to_polar(x[::2])
        np.testing.assert_allclose(z, np.ones(5, dtype=float), 1e-5)

    def test_shape_comparison(self):
        x = np.ones([4, 4])
        y = np.ones([5, 5])
        z = np.zeros([4, 4])
        self.assertFalse(xt.compare_shapes(x, y))
        self.assertTrue(xt.compare_shapes(x, z))
xtensor-python-0.12.4/xtensor-pythonConfig.cmake.in
############################################################################
# Copyright (c) 2016, Johan Mabille and Sylvain Corlay                     #
#                                                                          #
# Distributed under the terms of the BSD 3-Clause License.                 #
#                                                                          #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################

# xtensor-python cmake module
# This module sets the following variables in your project::
#
#   xtensor-python_FOUND - true if xtensor-python found on the system
#   xtensor-python_INCLUDE_DIRS - the directory containing xtensor-python headers
#   xtensor-python_LIBRARY - empty

@PACKAGE_INIT@

set(PN xtensor-python)
set_and_check(${PN}_INCLUDE_DIRS "${PACKAGE_PREFIX_DIR}/@CMAKE_INSTALL_INCLUDEDIR@")
set(${PN}_LIBRARY "")
check_required_components(${PN})
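
# Illustrative downstream sketch, not shipped with xtensor-python: once the
# package is installed, a consumer project can locate the headers through the
# config file above. The project, target and source names are hypothetical,
# and pybind11 is assumed to provide the pybind11_add_module command.
cmake_minimum_required(VERSION 3.1)
project(xtensor_python_consumer CXX)

find_package(pybind11 REQUIRED)
find_package(xtensor-python REQUIRED)

pybind11_add_module(consumer_module src/main.cpp)
target_include_directories(consumer_module PRIVATE ${xtensor-python_INCLUDE_DIRS})
set_target_properties(consumer_module PROPERTIES CXX_STANDARD 14)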