pax_global_header00006660000000000000000000000064141626403550014520gustar00rootroot0000000000000052 comment=97de48ccd2e8a6e5989c1bd1f35caba94df5e07b pythran-0.10.0+ds2/000077500000000000000000000000001416264035500137475ustar00rootroot00000000000000pythran-0.10.0+ds2/.clang-format000066400000000000000000000036651416264035500163340ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: LLVM AccessModifierOffset: -2 AlignAfterOpenBracket: true AlignEscapedNewlinesLeft: false AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AllowShortFunctionsOnASingleLine: None AlwaysBreakAfterDefinitionReturnType: false AlwaysBreakTemplateDeclarations: true AlwaysBreakBeforeMultilineStrings: false BreakBeforeBinaryOperators: None BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: false BinPackParameters: true BinPackArguments: true ColumnLimit: 80 ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 4 DerivePointerAlignment: false ExperimentalAutoDetectBinPacking: false IndentCaseLabels: false IndentWrappedFunctionNames: false IndentFunctionDeclarationAfterType: false MaxEmptyLinesToKeep: 1 KeepEmptyLinesAtTheStartOfBlocks: true NamespaceIndentation: All ObjCBlockIndentWidth: 2 ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakString: 1000 PenaltyBreakFirstLessLess: 120 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Right SpacesBeforeTrailingComments: 1 Cpp11BracedListStyle: true Standard: Cpp11 IndentWidth: 2 TabWidth: 8 UseTab: Never BreakBeforeBraces: Linux SpacesInParentheses: false SpacesInSquareBrackets: false SpacesInAngles: false SpaceInEmptyParentheses: false 
SpacesInCStyleCastParentheses: false SpaceAfterCStyleCast: false SpacesInContainerLiterals: true SpaceBeforeAssignmentOperators: true ContinuationIndentWidth: 4 CommentPragmas: '^ IWYU pragma:' ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] SpaceBeforeParens: ControlStatements DisableFormat: false ... pythran-0.10.0+ds2/.github/000077500000000000000000000000001416264035500153075ustar00rootroot00000000000000pythran-0.10.0+ds2/.github/workflows/000077500000000000000000000000001416264035500173445ustar00rootroot00000000000000pythran-0.10.0+ds2/.github/workflows/core.yml000066400000000000000000000022601416264035500210170ustar00rootroot00000000000000name: core on: push: branches: - master pull_request: branches: - master jobs: build: runs-on: ubuntu-18.04 strategy: matrix: python-version: [3.6, 3.8, 3.9, 3.10-dev] cpp-version: [g++-5, clang-5.0] steps: - uses: actions/checkout@v2 - name: Setup Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt pip install ipython nbval pytest-xdist cython wheel if test ${{ matrix.python-version }} != '3.10-dev' ; then pip install scipy ; fi sudo apt install libopenblas-dev ${{ matrix.cpp-version }} - name: Setup run: | python setup.py install printf '[commpiler]\nblas=openblas\n' > ~/.pythranrc - name: Testing sequential run: | export CC=`echo ${{ matrix.cpp-version }} | sed -e 's/g++/gcc/'` export CXX=`echo ${{ matrix.cpp-version }} | sed -e 's/clang/clang++/'` pytest pythran/tests/test_*.py -v -x --numprocesses=auto $PYTEST_ARGS pythran-0.10.0+ds2/.github/workflows/doc.yml000066400000000000000000000020721416264035500206350ustar00rootroot00000000000000name: doc on: push: branches: - master pull_request: branches: - master jobs: build: runs-on: ubuntu-18.04 strategy: matrix: python-version: [3.6] cpp-version: [g++-5, clang-5.0] steps: - uses: actions/checkout@v2 - 
name: Setup Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt pip install ipython nbval pytest-xdist cython scipy wheel sudo apt install libopenblas-dev ${{ matrix.cpp-version }} - name: Setup run: | python setup.py install printf '[commpiler]\nblas=openblas\n' > ~/.pythranrc - name: Testing documentation run: | export CC=`echo ${{ matrix.cpp-version }} | sed -e 's/g++/gcc/'` export CXX=`echo ${{ matrix.cpp-version }} | sed -e 's/clang/clang++/'` pytest pythran/tests/notebooks docs/examples --nbval pythran-0.10.0+ds2/.github/workflows/parallel.yml000066400000000000000000000032021416264035500216600ustar00rootroot00000000000000name: parallel on: push: branches: - master pull_request: branches: - master jobs: build: runs-on: ubuntu-18.04 strategy: matrix: python-version: [3.6, 3.8] cpp-version: [g++-5, clang-5.0] steps: - uses: actions/checkout@v2 - name: Setup Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt pip install ipython nbval pytest-xdist cython scipy wheel # libgomp comes with gcc-5, for clang we need to install libomp sudo apt install libopenblas-dev ${{ matrix.cpp-version }} if [[ ${{ matrix.cpp-version }} == 'clang-5.0' ]]; then sudo apt install libomp5 libomp-dev; fi - name: Setup run: | python setup.py install printf '[compiler]\nblas=openblas\n' > ~/.pythranrc - name: Testing vectorized run: | export CC=`echo ${{ matrix.cpp-version }} | sed -e 's/g++/gcc/'` export CXX=`echo ${{ matrix.cpp-version }} | sed -e 's/clang/clang++/'` export CXXFLAGS="-DUSE_XSIMD -march=native" pytest pythran/tests/test_cases.py -v --numprocesses=auto - name: Testing parallel run: | export CC=`echo ${{ matrix.cpp-version }} | sed -e 
's/g++/gcc/'` export CXX=`echo ${{ matrix.cpp-version }} | sed -e 's/clang/clang++/'` export CXXFLAGS="-fopenmp" export LDFLAGS="-fopenmp" pytest pythran/tests/test_cases.py -v --numprocesses=1 pythran-0.10.0+ds2/.github/workflows/windows.yml000066400000000000000000000020541416264035500215620ustar00rootroot00000000000000name: ms on: push: branches: - master pull_request: branches: - master jobs: build: runs-on: windows-latest strategy: matrix: architectures: [x86, x64] steps: - uses: actions/checkout@v2 - name: Setup Python uses: actions/setup-python@v2 with: python-version: 3.7 architecture: ${{ matrix.architecture }} - name: Install clang-cl run: | choco install llvm --yes @("C:/Program Files/LLVM/bin") + (Get-Content $env:GITHUB_PATH) | Set-Content $env:GITHUB_PATH - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements.txt pip install scipy wheel pythran-openblas pytest - name: Setup run: | python setup.py install - name: Testing sequential run: | pytest pythran/tests/test_ndarray.py -v -x pytest pythran/tests/test_scipy.py -v -x pytest pythran/tests/test_base.py -v -x pytest pythran/tests/test_advanced.py -v -x pythran-0.10.0+ds2/.gitignore000066400000000000000000000003561416264035500157430ustar00rootroot00000000000000*.so *.pyc build dist website/*.html website/license.txt pythran/nt2 pythran/boost pythran/xsimd parsetab.py .eggs/* pythran.egg-info/* docs/index.rst docs/LICENSE.rst docs/Changelog.rst docs/AUTHORS.rst docs/SUPPORT.rst docs/_build/* pythran-0.10.0+ds2/AUTHORS000066400000000000000000000004111416264035500150130ustar00rootroot00000000000000======= Authors ======= - Serge Guelton - Pierrick Brunet - Adrien Merlini - Alan Raynaud - Mehdi Amini pythran-0.10.0+ds2/Changelog000066400000000000000000000400541416264035500155640ustar00rootroot000000000000002021-09-08 Serge Guelton * Fix performance issue with assert handling * Fix issue in libomp detection * Support immediate value for some keyword parameters, 
esp; keep_dims parameter * Better detection of generalized expression overlap * And extra minor fixes :-) 2021-07-06 Serge Guelton * Remove six, networkx and decorator dependency * Bump gast and Beniget requirements to support python 3.10 * Bump xsimd to 7.5.0 * Minimal default support for non-linux, non-osx, now-windows platform * Numpy improvements for np.bincount, np.transpose, np.searchsorted * Restore (and test) cython compatibility * Expose pythran.get_include for toolchain integration * Improve error message on invalid spec * Handle static dispatching based on keyword signature * Raise Memory Error upon (too) large numpy alloc * Support scalar case of scipy.special.binom * Trim the number of warnings in pythonic codebase 2021-05-23 Serge Guelton * Fix compatibility issue with python 3.10 2021-05-09 Serge Guelton * Honor CXXFLAGS and LDFLAGS * Generalize numpy.dot to higher dimenson (partial support) * Fix important memory leak in handling of transposed matrices * Fix several string interaction (str.split, f-string) * Fix interaction with numpy.dtype.type * Improve OpenMP detection * Optimize some matrix transpose cases 2021-03-30 Serge Guelton * Always honor $HOME for user configuration lookup (even on Windows) * Default to clang-cl on windows * Honor CFLAGS environment variable * Correctly type tuples that are not bound to a variable * Move to pocketfft for fft related operations * Support numpy.vdot, numpy.dot between array of different dtype, improve numpy.copyto, numpy.ndarray constructor, numpy.ihfft, numpy.hfft, numpy.full and numpy.full_like * Return a floating point type when computing builtins.pow, unless the exponenent is a positive integer literal * Optimize shared reference of array expressions (not you again!) 
* Introduce a specific type for strings of one element, aka chr * Fix implementation of str.lstrip and str.rstrip, harden str.join * Improve quality of the error report * Detect divide by zero in debug mode * Improve PythranBuildExt to support base class customization 2020-11-09 Serge Guelton * Basic f-string support * Optimize shared reference of array expressions (again) * np.nan{min,max}, np.around, np.wrap implementation fixes * PYTHONOPTIMIZE={1,2} compatibility * Support list.sort with key argument 2020-09-22 Serge Guelton * Improve computation of contiguous slices when bounds are positive * Optimize shared reference of array expressions * Speedup complex combined types computation * Fix pythran-config with cl.exe or clang-cl.exe * Support Python 3.9 2020-08-01 Serge Guelton * First release only supporting Python3 * Allow to disable blas usage through blas=none setting * Improve range value analysis * Change numpy expression evaluator * Change sorting algorithm from std::sort to pdqsort * Bug fixes in various numpy.* implementation * Freeze gast and beniget version * Diligently use and document assert behavior * Many fix related to static conditions * Improve np.sort and np.median argument support * Improve scope computation in presence of if/else * Refuse assert with side effect * Provide sane default for OpenMP reduction on complex * Upgrade boost version to 1.72 * Allow %{ext} substitution in output filename * Prettier pythran syntax error * Faster compilation (in some cases!) 
* Partial np.tofile implementation * Allow to specify --config 'pythran.optimizations=' on the CLI * Fix interaction between OpenMP collapse and pythran-generated loops 2019-12-31 Serge Guelton * Last release supporting both Python2 and Python3 * Vectorized version of numpy.arange * Support more numpy type conversion operators * Improve translation time * Version bump for xsimd dependency * Fix compile without ENABLE_PYTHON_MODULE * Various fixes for bug introduced in previous revision ^^! 2019-10-30 Serge Guelton * Support Python up to 3.8 (included) * clang-cl on Windows support * PyPy3.6 support * Fix bug involving is None and loops * Support numpy.heaviside, numpy.cross * Significant improvement of numpy.random.* support * Fix memory leak when converting a dict to python * New optimization: dead function elimination * Support for kwonly arguments * More pattern transformations: numpy.cbrt, numpy.sqrt and variant of abssqr * Support dtype argument for numpy.linspace * Improve constant folding & forward substitution * Extend range analysis to tuples * Basic support for scipy.special.spherical_jn and scipy.special.spherical_yn * Support isinstance builtin * Support the type() builtin * New command line parameter: --config= as an alternative to .pythranrc * Various fixes and doc upgrade I'm too lazy to list here ;-) 2019-08-19 Serge Guelton * Warn users about deprecation of python2 * Fix incompatibility with numpy 1.17.0 * Allow pythran-openblas as a fallback dependency for openblas * Better 32bit arch support * Better support of OpenMP collapse clause * Upgrade boost dep to 1.68 and xsimd dep to 7.2.2 * Use static shape information for more efficient broadcasting * Allow / options on Windows platform in addition to Unix-style options * Fix typing issue for r-value dict/set/list * Allow to pass slice to pythran-exported functions * Fix np.arange for integral numbers * Fix static if support * Support tuple arguments for np.concatenate * Support default arguments for 
np.randint * Support kind argument for np.sort 2019-05-05 Serge Guelton * Better support for `is None` pattern * Support numpy.byte in code and annotation * Fix string slice assignment * Support numpy.interp * Improved Windows support * Fix numpy.fft in presence of threading * Better error message upon type mismatch * Extended support for numpy.append * Fix ndarray printing * Property report error on Elippsis * Optimize away some uses of np.array * Properly support keyword-only arguments from Python3 * Improved fixed-size array broadcasting * New annotation syntax for default argument through the '?' qualifier * Stricter type checking between Python and Pythran result type * Rely on beniget for some analyses * Fix dtype inference for OSX and Windows, wrt 32/64 bits * Generate code compatible with OpenMP collapse clause * Fix np.bincount, np.angle, np.fromiter implementation * Improved (but still incomplete) support of slicing of an array through an array * Allow specification of memory layout for 2D array parameters in annotations * Avoid useless copies for some dict operations * Support np.expand_dims, np.correlate, np.convolve, np.setdiff1d * Detect and specialize code for fixed-size list * Support more dot-idiom-to-blas forwarding * Fix important memory leak in numpy_gexpr to_python 2019-01-29 Serge Guelton * Fix np.transpose regression * Upgrade xsimd to 7.1.2 * Fix setup.py test target 2019-01-18 Serge Guelton * Honor PYTHRANRC environment variable for config file lookup * Stricter type checking for export parameters * Allow some kind of list to tuple conversion * Lazy slicing of broadcasted expression and transposed expression * Support numpy.stack, numpy.rollaxis, numpy.broadcast_to, numpy.ndarray.dtype.type * Better support of array of complex numbers * Verbose mode in pythran-config to debug compiler backend issues * Config file linting * Evaluate numpy.arange lazily when valid * Faster PRNG, namely pcg * Favor Python3 support in various places * Fix 
Numpy.remainder implementation * Better support for importing user modules * More vectorized operations support 2018-11-06 Serge Guelton * Moving to xsimd as vectorization engine, requires -DUSE_XSIMD * Better support of view conversion from Python to Pythran * Improved Cython integration * Improved documentation, add example section * Updated ArchLinux Packaging * Remove useless warnings during compilation * Faster ``abs(x**2)`` for complex numbers * IPython magic now accepts most optimization flags * Automatic detection of partially (or fully) constant shape for arrays * Add ignoreflags settings to .pythranrc to skip some compiler flags * Quad number (aka long double) basic support * And many minor bugfixes, as usual :-) 2018-09-16 Serge Guelton * Decent support for ``is None`` pattern * Bessel functions, aka scipy.special.stuff * ``__builtin__.slice`` support * ``np.unravel_index``, ``np.(u)intc`` support * Cleanup and fix issues around sdist / build_ext and stuff * O(1) View conversion to Python * Dropped big int support * Speed improvement for ``np.argmin``, ``np.argmax`` and square of complex * Allow partially (or fully) constant shape for arrays * Deterministic header inclusion (toward reproducible build?) * Better error report on argument mismatch * Better support for view arguments (but still lotta missing :-/) * Better Windows support (fix compiler-specific bugs) including OpenMP 2018-06-06 Serge Guelton * Much better support of fancy indexing, still not 100% support though * Better distutils integration (extra_compile_args) * Better support for np.unique, functor, np.Inf, np.arange, it.repeat * Better support of user import from pythranized module (still no globals) * Better OSX support (fix compiler-specific bugs) * Most tests now work on Python 2 and Python 3 * Many bugfixes triggered by the bug report of Yann Diorcet, Jean Laroche and David Menéndez Hurtado, Fabien Rozar, C. 
Claus 2018-04-23 Serge Guelton * numpy.fft support (thanks to Jean Laroche) * Faster generalized expression * Faster numpy.transpose, numpy.argmax, numpy reduction * Sphinx-compatible generated docstring (thanks to Pierre Augier) * Python output through ``-P`` (thanks to Pierre Augier) * Many bugfixes and numpy improvements (thanks to Yann Diorecet and Jean Laroche) 2018-02-05 Serge Guelton * Slimmer generated binaries * Preliminary native Windows support for Python3 * Several numpy-related function improvements * @ operator support * Better negative index support and range detection * Glimpses of OpenMP4 support, min/max support in OpenMP reductions * Python Capsule support * Work around GCC-7 parsing bug 2017-10-14 Serge Guelton * Moving to networkx 2.0 * List/Tuple partial constant folding * Minor notebook integration fixes * Minor cython integration fixes * Memory leak removal * Support out field in numpy.sum 2017-09-12 Serge Guelton * Significant compilation time improvements (again) * Improved cython cooperation * Many OSX and Python3 integration fixes * Revive pure C++ mode * Exported functions now support keyword passing style 2017-07-04 Serge Guelton * Significant compilation time improvements * Support for separated .pythran files * Many bug fixes and perf improvement 2017-01-05 Serge Guelton * Python 3 support * (unsound) Type Checker * Various bug fixes and perf improvement, as usual 2016-07-05 Serge Guelton * Fix install / setup minor issues * Restore OpenMP support * Fix GMP installation issue 2016-06-13 Serge Guelton * Better Jupyter Note book integration * Numpy Broadcasting support * Improved value binding analysis * Simple inlining optimization * Type engine improvement * Less fat in the generated modules * More and better support for various Numpy functions * Various performance improvement * Global variable handling, as constants only though 2016-01-05 Serge Guelton * IPython's magic for pythran now supports extra compile flags * Pythran's C++ 
output is compatible with Python3 and pythran3 can compile it! * More syntax checks (and less template traceback) * Improved UI (multiline pythran exports, better setup.py...) * Pythonic leaning / bugfixing (this tends to be a permanent item) * More generic support for numpy's dtype * Simpler install (no more boost.python deps, nor nt2 configuration) * Faster compilation (no more boost.python deps, smarter pass manager) * Better testing (gcc + clang) 2015-10-13 Serge Guelton * Significantly decrease compilation time * Faster execution of numpy generalized slicing * Tentative conda support * Tentative Windows support (using Win Python) * Preserve original docstrings * Add __pythran__ global variable to pythran generated modules * Faster implementation of various itertools functions * Rely on distutils for module code compilation * Support most of numpy.random * Remove git and make dependency to install nt2 * Proper pip support instead of distuils * Remove dependency to boost.python * Remove dependency to tcmalloc * Pythonic library cleaning (less dependencies / header / splitting / mrpropering) * More lazy computations * More numpy function support (including dot on matrices, linalg.norm, mean) * Lot of code cleaning / refactoring (both in Python and C++) * Many bugfixes, thanks to all the bug reporters! 2015-04-06 Serge Guelton * Various numpy.* function implementation improvement (incl. 
concatenate, str.join, itertools.combinations) * Better error detection during install step * 32 bit compatibility * Complete rewrite of the expression engine * Improved support of numpy extended expression * Better user feedback on invalid pythran spec * More efficient support of string literals * Faster exponentiation when index is an integer * NT2 revision bump * No-copy list as numpy expression parameters * Accept C and fortran layout for input arrays * Range value analysis and boundcheck removal * Newaxis style indexing * Better array-of-complex support * Glimpses of python3 support * Support for importing user defined modules * Archlinux support * Accept strided array as exported function input 2014-10-22 Serge Guelton * Full SIMD support! Almost all numpy expressions are vectorized * Better memory management at the Python/C++ layer, esp. when sharing * Support named parameters * Better complex numbers support * A lot of internal code cleaning * Better code generation for regular loops * MacOS install guide & ArchLinux packages * Travis run the test suite, w and w/ SIMD, w and w/ OpenMP * Many performance improvements at the numpy expression level * Faster array copies, including slices * Much better constant folding * Distutils support through a PythranExtension * Improve implementation of many numpy functions * Improve forward substitution * Use most recent nt2 version * Make dependency on libgomp optional 2014-05-17 Serge Guelton * Improved C++ compilation time (twice as fast) * Efficient extended slicing * Support most numpy dtype ([u]int8,..., [u]int64, float32, float64) * Support indexing array through boolean array * Add a nice Pythran logo :-) * Improve validation *a lot* * Reduce native module loading overhead * Forward substitution implementation * More numpy support and *many* bug fixes! 
* Remove array auto vectorization/parallelization pythran-0.10.0+ds2/LICENSE000066400000000000000000000027421416264035500147610ustar00rootroot00000000000000Copyright (c) 2012, HPC Project and Serge Guelton All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of HPCProject, Serge Guelton nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
pythran-0.10.0+ds2/MANIFEST.in000066400000000000000000000002151416264035500155030ustar00rootroot00000000000000include AUTHORS include Changelog include LICENSE include README.rst include requirements.txt include docs/* recursive-include third_party * pythran-0.10.0+ds2/README.rst000066400000000000000000000106361416264035500154440ustar00rootroot00000000000000Pythran ####### https://pythran.readthedocs.io What is it? ----------- Pythran is an ahead of time compiler for a subset of the Python language, with a focus on scientific computing. It takes a Python module annotated with a few interface descriptions and turns it into a native Python module with the same interface, but (hopefully) faster. It is meant to efficiently compile **scientific programs**, and takes advantage of multi-cores and SIMD instruction units. Until 0.9.5 (included), Pythran was supporting Python 3 and Python 2.7. It now only supports Python **3**. Installation ------------ Pythran sources are hosted on https://github.com/serge-sans-paille/pythran. Pythran releases are hosted on https://pypi.python.org/pypi/pythran. Pythran is available on conda-forge on https://anaconda.org/conda-forge/pythran. Debian/Ubuntu ============= Using ``pip`` ************* 1. Gather dependencies: Pythran depends on a few Python modules and several C++ libraries. On a debian-like platform, run:: $> sudo apt-get install libatlas-base-dev $> sudo apt-get install python-dev python-ply python-numpy 2. Install with ``pip``:: $> pip install pythran Using ``mamba`` or ``conda`` **************************** 1. Using ``mamba`` (https://github.com/conda-forge/miniforge#mambaforge) or ``conda`` (https://github.com/conda-forge/miniforge) 2. 
Run:: $> mamba install -c conda-forge pythran or:: $> conda install -c conda-forge pythran Mac OSX ======= Using brew (https://brew.sh/):: $> pip install pythran $> brew install openblas $> printf '[compiler]\nblas=openblas\ninclude_dirs=/usr/local/opt/openblas/include\nlibrary_dirs=/usr/local/opt/openblas/lib' > ~/.pythranrc Depending on your setup, you may need to add the following to your ``~/.pythranrc`` file:: [compiler] CXX=g++-4.9 CC=gcc-4.9 ArchLinux ========= Using ``pacman``:: $> pacman -S python-pythran Fedora ====== Using ``dnf``:: $> dnf install pythran Windows ======= Windows support is on going and only targets Python 3.5+ with either Visual Studio 2017 or, better, clang-cl:: $> pip install pythran Note that using ``clang-cl.exe`` is the default setting. It can be changed through the ``CXX`` and ``CC`` environment variables. Other Platform ============== See MANUAL file. Basic Usage ----------- A simple pythran input could be ``dprod.py`` .. code-block:: python """ Naive dotproduct! Pythran supports numpy.dot """ #pythran export dprod(int list, int list) def dprod(l0,l1): """WoW, generator expression, zip and sum.""" return sum(x * y for x, y in zip(l0, l1)) To turn it into a native module, run:: $> pythran dprod.py That will generate a native dprod.so that can be imported just like the former module:: $> python -c 'import dprod' # this imports the native module instead Documentation ------------- The user documentation is available in the MANUAL file from the doc directory. The developer documentation is available in the DEVGUIDE file from the doc directory. There is also a TUTORIAL file for those who don't like reading documentation. The CLI documentation is available from the pythran help command:: $> pythran --help Some extra developer documentation is also available using pydoc. Beware, this is the computer science incarnation for the famous Where's Waldo? 
game:: $> pydoc pythran $> pydoc pythran.typing $> pydoc -b # in the browser Examples -------- See the ``pythran/tests/cases/`` directory from the sources. Contact ------- Praise, flame and cookies: - pythran@freelists.org -- register at https://www.freelists.org/list/pythran first! - #pythran on OFTC, https://oftc.net - serge.guelton@telecom-bretagne.eu The mailing list archive is available at https://www.freelists.org/archive/pythran/. Citing ------ If you need to cite a Pythran paper, feel free to use .. code-block:: bibtex @article{guelton2015pythran, title={Pythran: Enabling static optimization of scientific python programs}, author={Guelton, Serge and Brunet, Pierrick and Amini, Mehdi and Merlini, Adrien and Corbillon, Xavier and Raynaud, Alan}, journal={Computational Science \& Discovery}, volume={8}, number={1}, pages={014001}, year={2015}, publisher={IOP Publishing} } Authors ------- See AUTHORS file. License ------- See LICENSE file. pythran-0.10.0+ds2/docs/000077500000000000000000000000001416264035500146775ustar00rootroot00000000000000pythran-0.10.0+ds2/docs/CLI.rst000066400000000000000000000074501416264035500160460ustar00rootroot00000000000000Command Line Interface ###################### This file shows some use case of Pythran on the command line. Firstly lets clear the working space:: $> rm -f cli_* .. Small hack to setup the $PATH in a compatible way .. >>> import os, pythran, re .. >>> if 'lib' in pythran.__file__: os.environ['PATH'] = re.sub(r'(.*)/lib/.*', r'\1/bin:', pythran.__file__) + os.environ['PATH'] .. 
>>> os.environ['PATH'] = './scripts:' + os.environ['PATH'] One of the most classic use case in Pythran is to generate a native .so module:: $> printf '#pythran export foo()\n#pythran export msg\nmsg = \"hello world\"\ndef foo(): print(msg)' > cli_foo.py $> pythran cli_foo.py -o cli_foo.so $> ls cli_foo.so cli_foo.so The generated native ``.so`` module can then be used with the Python interpreter:: $> python -c 'import cli_foo ; cli_foo.foo()' hello world $> python -c 'import cli_foo ; print(cli_foo.msg)' hello world Pythran version can be dumped through ``--version``:: $> pythran --version 2>&1 0.10.0 The module-level ``__pythran__`` variable indicates that the module loaded has been pythranized:: $> python -c 'import cli_foo ; print(hasattr(cli_foo, \"__pythran__\"))' True You can choose your optimization level by using ``-O`` flag:: $> rm cli_foo.so $> pythran cli_foo.py -O2 -o cli_foo.so $> ls cli_foo.so cli_foo.so Out of curiosity, you can check the generated output:: $> pythran -E cli_foo.py That's some heavily templated code ;-) Pythran can then compile it for you to a Python module:: $> pythran cli_foo.cpp -o cli_foo.so Pythran can also generate raw C++ code, using the ``-e`` switch:: $> printf 'msg = \"hello world\"\ndef foo(): print(msg)' > cli_foo.py $> pythran -e cli_foo.py -o cli_foo.hpp $> printf '#include \"cli_foo.hpp\"\nusing namespace __pythran_cli_foo ; int main() { foo()(); return 0 ; }' > cli_foo.cpp $> `pythran-config --compiler --cflags` -std=c++11 cli_foo.cpp -o cli_foo $> ./cli_foo hello world You can use ``-p`` option to apply a Pythran optimization. 
For example, the python code can propagate constants using the Pythran ConstantFolding optimization:: $> pythran -e cli_foo.py -p pythran.optimizations.ConstantFolding If you want to specify the path of generated file:: $> pythran cli_foo.py -o /tmp/cli_foo.so -DNDEBUG $> ls /tmp/cli_foo.so /tmp/cli_foo.so To know more options about Pythran, you can check:: $> pythran --help usage: pythran [-h] [-o OUTPUT_FILE] [-P] [-E] [-e] [-v] [-w] [-V] [-p pass] [-I include_dir] [-L ldflags] [-D macro_definition] [-U macro_definition] [--config config] input_file pythran: a python to C++ compiler positional arguments: input_file the pythran module to compile, either a .py or a .cpp file optional arguments: -h, --help show this help message and exit -o OUTPUT_FILE path to generated file. Honors %{ext}. -P only run the high-level optimizer, do not compile -E only run the translator, do not compile -e similar to -E, but does not generate python glue -v be more verbose -w be less verbose -V, --version show program's version number and exit -p pass any pythran optimization to apply before code generation -I include_dir any include dir relevant to the underlying C++ compiler -L ldflags any search dir relevant to the linker -D macro_definition any macro definition relevant to the underlying C++ compiler -U macro_definition any macro undef relevant to the underlying C++ compiler --config config config additional params It's a megablast! pythran-0.10.0+ds2/docs/DEVGUIDE.rst000066400000000000000000000156551416264035500166410ustar00rootroot00000000000000Developer Guide ############### Do not hang around in Pythran code base without your developer guide! It is the compass that will guide you in the code jungle! Disclaimer ---------- This document is a never ending work-in-progress draft. Please contribute! Configuration ------------- Pythran can be configured with a rc file. An example is found in `pythran/pythran.cfg`. Look at it! 
To customize it:: $> cp pythran/pythran.cfg ~/.pythranrc In particular, you may want to add ``-g -O0`` to the ``cxxflags``. Coding Style ------------ All Python code must be conform to the PEP 8, and the ``flake8`` command must not yield any message when run on our database. Additionally, avoid backslashes, and try to make your code as concise as possible. $> flake8 pythran/*.py pythran/*/*.py --exclude="pythran/tests/test*.py,__init__.py" C++ code use spaces (no tabs) and a tab width of 4. File Hierarchy -------------- Listing the top level directory yields the following entries: setup.py The files that describes what gets installed, that holds ``PyPI`` entries and such. docs/ If you're reading this document, you know what it's all about! ``MANUAL`` is the user documentation and ``DEVGUIDE`` is the developer documentation. Use ``make`` from this directory to produce the static website. LICENSE Boring but important stuff. MANIFEST.in Describe additional stuff to package there. README.rst Quick introduction and description of _pythran_. pythran/ The source of all things. pythran/tests/ The source of all issues. pythran/pythonic/ Where C++ back-end lies. Validation ---------- ``pythran`` uses the ``unittest`` module and the `pytest `_ package to manage test cases. The whole validation suite is run through the command:: $> python -m pytest pythran/tests To run it faster we use the ``pytest`` extension `xdist `_, the test suite will run using all available cores. Otherwise it might run **very** slowly, something like four hours on a decent laptop :'(. Note that it is possible to use the ``pytest`` module to pass a subset of the test suite:: $> pytest -n 8 pythran/tests/test_list.py runs all the tests found in ``pythran/tests/test_list.py``. Only compiler tests can be check using test filtering:: $> pytest -n 8 pythran/tests -m "not module" There are two kinds of tests in ``pythran``: 1. unit tests that test a specific feature of the implementation. 
Such tests are listed as method of a class deriving from ``test_env.TestEnv`` and must call the ``run_test(function_to_translate, *effective_parameters, **name_to_signature)`` method [1]_. It translates ``function_to_translate`` into a native function using the type annotations given in the ``name_to_signature`` dictionary, runs both the python and the native version with ``effective_parameters`` as arguments and asserts the results are the same. .. [1] See examples in ``pythran/tests/test_base.py`` for more details. 2. test cases that are just plain python modules to be converted in native module by ``pythran``. It is used to test complex situations, codes or benchmarks found on the web etc. They are just translated, not run. These test cases lie in ``pythran/tests/cases/`` and are listed in ``pythran/tests/test_cases.py``. C++ runtime ----------- The C++ code generated by ``pythran`` relies on a specific back-end, ``pythonic``. It is a set of headers that mimics Python's intrinsics and collections behavior in C++. It lies in ``pythran/pythonic/``. There is one directory per module, e.g. ``pythran/pythonic/numpy`` for the ``numpy`` module, and one file per function, e.g. ``pythran/pythonic/numpy/ones.hpp`` for the ``numpy.ones`` function. Type definitions are stored in the seperate ``pythran/pythonic/types`` directory, one header per type. Each function header must be ``#includ``-able independently, i.e. it itself includes all the type and function definition it needs. This helps keeping compilation time low. All Pythran functions and types live in the ``pythonic`` namespace. Each extra module defines a new namespace, like ``pythonic::math`` or ``pythonic::random``, and each type is defined in the ``pythonic::types`` namespace. The ``DECLARE_FUNCTOR`` and ``DEFINE_FUNCTOR`` macros from ``pythonic/utils/functor.hpp`` is commonly used to convert functions into functors and put them into the mandatory ``functor`` namespace. 
The pythonic runtime can be used without Python support, so it is important to protect all Python-specific stuff inside ``ENABLE_PYTHON_MODULE`` guard. All methods are represented by functions in Pythran. The associated pseudo-modules are prefixed and suffixed by a double underscore ``__``, as in ``pythran/pythonic/__list__``. Benchmarking and Testing ------------------------ Stand-alone algorithms are put into ``pythran/tests/cases``. They must be valid Pythran input (including spec annotations). To be taken into account by the validation suite, they must be listed in ``pythran/tests/test_cases.py``. To be taken into account by the benchmarking suite, they must have a line starting with the ``#runas`` directive. Check ``pythran/tests/matmul.py`` for a complete example. To run the benchmark suite, one can rely on:: $> python setup.py bench --mode= where ** is one among: python Uses the interpreter used to run ``setup.py``. pythran Uses the Pythran compiler. pythran+omp Uses the Pythran compiler in OpenMP mode. All measurements are made using the ``timeit`` module. The number of iterations is customizable through the ``--nb-iter`` switch. How to ------ :Add support for a new module: 1. Provide its C++ implementation in ``pythran/pythonic++/``. ``pythran/pythonic++/math/*.hpp`` and ``pythran/pythonic++/__list__/*.hpp`` are good example to referer to. 2. Provide its description in ``pythran/tables.py``. Each function, method or variable must be listed there with the appropriate description. 3. Provide its test suite in ``pythran/tests/`` under the name ``test_my_module.py``. One test case per function, method or variable is great. :Add a new analysis: 1. Subclass one of ``ModuleAnalysis``, ``FunctionAnalysis`` or ``NodeAnalysis``. 2. List analysis required by yours in the parent constructor, they will be built automatically and stored in the attribute with the corresponding uncameled name. 3. Write your analysis as a regular ``ast.NodeVisitor``. 
The analysis result must be stored in ``self.result``. 4. Use it either from another pass's constructor, or through the ``passmanager.gather`` function. :Push changes into the holy trunk: 1. Use the ``github`` interface and the pull/push requests features 2. Make your dev available on the web and asks for a merge on the IRC channel ``#pythran`` on ``irc.oftc.net`` (via your browser: https://webchat.oftc.net) pythran-0.10.0+ds2/docs/EXAMPLES.rst000066400000000000000000000001521416264035500166450ustar00rootroot00000000000000Examples ======== .. toctree:: examples/Distutils Sample Project examples/Third Party Libraries pythran-0.10.0+ds2/docs/INTERNAL.rst000066400000000000000000000204141416264035500166460ustar00rootroot00000000000000Internals ######### This document describes some internals of Pythran compiler. Pythran pass management is used throughout the document:: >>> from pythran import passmanager, analyses, optimizations, backend >>> pm = passmanager.PassManager('dummy') To retrieve the code source from a function definition, the ``inspect`` module is used:: >>> from inspect import getsource And to turn source code into an AST(Abstract Syntax tree), Python provides the ``ast`` module:: >>> import gast as ast >>> getast = lambda f: ast.parse(getsource(f)) Scoping ------- There are only two scopes in Python: ``globals()`` and ``locals()``. When generating C++ code, Pythran tries its best not to declare variables at the function level, but using the deepest scope. This provides two benefits: 1. It makes writing OpenMP clauses easier, as local variables are automatically marked as private; 2. It avoids to build variables with the empty constructor then assigning them a value. Let's illustrate this with two simple examples. In the following function, variable ``a`` has to be declared outside of the ``if`` statement:: >>> def foo(n): ... if n: ... a = 1 ... else: ... a = 2 ... 
return n*a When computing variable scope, one gets a dictionary binding nodes to variable names:: >>> foo_tree = getast(foo) >>> scopes = pm.gather(analyses.Scope, foo_tree) ``n`` is a formal parameter, so it has function scope:: >>> sorted(scopes[foo_tree.body[0]]) ['a', 'n'] ``a`` is used at the function scope (in the ``return`` statement), so even if it's declared in an ``if`` it has function scpe too. Now let's see what happen if we add a loop to the function:: >>> def foo(n): ... s = 0 ... for i in __builtin__.range(n): ... if i: ... a = 1 ... else: ... a = 2 ... s *= a ... return s >>> foo_tree = getast(foo) >>> scopes = pm.gather(analyses.Scope, foo_tree) Variable ``a`` is only used in the loop body, so one can declare it inside the loop:: >>> scopes[tuple(foo_tree.body[0].body[1].body)] {'a'} In a similar manner, the iteration variable ``i`` gets a new value at each iteration step, and is declared at the loop level. OpenMP directives interacts a lot with scoping. In C or C++, variables declared inside a parallel region are automatically marked as private. Pythran emulates this whenever possible:: >>> def foo(n): ... s = 0 ... "omp parallel for reduction(*:s)" ... for i in __builtin__.range(n): ... if i: ... a = 1 ... else: ... a = 2 ... s += a ... return s Without scoping directive, both ``i`` and ``a`` are private:: >>> foo_tree = getast(foo) >>> scopes = pm.gather(analyses.Scope, foo_tree) >>> scopes[foo_tree.body[0].body[2]] {'i'} >>> scopes[tuple(foo_tree.body[0].body[2].body)] {'a'} But if one adds a ``lastprivate`` clause, as in:: >>> def foo(n): ... s = 0 ... a = 0 ... "omp parallel for reduction(*:s) lastprivate(a)" ... for i in __builtin__.range(n): ... if i: ... a = 1 ... else: ... a = 2 ... s += a ... return s, a >>> foo_tree = getast(foo) The scope information change. 
Pythran first needs to understand OpenMP directives, using a dedicated pass:: >>> from pythran import openmp >>> _ = pm.apply(openmp.GatherOMPData, foo_tree) Then let's have a look to :: >>> scopes = pm.gather(analyses.Scope, foo_tree) >>> list(scopes[foo_tree.body[0].body[2]]) # 3nd element: omp got parsed ['i'] >>> list(scopes[foo_tree.body[0]]) ['n'] >>> list(scopes[foo_tree.body[0].body[0]]) ['s'] >>> list(scopes[foo_tree.body[0].body[1]]) ['a'] ``a`` now has function body scope, which keeps the OpenMP directive legal. When the scope can be attached to an assignment, Pythran uses this piece of information:: >>> def foo(n): ... s = 0 ... "omp parallel for reduction(*:s)" ... for i in __builtin__.range(n): ... a = 2 ... s *= a ... return s >>> foo_tree = getast(foo) >>> _ = pm.apply(openmp.GatherOMPData, foo_tree) >>> scopes = pm.gather(analyses.Scope, foo_tree) >>> scopes[foo_tree.body[0].body[1].body[0]] == set(['a']) True Additionally, some OpenMP directives, when applied to a single statement, are treated by Pythran as if they created a bloc, emulated by a dummy conditional:: >>> def foo(n): ... "omp parallel" ... "omp single" ... s = 1 ... return s >>> foo_tree = getast(foo) >>> _ = pm.apply(openmp.GatherOMPData, foo_tree) >>> print(pm.dump(backend.Python, foo_tree)) def foo(n): 'omp parallel' 'omp single' if 1: s = 1 return s However the additional if bloc makes it clear that ``s`` should have function scope, and the scope is not attached to the first assignment:: >>> scopes = pm.gather(analyses.Scope, foo_tree) >>> scopes[foo_tree.body[0]] == set(['s']) True Lazyness -------- ``Expressions templates`` used by numpy internal representation enable laziness computation. It means that operations will be computed only during assignation to avoid intermediate array allocation and improve data locality. Laziness analysis enable Expression template even if there is multiple assignment in some case. Let's go for some examples. 
In ``foo``, no intermediate array are create for ``+`` and ``*`` operations and for each elements, two operations are apply at once instead of one by one:: >>> def foo(array): ... return array * 5 + 3 It also apply for other unary operations with numpy array. In this example, laziness doesn't change anything as is it a typical case for Expression templates but peoples may write:: >>> def foo(array): ... a = array * 5 ... return a + 3 Result is the same but there is a temporary array. This case is detected as lazy and instead of saving the result of ``array * 5`` in ``a``, we save an Expression template type ``numpy_expr`` instead of an evaluated ``ndarray``. Now, have a look at the lazyness analysis's result:: >>> foo_tree = getast(foo) >>> lazyness = pm.gather(analyses.LazynessAnalysis, foo_tree) ``array`` is a parameter so even if we count use, it can't be lazy:: >>> lazyness['a'] 1 It returns the number of use of a variable. Special case is for intermediate use:: >>> def foo(array): ... a = array * 2 ... b = a + 2 ... a = array * 5 ... return a, b In this case, ``b`` is only use once BUT ``b`` depend on ``a`` and ``a`` change before the use of ``b``. In this case, ``b`` can't be lazy so its values is ``inf``:: >>> foo_tree = getast(foo) >>> lazyness = pm.gather(analyses.LazynessAnalysis, foo_tree) >>> sorted(lazyness.items()) [('a', 1), ('array', 2), ('b', inf)] We can notice that a reassignment reinitializes its value so even if ``a`` is used twice, its counters returns ``1``. ``inf`` also happen in case of subscript use as we need to compute the value to subscript on it. Updated values can't be lazy too and variables used in loops too. Laziness also cares about aliased values:: >>> def foo(array): ... a = array * 2 ... b = a ... a_ = b * 5 ... 
return a_ >>> foo_tree = getast(foo) >>> lazyness = pm.gather(analyses.LazynessAnalysis, foo_tree) >>> sorted(lazyness.items()) [('a', 1), ('a_', 1), ('array', 1), ('b', 1)] Doc Strings ----------- Pythran preserves docstrings:: $> printf '#pythran export foo()\n\"top-level-docstring\"\n\ndef foo():\n \"function-level-docstring\"\n return 2' > docstrings.py $> pythran docstrings.py $> python -c 'import docstrings; print(docstrings.__doc__); print(docstrings.foo.__doc__)' top-level-docstring function-level-docstring Supported prototypes: - foo() $> rm -f docstrings.* PyPy3 support ------------- Pythran has been said to work well with PyPy3.6 v7.2.0. However, this setup is not yet tested on Travis so compilation failure may happen. Report them! pythran-0.10.0+ds2/docs/MANUAL.rst000066400000000000000000000534721416264035500164210ustar00rootroot00000000000000User Manual ########### So you want to write algorithms that are easy to maintain as in Python and you want performance as in FORTRAN or C++? Lets give a try to Pythran! Pythran is a Python-to-c++ translator that turns Python modules into native c++11 modules. From a user point of view, you still ``import`` your module, but under the hood... There is much more happening! Disclaimer ---------- Pythran is *not* a full Python-to-c++ converter, as is *shedskin*. Instead it takes a subset of the Python language and turns it into heavily templatized c++ code instantiated for your particular types. Say hello to: - polymorphic functions (!) - lambdas - list comprehension - map, reduce and the like - dictionary, set, list - exceptions - file handling - (partial) `numpy` support Say bye bye to: - classes - polymorphic variables [ but not all of them :-) ] In a nutshell, Pythran makes it possible to write numerical algorithms in Python and to have them run faster. Nuff said. Prerequisite ------------ Pythran depends on the following packages: .. 
include:: ../requirements.txt :literal: You also need a modern C++11 enabled compiler (e.g. g++>=5, clang>=3.5), that supports atomic operations (N3290) and variadic template (N2555). Installation from Sources ------------------------- The prefered way to install Pythran is using ``pip install pythran`` or ``conda install pythran``. Yet if you want to install from sources, here is the procedure. First get the sources:: $> git clone https://github.com/serge-sans-paille/pythran From the source directory, run:: $> pip install . ``pythran`` should now be on your ``PATH``. If not, it's possible ``pip`` installed to ``.local`` (this happens if the default ``site-packages`` location requires elevated permissions) - fix this by setting your path to:: $> export PATH=$PATH:$HOME/.local/bin It makes the ``pythran`` command available to you. Making Sure Everything is working --------------------------------- The ``setup.py`` scripts automates this. The ``test`` target, as in:: $> python setup.py test runs a whole (and long) validation suite (you will need to install the ``pytest`` module first to use it). If these tests fail, you are likely missing some of the requirements. You can set site specific flags in your ``~/.pythranrc``, read the doc a bit further! First Steps ----------- To begin with, you need... a Python function in a module. Something like:: <> def dprod(arr0, arr1): return sum([x*y for x,y in zip(arr0, arr1)]) is perfect. But due to ``\_o<`` typing, ``arr0`` and ``arr1`` can be of any type, so Pythran needs a small hint there. Add the following line somewhere in your file, say at the top head, or right before the function definition:: #pythran export dprod(int list, int list) This basically tells Pythran the type of the forthcoming arguments. 
Afterwards, frenetically type:: $> pythran dprod.py ``\o/`` a ``dprod.so`` native module has been created and you can play with it right *now*, as if it where a normal module:: >>> import dprod # this imports the native version if available >>> dprod.dprod([1,2], [3,4]) 11 The speedup will not be terrific because of the conversion cost from Python to C++. So let's try again with a well-known example. Let me introduce the almighty *matrix multiply*!:: <> def zero(n,m): return [[0]*n for col in range(m)] def matrix_multiply(m0, m1): new_matrix = zero(len(m0),len(m1[0])) for i in range(len(m0)): for j in range(len(m1[0])): for k in range(len(m1)): new_matrix[i][j] += m0[i][k]*m1[k][j] return new_matrix This is a slightly more complex example, as a few intrinsics such as ``range`` and ``len`` are used, with a function call and even nested list comprehension. But Pythran can make its way through this. As you only want to export the ``matrix_multiply`` function, you can safely ignore the ``zero`` function and just add:: #pythran export matrix_multiply(float list list, float list list) to the source file. Note how Pythran can combine different types and infer the resulting type. It also respects the nested list structure of Python, so you are not limited to matrices... Enough talk, run:: $> pythran mm.py One touch of magic wand and you have your native binary. Be amazed by the generation of a ``mm.so`` native module that runs around 20x faster than the original one. ``timeit`` approved! But scientific computing in Python usually means Numpy. Here is a well-known Numpy snippet:: <> import numpy as np def arc_distance(theta_1, phi_1, theta_2, phi_2): """ Calculates the pairwise arc distance between all points in vector a and b. """ temp = (np.sin((theta_2-theta_1)/2)**2 + np.cos(theta_1)*np.cos(theta_2) * np.sin((phi_2-phi_1)/2)**2) distance_matrix = 2 * np.arctan2(np.sqrt(temp), np.sqrt(1-temp)) return distance_matrix This example uses a lot of Numpy `ufunc`. 
Pythran is reasonably good at handling such expressions. As you already know, you need to **export** it, giving its argument types by adding:: #pythran export arc_distance(float[], float[], float[], float[]) To the input file. You can compile it as the previous code:: $> pythran arc_distance.py and you'll get a decent binary. But what you really want to do is:: $> pythran -DUSE_XSIMD -fopenmp -march=native arc_distance.py which basically tells the compiler to parallelize and vectorize loops using whatever hardware available on your machine. Then you'll get **really** fast code! Concerning Pythran specifications --------------------------------- The ``#pythran export`` commands are critical to Pythran. In fact if they are missing, Pythran will complain loudly (and fail miserably). So let us dive into this complex language! There is currently only one Pythran command, the ``export`` command. Its syntax is:: #pythran export function_name(argument_type* [, argument_type ? *]) where ``function_name`` is the name of a function defined in the module, and ``argument_type*`` is a comma separated list of argument types, composed of any combination of basic types and constructed types. ``argument_type ? *`` is a comma separated list of optional argument types, similar to ``argument_type`` but followed by a ``?``. What is an ``argument_type``? Anything that looks like a Python basic type! Constructed types are either tuples, introduced by parenthesis, like ``(int, (float, str))`` or lists (resp. set), introduced by the ``list`` (resp. 
``set``) keyword:: argument_type = basic_type | (argument_type+) # this is a tuple | argument_type list # this is a list | argument_type set # this is a set | argument_type []+ # this is a ndarray, C-style | argument_type [::]+ # this is a strided ndarray | argument_type [:,...,:]+ # this is a ndarray, Cython style | argument_type [:,...,3]+ # this is a ndarray, some dimension fixed | argument_type:argument_type dict # this is a dictionary basic_type = bool | byte | int | float | str | None | slice | uint8 | uint16 | uint32 | uint64 | uintp | int8 | int16 | int32 | int64 | intp | float32 | float64 | float128 | complex64 | complex128 | complex256 .. note:: When using a 2D array, overloads of the function involved are created to accept both C-style and Fortran-style arrays. To avoid generating too many functions, one can force the memory layout using ``order(C)`` or ``order(F)`` after the array decalaration, as in ``int[:,:] order(C)``. The same syntax can be used to export global variable (in read only mode):: #pythran export var_name In a similar manner to the Python import statement, it's possible to chain the export, as in:: #pythran export var_name0, var_name1, function_name(argument_type0) Multiple overloads can be specified for the same Python function:: #pythran export function_name(argument_type0) #pythran export function_name(argument_type1) In the case of function with default parameters, you can either omit the parameter, and in that case it uses the default one, or explicitly state it's argument type:: #pythran export function_name() #pythran export function_name(argument_type0) #pythran export function_name(argument_type0, argument_type1) def function_name(a0=1, a1=True): pass When specifying multiple overloads, instead of listing them, you can use the ``or`` operator to list the alternatives, as in:: #pythran export function_name(type0 or type1, type2, type3 or type4) which is exactly equivalent to:: #pythran export function_name(type0, type2, type3) 
#pythran export function_name(type0, type2, type4) #pythran export function_name(type1, type2, type3) #pythran export function_name(type1, type2, type4) Easy enough, isn't it? .. note:: Multiline exports are supported, just use comments to protect them, as in:: #pythran export river_boa(int, # float, # bool) .. note:: It is in fact possible to analyse a code without specifications, but you cannot go further that generic (a.k.a. heavily templated) c++ code. Use the ``-e`` switch! .pythran files ************** Instead of writing the export lines in the ``.py`` file, it is possible to write them, **without the #pythran** prefix, inside a file that has the same path has the ``.py`` file, but with the ``.pythran`` extension. For instance, file ``I_love.py`` can have its export lines in the ``I_love.pythran`` file, using the syntax:: export function_name(argument_type*) Limitations *********** Pythran tries hard to produce code that has the same observable behavior as the original Python code. Unfortunately it's not always possible: - Pythran does not support heterogeneous containers (except tuples). - There is no BigInt support. All integer operations are performed on ``long`` (probably ``int64_t`` on a 64 bit machine.) - In most cases (with the notable exception of ``numpy.ndarray``), Pythran is working on a deep copy of the original Python arguments. This copy shares no memory relationship with the original object, which means that modifying the argument content in Pythran won't modify the original argument content. Likewise, objects generated by Pythran cannot share reference (in the sense of ``is``) with one of the input argument. Of curse, this limitation doesn't apply to non exported functions. GIL Interaction --------------- As Pythran translates the Python code in native code that only depends on ``libpython`` for data translation, it can release the GIL during the actual function run. 
And that's what it does :-) Put an another way, you can rip some speedup at the Python level just by spawning multiple ``threading.Thread``. IPython Integration ------------------- The magic function ``%%pythran`` is made available to ``ipython`` users through an extension. The extension is located in the ``extensions/`` directory and can be loaded using IPython's magic function:: %load_ext pythran.magic Once done, you can pythranize your code from the IPython shell:: %%pythran #pythran export foo() def foo(): print 'hello' You can pass arguments to this magic, as in:: %%pythran(-O2 -fopenmp) #pythran export foo() def foo(): print 'hello' Distutils Integration --------------------- When distributing a Python application with Pythran modules, you can either: * declare the module as a regular Python module. After all, they are 100% Python compatible. * declare them as a ``PythranExtension`` and Pythran will compile them:: from distutils.core import setup # These two lines are required to be able to use pythran in the setup.py import setuptools setuptools.dist.Distribution(dict(setup_requires='pythran')) from pythran.dist import PythranExtension, PythranBuildExt setup(..., ext_modules=[PythranExtension("mymodule", ["mymodule.py"])], cmdclass={"build_ext": PythranBuildExt}) ``PythranBuildExt`` is optional, but necessary to build extensions with different C++ compilers. It derives from distuil's ``build_ext`` by default, but you can change its base class by using ``PythranBuildExt[base_cls]`` instead. * all configuration options supported in ``.pythranrc`` can also be passed through the optional ``config`` argument, in the form of a list, e.g. ``config=['compiler.blas=openblas']`` .. note:: There's no strong compatibility guarantee between Pythran version at C++ level. As a consequence, a code distrubuted under pythran version 0.x should depend on that exact version, as version 0.y may introduce some changes. This behavior is likely to change with revisions >= 1. 
Capsule Corp ------------ Instead of creating functions that can be used from Python code, Pythran can produce native functions to be used by other Python extension, using the ``Capsule`` mechanism. To do so, just add the ``capsule`` keyword to the export line:: #pythran export capsule foo(double*, double) Note that pointer types are only supported within the context of a capsule, as they don't match any real Python type. **Any** Pythran type is valid as capsule parameter, but beware that non scalar or pointer types only make sense withing the Pythran context. Debug Mode ---------- Pythran honors the ``NDEBUG`` macro. If set through ``-DNDEBUG`` (which should be the default, check ``python-config --cflags``), it disables all ``assert statement`` and doesn't perform any runtime check for indexing bounds etc. However, if unset through ``-UNDEBUG``, all ``assert`` are executed and eventually raise an ``AssertionError``. Additionnaly, many internal checks are done and may fail with a C-ish assertion. Thread safety ------------- By default Pythran does not generate thread-safe code for non-OpenMP code: reference counting for automatic deletion of objects is not done atomically by default. It's still possible to force pythran to generate thread-safe reference counting by defining the flag ``THREAD_SAFE_REF_COUNT`` via ``-DTHREAD_SAFE_REF_COUNT``. There is a small performance penalty associated with this. Advanced Usage -------------- One can use ``-o `` or ``--output=`` to control the name of the generated file. If ```` contains the ``%{ext}`` pattern, it is replaced by the extension that matches your current platform. A failing compilation? A lust for c++ tangled code? Give a try to the ``-E`` switch that stops the compilation process right after c++ code generation, so that you can inspect it. Want more performance? Big fan of ``-Ofast -march=native``? Pythran _automagically_ forwards these switches to the underlying compiler! 
Tired of typing the same compiler switches again and again? Store them in ``$XDG_CONFIG_HOME/.pythranrc``! Wants to try your own compiler? Update the ``CC`` and ``CXX`` fields from your ``pythranrc``, or set the same environment variables to the right compilers. Environment variables have greater precedence than configuration file. Pythran also honors the ``CXXFLAGS`` and ``LDFLAGS`` environment variables. The careful reader might have noticed the ``-p`` flag from the command line. It makes it possible to define your own optimization sequence:: pythran -pConstantFolding -pmy_package.MyOptimization runs the ``ConstantFolding`` optimization from ``pythran.optimizations`` followed by a custom optimization found in the ``my_package`` package, loaded from ``PYTHONPATH``. When importing a Python module, one can check for the presence of the ``__pythran__`` variable at the module scope to see if the module has been pythranized:: import foo if hasattr(foo, '__pythran__'): print(r'\_o<') This variable is a tuple that holds three fields: 1. pythran's version 2. compilation date 3. sha256 value of the input code Adding OpenMP directives ------------------------ OpenMP is a standard set of directives for C, C++ and FORTRAN that makes it easier to turn a sequential program into a multi-threaded one. Pythran translates OpenMP-like code annotation into OpenMP directives:: r=0 #omp parallel for reduction(+:r) for x,y in zip(l1,l2): r+=x*y OpenMP directive parsing is enabled by ``-fopenmp`` when using ``g++`` as the back-end compiler. Be careful with the indentation. It has to be correct! Alternatively, one can run the great:: $> pythran -ppythran.analyses.ParallelMaps -e as.py which runs a code analyzer that displays extra information concerning parallel ``map`` found in the code. Getting Pure C++ ---------------- Pythran can be used to generate raw templated C++ code, without any Python glue. To do so use the ``-e`` switch. 
It will turn the Python code into C++ code you can call from a C++ program. In that case there is **no** need for a particular Pythran specification. Read the optimized Python code ------------------------------ Curious Python developers might want to study how Pythran transforms their codes. With the ``-P`` switch, Pythran optimizes the Python code, prints the result and stops there. Pythran does not care about PEP 8, so a Python formatter is often useful:: $> pythran -P arc_distance.py | yapf Customizing Your ``.pythranrc`` ------------------------------- Pythran checks for a file named ``.pythranrc`` and use it to *replace* the site configuration. Here are a few tricks! You can change the default location of the pythran configuration file using the environment variable ``PYTHRANRC``:: PYTHRANRC=/opt/company/pythran/config.pythranrc pythran arc_distance.py All the options in the ``.pythranrc`` file can be specified when running pythran by using the command line argument --config= . For example:: pythran --config compiler.blas=pythran-openblas this_file.py would specify that pythran-openblas is the blas library to use. Options specified using command-line arguments override the options found in the ``.pythranrc`` file ``[compiler]`` ************** This section contains compiler flags configuration. For education purpose, the default linux configuration is .. literalinclude:: ../pythran/pythran-linux2.cfg :``CC``: Path to the C compiler to use :``CXX``: Path to the C++ compiler to use :``defines``: Preprocessor definitions. Pythran is sensible to ``USE_XSIMD`` and ``PYTHRAN_OPENMP_MIN_ITERATION_COUNT``. The former turns on `xsimd `_ vectorization and the latter controls the minimal loop trip count to turn a sequential loop into a parallel loop. :``undefs``: Some preprocessor definitions to remove. :``include_dirs``: Additional include directories to search for headers. :``cflags``: Additional random compiler flags (``-f``, ``-O``). 
Optimization flags generally go there. The default is to set ``-std=c++11`` for C++11 support. :``libs``: Libraries to use during the link process. A typical extension is to add ``tcmalloc_minimal`` to use the allocator from https://code.google.com/p/gperftools/. :``library_dirs``: Extra directories to search for required libraries. :``ldflags``: Additional random linker flags. :``blas``: BLAS library to use. ``none``, ``pythran-openblas``, ``blas``, ``openblas``, ``atlas`` or ``mkl`` are viable choices. ``none`` prevents linking with blas. ``pythran-openblas`` requires the `pythran-openblas `_ package, which provides a statically linked version of `OpenBLAS `_. Other options are system dependent. Depending on your setup, you *may* need to update ``include_dirs`` to point to the location of the BLAS headers, e.g. ``/usr/include/openblas``. :``ignoreflags``: Space-separated list of compiler flags that should not be forwarded to the pythran backend compiler when inherited, for instance, from ``python-config``. For instance ``-Wstrict-prototypes`` is a C-only option that should be pruned. ``[pythran]`` ************* This one contains internal configuration settings. Play with it at your own risk! :``optimizations``: A list of import paths pointing to transformation classes. This contains the optimization pipeline of Pythran! If you design your own optimizations, register them here! :``complex_hook``: Set this to ``True`` for faster and still Numpy-compliant complex multiplications. Not very portable, but generally works on Linux. ``[typing]`` ************ Another internal settings section. This controls the accuracy of the typing phase. 
An extract from the default setting file should convince you not to touch it:: [typing] # maximum number of container access taken into account during type inference # increasing this value increases typing accuracy # but slows down compilation time, to the point of making g++ crash max_container_type = 2 # maximum number of combiner per user function # increasing this value increases typing accuracy # but slows down compilation time, to the point of making g++ crash max_combiner = 2 # set this to true to enable a costly yet more accurate type inference algorithm # This algorithm generates code difficult to compile for g++, but not clang++ enable_two_steps_typing = False F.A.Q. ------ 1. Supported compiler versions: - `g++` version 4.9 and above - `clang++` version 3.5 and above Troubleshooting --------------- Plenty of them! Seriously, Pythran is software, so it will crash. You may make it abort in unusual ways! And more importantly, please provide feedback to serge_sans_paille via email ``serge.guelton@telecom-bretagne.eu``, the IRC channel ``#pythran`` on OFTC, or the mailing list ``pythran@freelists.org`` **glhf!** pythran-0.10.0+ds2/docs/Makefile000066400000000000000000000003601416264035500163360ustar00rootroot00000000000000 SPHINXBUILD = sphinx-build BUILDDIR = _build ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees . html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished.\nfile://"$(shell pwd)"/_build/html/index.html" pythran-0.10.0+ds2/docs/TUTORIAL.rst000066400000000000000000000256411416264035500167040ustar00rootroot00000000000000Developer Tutorial ################## This is a long tutorial to help new Pythran developers discover the Pythran architecture. This is *not* a developer documentation, but it aims at giving a good overview of Pythran's capabilities. It requires that you are comfortable with Python, and ideally with C++11. It also assumes you have some compilation background, i.e. 
you know what an AST is and you don't try to escape when hearing the words alias analysis, memory effect computations and such. Parsing Python Code ------------------- Python ships a standard module, ``ast`` to turn Python code into an AST. For instance:: >>> import gast as ast >>> from __future__ import print_function >>> code = "a=1" >>> tree = ast.parse(code) # turn the code into an AST >>> print(ast.dump(tree)) # view it as a string Module(body=[Assign(targets=[Name(id='a', ctx=Store(), annotation=None, type_comment=None)], value=Constant(value=1, kind=None), type_comment=None)], type_ignores=[]) Deciphering the above line, one learns that the single assignment is parsed as a module containing a single statement, which is an assignment to a single target, a ``ast.Name`` with the identifier ``a``, of the literal value ``1``. Eventually, one needs to parse more complex codes, and things get a bit more cryptic, but you get the idea:: >>> fib_src = """ ... def fib(n): ... return n if n< 2 else fib(n-1) + fib(n-2)""" >>> tree = ast.parse(fib_src) >>> print(ast.dump(tree)) Module(body=[FunctionDef(name='fib', args=arguments(args=[Name(id='n', ctx=Param(), annotation=None, type_comment=None)], posonlyargs=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[]), body=[Return(value=IfExp(test=Compare(left=Name(id='n', ctx=Load(), annotation=None, type_comment=None), ops=[Lt()], comparators=[Constant(value=2, kind=None)]), body=Name(id='n', ctx=Load(), annotation=None, type_comment=None), orelse=BinOp(left=Call(func=Name(id='fib', ctx=Load(), annotation=None, type_comment=None), args=[BinOp(left=Name(id='n', ctx=Load(), annotation=None, type_comment=None), op=Sub(), right=Constant(value=1, kind=None))], keywords=[]), op=Add(), right=Call(func=Name(id='fib', ctx=Load(), annotation=None, type_comment=None), args=[BinOp(left=Name(id='n', ctx=Load(), annotation=None, type_comment=None), op=Sub(), right=Constant(value=2, kind=None))], keywords=[]))))], 
decorator_list=[], returns=None, type_comment=None)], type_ignores=[]) The idea remains the same. The whole Python syntax is described in http://docs.python.org/2/library/ast.html and is worth a glance, otherwise you'll be in serious trouble understanding the following. Pythran Pass Manager -------------------- A pass is a code transformation, i.e. a function that turns an AST node into a new AST node with refined behavior. As a compiler infrastructure, Pythran proposes a pass manager that (guess what?) manages pass scheduling, that is the order in which pass is applied to achieve the ultimate goal, world domination. Oooops, efficient C++11 code generation. One first need to instantiate a pass manager with a module name:: >>> from pythran import passmanager >>> pm = passmanager.PassManager("tutorial_module") The pass manager has 3 methods and two attributes:: >>> [x for x in dir(pm) if not x.startswith('_')] ['apply', 'dump', 'gather', 'module_dir', 'module_name'] ``apply`` applies a code transformation ``dump`` dumps a node using a dedicated backend ``gather`` gathers information about the node Pythran Backends ---------------- Pythran currently has two backends. The main one is used to dump Pythran AST (a subset of Python AST) into a C++ AST:: >>> from pythran import backend >>> cxx = pm.dump(backend.Cxx, tree) >>> str(cxx) '#include \n#include \n#include \n#include \n#include \n#include \nnamespace __pythran_tutorial_module\n{\n struct fib\n {\n typedef void callable;\n typedef void pure;\n template \n struct type\n {\n typedef typename pythonic::returnable::type>::type>::type result_type;\n } \n ;\n template \n inline\n typename type::result_type operator()(argument_type0&& n) const\n ;\n } ;\n template \n inline\n typename fib::type::result_type fib::operator()(argument_type0&& n) const\n {\n return (((bool)pythonic::operator_::lt(n, 2L)) ? 
typename __combined::type(n) : typename __combined::type(pythonic::operator_::add(fib()(pythonic::operator_::sub(n, 1L)), fib()(pythonic::operator_::sub(n, 2L)))));\n }\n}' The above string is understandable by a C++11 compiler, but it quickly reaches the limit of our developer brain, so most of the time, we are more comfortable with the Python backend:: >>> py = pm.dump(backend.Python, tree) >>> print(py) def fib(n): return (n if (n < 2) else (fib((n - 1)) + fib((n - 2)))) Passes ------ There are many code transformations in Pythran. Some of them are used to lower the representation from Python AST to the simpler Pythran AST. For instance there is no tuple unpacking in Pythran, so Pythran provides an adequate transformation:: >>> from pythran import transformations >>> tree = ast.parse("def foo(): a,b = 1,3.5") >>> _ = pm.apply(transformations.NormalizeTuples, tree) # in-place >>> print(pm.dump(backend.Python, tree)) def foo(): __tuple0 = (1, 3.5) a = __tuple0[0] b = __tuple0[1] Pythran transforms the tuple unpacking into an intermediate tuple assignment. Note that if the unpacking statement is marked as critical using an OpenMP statement, then a temporary variable is used to hold the left hand side computation, if any:: >>> from pythran import transformations >>> tree = ast.parse(""" ... def foo(x): ... #omp critical ... a,b = 1, x + 1 ... return a + b""") >>> _ = pm.apply(transformations.NormalizeTuples, tree) # in-place >>> print(pm.dump(backend.Python, tree)) def foo(x): __tuple0 = (1, (x + 1)) a = __tuple0[0] b = __tuple0[1] return (a + b) There are many small passes used iteratively to produce the Pythran AST. 
For instance the implicit return at the end of every function is made explicit:: >>> tree = ast.parse('def foo():pass') >>> _ = pm.apply(transformations.NormalizeReturn, tree) >>> print(pm.dump(backend.Python, tree)) def foo(): pass return builtins.None More complex ones rely on introspection to implement constant folding:: >>> from __future__ import print_function >>> code = [fib_src, 'def foo(): return builtins.map(fib, [1,2,3])'] >>> fib_call = '\n'.join(code) >>> tree = ast.parse(fib_call) >>> from pythran import optimizations as optim >>> _ = pm.apply(optim.ConstantFolding, tree) >>> print(pm.dump(backend.Python, tree)) def fib(n): return (n if (n < 2) else (fib((n - 1)) + fib((n - 2)))) def foo(): return [1, 1, 2] One can also detect some common generator expression patterns to call the itertool module:: >>> norm = 'def norm(l): return builtins.sum(n*n for n in l)' >>> tree = ast.parse(norm) >>> _ = pm.apply(optim.ComprehensionPatterns, tree) >>> 'map' in pm.dump(backend.Python, tree) True Analysis -------- All Pythran passes are backed up by analysis. Pythran provides three levels of analysis:: >>> passmanager.FunctionAnalysis >>> passmanager.ModuleAnalysis >>> passmanager.NodeAnalysis Lets examine the information Pythran can extract from a Pythran-compatible Python code. A simple analyse gathers informations concerning used identifiers across the module. 
It can be used, for instance, to generate new unique identifiers:: >>> from pythran import analyses >>> code = 'a = b = 1' >>> tree = ast.parse(code) >>> sorted(pm.gather(analyses.Identifiers, tree)) ['a', 'b'] One can also compute the state of ``globals()``:: >>> code = 'import math\n' >>> code += 'def foo(a): b = math.cos(a) ; return [b] * 3' >>> tree = ast.parse(code) >>> sorted(list(pm.gather(analyses.Globals, tree))) ['__dispatch__', 'builtins', 'foo', 'math'] One can also compute the state of ``locals()`` at any point of the program:: >>> l = pm.gather(analyses.Locals, tree) >>> fdef = tree.body[-1] >>> freturn = fdef.body[-1] >>> sorted(l[freturn]) ['a', 'b', 'math'] The ``ConstantFolding`` pass relies on the eponymous analysis that flags all constant expressions. In the previous code, there are only two constant *expressions* but only one can be evaluated:: >>> ce = pm.gather(analyses.ConstantExpressions, tree) >>> sorted(map(ast.dump, ce)) ["Attribute(value=Name(id='math', ctx=Load(), annotation=None, type_comment=None), attr='cos', ctx=Load())", 'Constant(value=3, kind=None)'] One of the most critical analyses of Pythran is the points-to analysis. There are two flavors of this analysis, one that computes an over-set of the aliased variable, and one that computes an under-set. 
``Aliases`` computes an over-set:: >>> code = 'def foo(c, d): b= c or d ; return b' >>> tree = ast.parse(code) >>> al = pm.gather(analyses.Aliases, tree) >>> returned = tree.body[-1].body[-1].value >>> print(ast.dump(returned)) Name(id='b', ctx=Load(), annotation=None, type_comment=None) >>> sorted(a.id for a in al[returned]) ['c', 'd'] Pythran also implements an inter-procedural analyse to compute which arguments are updated, for instance using an augmented assign, or the ``append`` method:: >>> code = 'def foo(l,a): l+=[a]\ndef bar(g): foo(g, 1)' >>> tree = ast.parse(code) >>> ae = pm.gather(analyses.ArgumentEffects, tree) >>> foo, bar = tree.body[0], tree.body[1] >>> ae[foo] [True, False] >>> ae[bar] [True] From this analyse and the ``GlobalEffects`` analyse, one can compute the set of pure functions, i.e. functions that have no side effects:: >>> code = 'import random\ndef f():pass\ndef b(l): random.seed(0)' >>> tree = ast.parse(code) >>> pf = pm.gather(analyses.PureExpressions, tree) >>> f = tree.body[1] >>> b = tree.body[2] >>> f in pf True >>> b in pf False Pure functions are also interesting in the context of ``map``, as the application of a pure functions using a map results in a parallel ``map``:: >>> code = 'def foo(x): return x*x\n' >>> code += 'builtins.map(foo, builtins.range(100))' >>> tree = ast.parse(code) >>> pmaps = pm.gather(analyses.ParallelMaps, tree) >>> len(pmaps) 1 pythran-0.10.0+ds2/docs/conf.py000066400000000000000000000207121416264035500162000ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Pythran documentation build configuration file, created by # sphinx-quickstart on Wed Feb 19 20:57:04 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
import sys import os import re from pythran import __version__ with open("../README.rst") as readme: readme_body = readme.read() toc = ''' .. toctree:: :maxdepth: 1 MANUAL EXAMPLES CLI SUPPORT DEVGUIDE TUTORIAL INTERNAL LICENSE AUTHORS Changelog ''' readme_body = readme_body.replace('https://pythran.readthedocs.io', toc) with open("index.rst", "w") as index: index.write(readme_body) del readme_body with open("../LICENSE") as license: with open('LICENSE.rst', 'w') as license_rst: license_rst.write("=======\nLICENSE\n=======\n\n") license_rst.write(license.read()) with open("../Changelog") as changelog: with open('Changelog.rst', 'w') as changelog_rst: changelog_rst.write('=========\nChangelog\n=========\n\n') changelog_rst.write(changelog.read()) with open("../AUTHORS") as authors: with open('AUTHORS.rst', 'w') as authors_rst: authors_rst.write(authors.read()) def make_support(): from pythran import tables TITLE = "Supported Modules and Functions" DEPTHS = '=*-+:~#.^"`' body = [] body.append(DEPTHS[0]*len(TITLE)) body.append(TITLE) body.append(DEPTHS[0]*len(TITLE)) body.append("") def format_name(name): if name.endswith('_') and not name.startswith('_'): name = name[:-1] return name def isiterable(obj): return hasattr(obj, '__iter__') def dump_entry(entry_name, entry_value, depth): if isiterable(entry_value): body.append(entry_name) body.append(DEPTHS[depth] * len(entry_name)) body.append("") sym_entries, sub_entries = [], [] for sym in entry_value: w = sub_entries if isiterable(entry_value[sym]) else sym_entries w.append(sym) for k in sorted(sym_entries): dump_entry(format_name(k), entry_value[k], depth + 1) body.append("") for k in sorted(sub_entries): dump_entry(format_name(k), entry_value[k], depth + 1) body.append("") else: body.append(entry_name) for MODULE in sorted(tables.MODULES): if MODULE != '__dispatch__': dump_entry(format_name(MODULE), tables.MODULES[MODULE], 1) return "\n".join(body) with open('SUPPORT.rst', 'w') as support: 
support.write(make_support()) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['nbsphinx',] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Pythran' copyright = u'2014, Serge Guelton, Pierrick Brunet et al.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**.ipynb_checkpoints'] # The reST default role (used for this markup: `text`) to use for all # documents. 
#default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. import guzzle_sphinx_theme html_theme_path = guzzle_sphinx_theme.html_theme_path() html_theme = 'guzzle_sphinx_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { # Set the name of the project to appear in the sidebar "project_nav_name": "Project Name", } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = 'pythran.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = {'**': ['globaltoc.html']} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'Pythrandoc' pythran-0.10.0+ds2/docs/examples/000077500000000000000000000000001416264035500165155ustar00rootroot00000000000000pythran-0.10.0+ds2/docs/examples/Distutils Sample Project.ipynb000066400000000000000000000134551416264035500243450ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Example of Pythran Usage Within a Full Project\n", "\n", "This notebook covers the creation of a simple, distutils-powered, project that ships a pythran kernel.\n", "\n", "\n", "But first some cleanup" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "!rm -rf hello setup.py && mkdir hello" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Project layout\n", "\n", "The Pythran file is really dumb.\n", "The expected layout is:\n", "\n", "```\n", "setup.py\n", "hello/\n", " +---- __init__.py\n", " +---- hello.py\n", "```" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing hello/hello.py\n" ] } ], "source": [ "%%file hello/hello.py\n", "\n", "#pythran export hello()\n", "\n", "def hello():\n", " \"\"\"\n", " Wave hello.\n", " \"\"\"\n", " print(\"Hello from Pythran o/\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And so is the ``__init__.py`` file." ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing hello/__init__.py\n" ] } ], "source": [ "%%file hello/__init__.py\n", "\"\"\"\n", "Hello package, featuring a Pythran kernel.\n", "\"\"\"\n", "from hello import hello" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The ``setup.py`` file contains the classical metadata, plus a special header. this header basically states *if pythran is available, use it, otherwise fallback to the python file*." 
] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing setup.py\n" ] } ], "source": [ "%%file setup.py\n", "from distutils.core import setup\n", "\n", "try:\n", " from pythran.dist import PythranExtension, PythranBuildExt\n", " setup_args = {\n", " 'cmdclass': {\"build_ext\": PythranBuildExt},\n", " 'ext_modules': [PythranExtension('hello.hello', sources = ['hello/hello.py'])],\n", " }\n", "except ImportError:\n", " print(\"Not building Pythran extension\")\n", " setup_args = {}\n", " \n", "setup(name = 'hello',\n", " version = '1.0',\n", " description = 'Yet another demo package',\n", " packages = ['hello'],\n", " **setup_args)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Running ``setup.py``\n", "\n", "With the described configuration, the normal ``python setup.py`` targets should « just work ».\n", "\n", "If pythran is in the path, it is used to generate the alternative c++ extension when building a source release. Note the ``hello.cpp``!" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "hello/hello.cpp\n", "hello/hello.py\n" ] } ], "source": [ "%%sh\n", "rm -rf build dist\n", "python setup.py sdist 2>/dev/null 1>/dev/null\n", "tar tf dist/hello-1.0.tar.gz | grep -E 'hello/hello.(py|cpp)' -o | sort" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "But if pythran is no longer in the ``PYTHONPATH``, the installation does not fail: the regular Python source can still be used." 
] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "hello/hello.py\n" ] } ], "source": [ "%%sh\n", "rm -rf build dist\n", "PYTHONPATH= python setup.py sdist 2>/dev/null 1>/dev/null\n", "tar tf dist/hello-1.0.tar.gz | grep -E 'hello/hello.py' -o" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In case of binary distribution, the native module is generated alongside the original source." ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "hello/hello.py\n" ] } ], "source": [ "%%sh\n", "rm -rf build dist\n", "python setup.py bdist 2>/dev/null 1>/dev/null\n", "tar tf dist/hello-1.0.linux-x86_64.tar.gz | grep -E 'hello/hello.(py|cpp)' -o" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And if pythran is not in the ``PYTHONPATH``, this still work ``\\o/``" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "hello/hello.py\n" ] } ], "source": [ "%%sh\n", "rm -rf build dist\n", "PYTHONPATH= python setup.py bdist 2>/dev/null 1>/dev/null\n", "tar tf dist/hello-1.0.linux-x86_64.tar.gz | grep -E 'hello/hello.py' -o" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.3" } }, "nbformat": 4, "nbformat_minor": 2 } pythran-0.10.0+ds2/docs/examples/Third Party Libraries.ipynb000066400000000000000000000215421416264035500236130ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Using third-party Native Libraries\n", "\n", "Sometimes, the functionnality you need are onmy available in 
third-party native libraries. There's still an opportunity to use them from within Pythran, using Pythran support for capsules. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Pythran Code\n", "\n", "The pythran code requires function pointers to the third-party functions, passed as parameters to your pythran routine, as in the following:" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pythran\n", "%load_ext pythran.magic" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "%%pythran \n", "#pythran export pythran_cbrt(float64(float64), float64)\n", "\n", "def pythran_cbrt(libm_cbrt, val):\n", " return libm_cbrt(val)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In that case ``libm_cbrt`` is expected to be a capsule containing the function pointer to ``libm``'s ``cbrt`` (cube root) function.\n", "\n", "This capsule can be created using ``ctypes``:" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import ctypes\n", "\n", "# capsulefactory\n", "PyCapsule_New = ctypes.pythonapi.PyCapsule_New\n", "PyCapsule_New.restype = ctypes.py_object\n", "PyCapsule_New.argtypes = ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p\n", "\n", "# load libm\n", "libm = ctypes.CDLL('libm.so.6')\n", "\n", "# extract the proper symbol\n", "cbrt = libm.cbrt\n", "\n", "# wrap it\n", "cbrt_capsule = PyCapsule_New(cbrt, \"double(double)\".encode(), None)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The capsule is not usable from Python context (it's some kind of opaque box) but Pythran knows how to use it. beware, it does not try to do any kind of type verification. It trusts your ``#pythran export`` line." 
] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "2.0" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pythran_cbrt(cbrt_capsule, 8.)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## With Pointers\n", "\n", "Now, let's try to use the ``sincos`` function. It's C signature is ``void sincos(double, double*, double*)``. How do we pass that to Pythran?" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "\n", "#pythran export pythran_sincos(None(float64, float64*, float64*), float64)\n", "def pythran_sincos(libm_sincos, val):\n", " import numpy as np\n", " val_sin, val_cos = np.empty(1), np.empty(1)\n", " libm_sincos(val, val_sin, val_cos)\n", " return val_sin[0], val_cos[0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "There is some magic happening here:\n", "\n", "- ``None`` is used to state the function pointer does not return anything.\n", "\n", "- In order to create pointers, we actually create empty one-dimensional array and let pythran handle them as pointer. Beware that you're in charge of all the memory checking stuff!\n", "\n", "Apart from that, we can now call our function with the proper capsule parameter." ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "sincos_capsule = PyCapsule_New(libm.sincos, \"unchecked anyway\".encode(), None)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(0.0, 1.0)" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pythran_sincos(sincos_capsule, 0.)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## With Pythran\n", "\n", "It is naturally also possible to use capsule generated by Pythran. 
In that case, no type shenanigans is required, we're in our small world.\n", "\n", "One just need to use the ``capsule`` keyword to indicate we want to generate a capsule." ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "\n", "## This is the capsule.\n", "#pythran export capsule corp((int, str), str set)\n", "def corp(param, lookup):\n", " res, key = param\n", " return res if key in lookup else -1\n", "\n", "## This is some dummy callsite\n", "#pythran export brief(int, int((int, str), str set)):\n", "def brief(val, capsule):\n", " return capsule((val, \"doctor\"), {\"some\"})\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It's not possible to call the capsule directly, it's an opaque structure." ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "'PyCapsule' object is not callable\n" ] } ], "source": [ "try:\n", " corp((1,\"some\"),set())\n", "except TypeError as e:\n", " print(e)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It's possible to pass it to the according pythran function though." ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "-1" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "brief(1, corp)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## With Cython\n", "\n", "The capsule pythran uses may come from Cython-generated code. This uses a little-known feature from cython: ``api`` and ``__pyx_capi__``. ``nogil`` is of importance here: Pythran releases the GIL, so **better not call a cythonized function that uses it**." 
] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "!find -name 'cube*' -delete" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing cube.pyx\n" ] } ], "source": [ "%%file cube.pyx\n", "#cython: language_level=3\n", "cdef api double cube(double x) nogil:\n", " return x * x * x" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Compiling cube.pyx because it changed.\n", "[1/1] Cythonizing cube.pyx\n" ] } ], "source": [ "from setuptools import setup\n", "from Cython.Build import cythonize\n", "\n", "_ = setup(\n", " name='cube',\n", " ext_modules=cythonize(\"cube.pyx\"),\n", " zip_safe=False,\n", " # fake CLI call\n", " script_name='setup.py',\n", " script_args=['--quiet', 'build_ext', '--inplace']\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The cythonized module has a special dictionary that holds the capsule we're looking for." 
] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "import sys\n", "sys.path.insert(0, '.')\n", "import cube\n", "print(type(cube.__pyx_capi__['cube']))" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "8.0" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "cython_cube = cube.__pyx_capi__['cube']\n", "pythran_cbrt(cython_cube, 2.)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.3" } }, "nbformat": 4, "nbformat_minor": 2 } pythran-0.10.0+ds2/docs/requirements.txt000066400000000000000000000000511416264035500201570ustar00rootroot00000000000000numpy nbsphinx scipy guzzle_sphinx_theme pythran-0.10.0+ds2/omp/000077500000000000000000000000001416264035500145425ustar00rootroot00000000000000pythran-0.10.0+ds2/omp/__init__.py000066400000000000000000000107451416264035500166620ustar00rootroot00000000000000"""OpenMP wrapper using a libgomp dynamically loaded library.""" from ctypes.util import find_library from subprocess import check_output, CalledProcessError, DEVNULL from numpy.distutils.misc_util import ( msvc_runtime_major, get_shared_lib_extension ) import ctypes import os import sys try: # there may be an environ modification when loading config from pythran.config import compiler except ImportError: def compiler(): return os.environ.get('CXX', 'c++') cxx = compiler() class OpenMP(object): """ Internal representation of the OpenMP module. Custom class is used to dynamically add omp runtime function to this library when function is called. 
""" def __init__(self): ver = msvc_runtime_major() if ver is None: self.init_not_msvc() else: self.init_msvc(ver) def init_msvc(self, ver): vcomp_path = find_library('vcomp%d.dll' % ver) if not vcomp_path: raise ImportError("I can't find a shared library for vcomp.") else: # Load the library (shouldn't fail with an absolute path right?) self.libomp = ctypes.CDLL(vcomp_path) self.version = 20 def get_libomp_names(self): """Return list of OpenMP libraries to try""" return ['omp', 'gomp', 'iomp5'] def init_not_msvc(self): """ Find OpenMP library and try to load if using ctype interface. """ # find_library() does not automatically search LD_LIBRARY_PATH # until Python 3.6+, so we explicitly add it. # LD_LIBRARY_PATH is used on Linux, while macOS uses DYLD_LIBRARY_PATH # and DYLD_FALLBACK_LIBRARY_PATH. env_vars = [] if sys.platform == 'darwin': env_vars = ['DYLD_LIBRARY_PATH', 'DYLD_FALLBACK_LIBRARY_PATH'] else: env_vars = ['LD_LIBRARY_PATH'] paths = [] for env_var in env_vars: env_paths = os.environ.get(env_var, '') if env_paths: paths.extend(env_paths.split(os.pathsep)) libomp_names = self.get_libomp_names() if cxx is not None: for libomp_name in libomp_names: cmd = [cxx, '-print-file-name=lib{}{}'.format( libomp_name, get_shared_lib_extension())] # The subprocess can fail in various ways, including because it # doesn't support '-print-file-name'. In that case just give up. 
try: output = check_output(cmd, stderr=DEVNULL) path = os.path.dirname(output.decode().strip()) if path: paths.append(path) except (OSError, CalledProcessError): pass for libomp_name in libomp_names: # Try to load find libomp shared library using loader search dirs libomp_path = find_library(libomp_name) # Try to use custom paths if lookup failed if not libomp_path: for path in paths: candidate_path = os.path.join( path, 'lib{}{}'.format(libomp_name, get_shared_lib_extension())) if os.path.isfile(candidate_path): libomp_path = candidate_path break # Load the library if libomp_path: try: self.libomp = ctypes.CDLL(libomp_path) except OSError: raise ImportError("found openMP library '{}' but couldn't load it. " "This may happen if you are cross-compiling.".format(libomp_path)) self.version = 45 return raise ImportError("I can't find a shared library for libomp, you may need to install it " "or adjust the {} environment variable.".format(env_vars[0])) def __getattr__(self, name): """ Get correct function name from libgomp ready to be use. __getattr__ is call only `name != libomp` as libomp is a real attribute. 
""" if name == 'VERSION': return self.version return getattr(self.libomp, 'omp_' + name) # see http://mail.python.org/pipermail/python-ideas/2012-May/014969.html sys.modules[__name__] = OpenMP() pythran-0.10.0+ds2/pythran/000077500000000000000000000000001416264035500154345ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/__init__.py000066400000000000000000000036251416264035500175530ustar00rootroot00000000000000''' This package provides several entry points * spec_parser looks for code annotations in the form of formatted comments * functions defined in toolchain.py: * generate_cxx: python (str) to c++ code, returns a PythonModule * compile_cxxfile: c++ (file) to DLL, returns DLL filename * compile_cxxcode: c++ (str) to DLL, returns DLL filename * compile_pythrancode: python (str) to so/cpp, returns output filename * compile_pythranfile: python (file) to so/cpp, returns output filename * test_compile: passthrough compile test, raises CompileError Exception. Basic scenario is to turn a Python AST into C++ code: >>> code = "def foo(x): return x * 2" >>> cxx_generator, error_checker = generate_cxx('my_module', code) >>> cxx = cxx_generator.generate() To generate a native module, one need to add type information: >>> cxx = generate_cxx('my_module', code, {'foo':([int],)}) Eventually, the type information can be translated from a string: >>> spec = spec_parser('#pythran export foo(int)') >>> cxx = generate_cxx('my_module', code, spec) Higher level entry points include: >>> with open('my_module.py', 'w') as fd: ... _ = fd.write(code) >>> dll_file = compile_pythranfile("my_module.py") >>> cpp_file = compile_pythranfile("my_module.py",cpponly=True) >>> dll_file = compile_pythrancode("my_module", code) >>> dll_file = compile_cxxfile("my_module", cpp_file) Cleanup >>> import os, glob >>> for target in glob.glob('my_module.*'): ... 
os.remove(target) ''' import pythran.log from pythran.config import get_include from pythran.toolchain import (generate_cxx, compile_cxxfile, compile_cxxcode, compile_pythrancode, compile_pythranfile, test_compile) from pythran.spec import spec_parser from pythran.spec import load_specfile from pythran.dist import PythranExtension from pythran.version import __version__ pythran-0.10.0+ds2/pythran/analyses/000077500000000000000000000000001416264035500172535ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/analyses/__init__.py000066400000000000000000000033741416264035500213730ustar00rootroot00000000000000"""The analyses submodule contains all the analyses passes offered in Pythran. This file is just for convenience and turns the import from import analyses.foo.Foo into import analyses.Foo """ from .aliases import Aliases, StrictAliases from .ancestors import Ancestors, AncestorsWithBody from .argument_effects import ArgumentEffects from .argument_read_once import ArgumentReadOnce from .ast_matcher import ASTMatcher, AST_any, AST_or, Placeholder, Check from .cfg import CFG from .constant_expressions import ConstantExpressions from .dependencies import Dependencies from .extended_syntax_check import ExtendedSyntaxCheck from .fixed_size_list import FixedSizeList from .global_declarations import GlobalDeclarations from .global_effects import GlobalEffects from .globals_analysis import Globals from .has_return import HasReturn, HasBreak, HasContinue from .identifiers import Identifiers from .immediates import Immediates from .imported_ids import ImportedIds from .inlinable import Inlinable from .is_assigned import IsAssigned from .lazyness_analysis import LazynessAnalysis from .literals import Literals from .local_declarations import LocalNodeDeclarations, LocalNameDeclarations from .locals_analysis import Locals from .node_count import NodeCount from .optimizable_comprehension import OptimizableComprehension from .ordered_global_declarations import 
OrderedGlobalDeclarations from .parallel_maps import ParallelMaps from .potential_iterator import PotentialIterator from .pure_expressions import PureExpressions from .range_values import RangeValues from .scope import Scope from .static_expressions import StaticExpressions, HasStaticExpression from .use_def_chain import DefUseChains, UseDefChains from .use_omp import UseOMP from .yield_points import YieldPoints pythran-0.10.0+ds2/pythran/analyses/aliases.py000066400000000000000000000660401416264035500212540ustar00rootroot00000000000000""" Aliases gather aliasing informations. """ from pythran.analyses.global_declarations import GlobalDeclarations from pythran.intrinsic import Intrinsic, Class, UnboundValue from pythran.passmanager import ModuleAnalysis from pythran.syntax import PythranSyntaxError from pythran.tables import functions, methods, MODULES from pythran.unparse import Unparser from pythran.conversion import demangle import pythran.metadata as md from pythran.utils import isnum import gast as ast from copy import deepcopy from itertools import product import io IntrinsicAliases = dict() class ContainerOf(object): ''' Represents a container of something We just know that if indexed by the integer value `index', we get `containee' ''' UnknownIndex = float('nan') __slots__ = 'index', 'containee' cache = {} def __new__(cls, *args, **kwargs): # cache the creation of new objects, so that same keys give same id # thus great hashing key = tuple(args), tuple(kwargs.items()) if key not in ContainerOf.cache: new_obj = super(ContainerOf, cls).__new__(cls) ContainerOf.cache[key] = new_obj return ContainerOf.cache[key] def __init__(self, containee, index=UnknownIndex): self.index = index self.containee = containee def save_intrinsic_alias(module): """ Recursively save default aliases for pythonic functions. 
""" for v in module.values(): if isinstance(v, dict): # Submodules case save_intrinsic_alias(v) else: IntrinsicAliases[v] = frozenset((v,)) if isinstance(v, Class): save_intrinsic_alias(v.fields) for module in MODULES.values(): save_intrinsic_alias(module) class Aliases(ModuleAnalysis): ''' Gather aliasing informations across nodes As a result, each node from the module is associated to a set of node or Intrinsic to which it *may* alias to. ''' RetId = '@' def __init__(self): self.result = dict() self.aliases = None ContainerOf.cache.clear() super(Aliases, self).__init__(GlobalDeclarations) @staticmethod def dump(result, filter=None): def pp(n): output = io.StringIO() Unparser(n, output) return output.getvalue().strip() if isinstance(result, dict): for k, v in result.items(): if (filter is None) or isinstance(k, filter): print('{} => {}'.format(pp(k), sorted(map(pp, v)))) elif isinstance(result, (frozenset, set)): print(sorted(map(pp, result))) def get_unbound_value_set(self): return {UnboundValue} @staticmethod def access_path(node): if isinstance(node, ast.Name): return MODULES.get(demangle(node.id), node.id) elif isinstance(node, ast.Attribute): attr_key = demangle(node.attr) value_dict = Aliases.access_path(node.value) if attr_key not in value_dict: raise PythranSyntaxError( "Unsupported attribute '{}' for this object" .format(attr_key), node.value) return value_dict[attr_key] elif isinstance(node, ast.FunctionDef): return node.name else: return node # aliasing created by expressions def add(self, node, values=None): if values is None: # no given target for the alias if isinstance(node, Intrinsic): values = {node} # an Intrinsic always aliases to itself else: values = self.get_unbound_value_set() self.result[node] = values return values def visit_BoolOp(self, node): ''' Resulting node may alias to either operands: >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return a or b') >>> result = 
pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.BoolOp) (a or b) => ['a', 'b'] Note that a literal does not create any alias >>> module = ast.parse('def foo(a, b): return a or 0') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.BoolOp) (a or 0) => ['', 'a'] ''' return self.add(node, set.union(*[self.visit(n) for n in node.values])) def visit_UnaryOp(self, node): ''' Resulting node does not alias to anything >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a): return -a') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.UnaryOp) (- a) => [''] ''' self.generic_visit(node) return self.add(node) visit_BinOp = visit_UnaryOp visit_Compare = visit_UnaryOp def visit_IfExp(self, node): ''' Resulting node alias to either branch >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b, c): return a if c else b') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.IfExp) (a if c else b) => ['a', 'b'] ''' self.visit(node.test) rec = [self.visit(n) for n in (node.body, node.orelse)] return self.add(node, set.union(*rec)) def visit_Dict(self, node): ''' A dict is abstracted as an unordered container of its values >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return {0: a, 1: b}') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Dict) {0: a, 1: b} => ['|a|', '|b|'] where the |id| notation means something that may contain ``id``. 
''' if node.keys: elts_aliases = set() for key, val in zip(node.keys, node.values): self.visit(key) # res ignored, just to fill self.aliases elt_aliases = self.visit(val) elts_aliases.update(map(ContainerOf, elt_aliases)) else: elts_aliases = None return self.add(node, elts_aliases) def visit_Set(self, node): ''' A set is abstracted as an unordered container of its elements >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return {a, b}') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Set) {a, b} => ['|a|', '|b|'] where the |id| notation means something that may contain ``id``. ''' if node.elts: elts_aliases = {ContainerOf(alias) for elt in node.elts for alias in self.visit(elt)} else: elts_aliases = None return self.add(node, elts_aliases) def visit_Return(self, node): ''' A side effect of computing aliases on a Return is that it updates the ``return_alias`` field of current function >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return a') >>> result = pm.gather(Aliases, module) >>> module.body[0].return_alias # doctest: +ELLIPSIS This field is a function that takes as many nodes as the function argument count as input and returns an expression based on these arguments if the function happens to create aliasing between its input and output. In our case: >>> f = module.body[0].return_alias >>> Aliases.dump(f([ast.Name('A', ast.Load(), None, None), ... ast.Constant(1, None)])) ['A'] This also works if the relationship between input and output is more complex: >>> module = ast.parse('def foo(a, b): return a or b[0]') >>> result = pm.gather(Aliases, module) >>> f = module.body[0].return_alias >>> List = ast.List([ast.Name('L0', ast.Load(), None, None)], ... 
ast.Load()) >>> Aliases.dump(f([ast.Name('B', ast.Load(), None, None), List])) ['B', '[L0][0]'] Which actually means that when called with two arguments ``B`` and the single-element list ``[L[0]]``, ``foo`` may returns either the first argument, or the first element of the second argument. ''' if not node.value: return ret_aliases = self.visit(node.value) if Aliases.RetId in self.aliases: ret_aliases = ret_aliases.union(self.aliases[Aliases.RetId]) self.aliases[Aliases.RetId] = ret_aliases def call_return_alias(self, node): def interprocedural_aliases(func, args): arg_aliases = [self.result[arg] or {arg} for arg in args] return_aliases = set() for args_combination in product(*arg_aliases): return_aliases.update( func.return_alias(args_combination)) return {expand_subscript(ra) for ra in return_aliases} def expand_subscript(node): if isinstance(node, ast.Subscript): if isinstance(node.value, ContainerOf): return node.value.containee return node def full_args(func, call): args = call.args if isinstance(func, ast.FunctionDef): extra = len(func.args.args) - len(args) if extra: tail = [deepcopy(n) for n in func.args.defaults[extra:]] for arg in tail: self.visit(arg) args = args + tail return args func = node.func aliases = set() if node.keywords: # too soon, we don't support keywords in interprocedural_aliases pass elif isinstance(func, ast.Attribute): _, signature = methods.get(func.attr, functions.get(func.attr, [(None, None)])[0]) if signature: args = full_args(signature, node) aliases = interprocedural_aliases(signature, args) elif isinstance(func, ast.Name): func_aliases = self.result[func] for func_alias in func_aliases: if hasattr(func_alias, 'return_alias'): args = full_args(func_alias, node) aliases.update(interprocedural_aliases(func_alias, args)) else: pass # better thing to do ? 
[self.add(a) for a in aliases if a not in self.result] return aliases or self.get_unbound_value_set() def visit_Call(self, node): ''' Resulting node alias to the return_alias of called function, if the function is already known by Pythran (i.e. it's an Intrinsic) or if Pythran already computed it's ``return_alias`` behavior. >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> fun = """ ... def f(a): return a ... def foo(b): c = f(b)""" >>> module = ast.parse(fun) The ``f`` function create aliasing between the returned value and its first argument. >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f(b) => ['b'] This also works with intrinsics, e.g ``dict.setdefault`` which may create alias between its third argument and the return value. >>> fun = 'def foo(a, d): builtins.dict.setdefault(d, 0, a)' >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) builtins.dict.setdefault(d, 0, a) => ['', 'a'] Note that complex cases can arise, when one of the formal parameter is already known to alias to various values: >>> fun = """ ... def f(a, b): return a and b ... 
def foo(A, B, C, D): return f(A or B, C or D)""" >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f((A or B), (C or D)) => ['A', 'B', 'C', 'D'] ''' self.generic_visit(node) f = node.func # special handler for bind functions if isinstance(f, ast.Attribute) and f.attr == "partial": return self.add(node, {node}) else: return_alias = self.call_return_alias(node) # expand collected aliases all_aliases = set() for value in return_alias: # no translation if isinstance(value, (ContainerOf, ast.FunctionDef, Intrinsic)): all_aliases.add(value) elif value in self.result: all_aliases.update(self.result[value]) else: try: ap = Aliases.access_path(value) all_aliases.update(self.aliases.get(ap, ())) except NotImplementedError: # should we do something better here? all_aliases.add(value) return self.add(node, all_aliases) visit_Constant = visit_UnaryOp def visit_Attribute(self, node): return self.add(node, {Aliases.access_path(node)}) def visit_Subscript(self, node): ''' Resulting node alias stores the subscript relationship if we don't know anything about the subscripted node. >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a): return a[0]') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Subscript) a[0] => ['a[0]'] If we know something about the container, e.g. in case of a list, we can use this information to get more accurate informations: >>> module = ast.parse('def foo(a, b, c): return [a, b][c]') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Subscript) [a, b][c] => ['a', 'b'] Moreover, in case of a tuple indexed by a constant value, we can further refine the aliasing information: >>> fun = """ ... def f(a, b): return a, b ... 
def foo(a, b): return f(a, b)[0]""" >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Subscript) f(a, b)[0] => ['a'] Nothing is done for slices, even if the indices are known :-/ >>> module = ast.parse('def foo(a, b, c): return [a, b, c][1:]') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Subscript) [a, b, c][1:] => [''] ''' if isinstance(node.slice, ast.Tuple): # could be enhanced through better handling of containers self.visit(node.value) for elt in node.slice.elts: self.visit(elt) aliases = None else: aliases = set() self.visit(node.slice) value_aliases = self.visit(node.value) for alias in value_aliases: if isinstance(alias, ContainerOf): if isinstance(node.slice, ast.Slice): continue if isnum(node.slice): if node.slice.value != alias.index: continue # FIXME: what if the index is a slice variable... aliases.add(alias.containee) elif isinstance(getattr(alias, 'ctx', None), (ast.Param, ast.Store)): aliases.add(ast.Subscript(alias, node.slice, node.ctx)) if not aliases: aliases = None return self.add(node, aliases) def visit_OMPDirective(self, node): ''' omp directive may introduce new variables, just register them ''' for dep in node.deps: self.add(dep) def visit_Name(self, node): if node.id not in self.aliases: err = ("identifier {0} unknown, either because " "it is an unsupported intrinsic, " "the input code is faulty, " "or... pythran is buggy.") raise PythranSyntaxError(err.format(node.id), node) return self.add(node, self.aliases[node.id]) def visit_Tuple(self, node): ''' A tuple is abstracted as an ordered container of its values >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return a, b') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Tuple) (a, b) => ['|[0]=a|', '|[1]=b|'] where the |[i]=id| notation means something that may contain ``id`` at index ``i``. 
''' if node.elts: elts_aliases = set() for i, elt in enumerate(node.elts): elt_aliases = self.visit(elt) elts_aliases.update(ContainerOf(alias, i) for alias in elt_aliases) else: elts_aliases = None return self.add(node, elts_aliases) visit_List = visit_Set def visit_comprehension(self, node): self.aliases[node.target.id] = {node.target} self.generic_visit(node) def visit_ListComp(self, node): ''' A comprehension is not abstracted in any way >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return [a for i in b]') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.ListComp) [a for i in b] => [''] ''' for generator in node.generators: self.visit_comprehension(generator) self.visit(node.elt) return self.add(node) visit_SetComp = visit_ListComp visit_GeneratorExp = visit_ListComp def visit_DictComp(self, node): ''' A comprehension is not abstracted in any way >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b): return {i: i for i in b}') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.DictComp) {i: i for i in b} => [''] ''' for generator in node.generators: self.visit_comprehension(generator) self.visit(node.key) self.visit(node.value) return self.add(node) # aliasing created by statements def visit_FunctionDef(self, node): ''' Initialise aliasing default value before visiting. 
Add aliasing values for : - Pythonic - globals declarations - current function arguments ''' self.aliases = IntrinsicAliases.copy() self.aliases.update((k, {v}) for k, v in self.global_declarations.items()) self.aliases.update((arg.id, {arg}) for arg in node.args.args) self.generic_visit(node) if Aliases.RetId in self.aliases: # parametrize the expression def parametrize(exp): # constant or global -> no change if isinstance(exp, (ast.Constant, Intrinsic, ast.FunctionDef)): return lambda _: {exp} elif isinstance(exp, ContainerOf): pcontainee = parametrize(exp.containee) index = exp.index return lambda args: { ContainerOf(pc, index) for pc in pcontainee(args) } elif isinstance(exp, ast.Name): try: w = node.args.args.index(exp) def return_alias(args): if w < len(args): return {args[w]} else: return {node.args.defaults[w - len(args)]} return return_alias except ValueError: return lambda _: self.get_unbound_value_set() elif isinstance(exp, ast.Subscript): values = parametrize(exp.value) slices = parametrize(exp.slice) return lambda args: { ast.Subscript(value, slice, ast.Load()) for value in values(args) for slice in slices(args)} else: return lambda _: self.get_unbound_value_set() # this is a little tricky: for each returned alias, # parametrize builds a function that, given a list of args, # returns the alias # then as we may have multiple returned alias, we compute the union # of these returned aliases return_aliases = [parametrize(ret_alias) for ret_alias in self.aliases[Aliases.RetId]] def merge_return_aliases(args): return {ra for return_alias in return_aliases for ra in return_alias(args)} node.return_alias = merge_return_aliases def visit_Assign(self, node): r''' Assignment creates aliasing between lhs and rhs >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a): c = a ; d = e = c ; {c, d, e}') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Set) {c, d, e} => ['|a|'] Everyone 
points to the formal parameter 'a' \o/ ''' md.visit(self, node) value_aliases = self.visit(node.value) for t in node.targets: if isinstance(t, ast.Name): self.aliases[t.id] = set(value_aliases) or {t} for alias in list(value_aliases): if isinstance(alias, ast.Name): a_id = alias.id self.aliases[a_id] = self.aliases[a_id].union((t,)) self.add(t, self.aliases[t.id]) else: self.visit(t) def visit_For(self, node): ''' For loop creates aliasing between the target and the content of the iterator >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse(""" ... def foo(a): ... for i in a: ... {i}""") >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Set) {i} => ['|i|'] Not very useful, unless we know something about the iterated container >>> module = ast.parse(""" ... def foo(a, b): ... for i in [a, b]: ... {i}""") >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Set) {i} => ['|a|', '|b|'] ''' iter_aliases = self.visit(node.iter) if all(isinstance(x, ContainerOf) for x in iter_aliases): target_aliases = {iter_alias.containee for iter_alias in iter_aliases} else: target_aliases = {node.target} self.add(node.target, target_aliases) self.aliases[node.target.id] = self.result[node.target] self.generic_visit(node) self.generic_visit(node) def visit_While(self, node): ''' While statement evaluation is somehow equivalent to the evaluation of a sequence, except the fact that in some subtle cases, the first rounds of analyse fails because we do not follow the regular execution order >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> fun = """ ... def foo(a): ... while(a): ... if a == 1: builtins.print(b) ... 
else: b = a""" >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) ''' self.generic_visit(node) self.generic_visit(node) def visit_If(self, node): ''' After an if statement, the values from both branches are merged, potentially creating more aliasing: >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> fun = """ ... def foo(a, b): ... if a: c=a ... else: c=b ... return {c}""" >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Set) {c} => ['|a|', '|b|'] ''' md.visit(self, node) self.visit(node.test) true_aliases = false_aliases = None # first try the true branch try: tmp = self.aliases.copy() for stmt in node.body: self.visit(stmt) true_aliases = self.aliases self.aliases = tmp except PythranSyntaxError: pass # then try the false branch try: for stmt in node.orelse: self.visit(stmt) false_aliases = self.aliases except PythranSyntaxError: pass if true_aliases and not false_aliases: self.aliases = true_aliases for stmt in node.orelse: self.visit(stmt) false_aliases = self.aliases if false_aliases and not true_aliases: self.aliases = false_aliases for stmt in node.body: self.visit(stmt) true_aliases = self.aliases # merge the results from true and false branches if false_aliases and true_aliases: for k, v in true_aliases.items(): if k in self.aliases: self.aliases[k] = self.aliases[k].union(v) else: assert isinstance(v, (frozenset, set)) self.aliases[k] = v elif true_aliases: self.aliases = true_aliases def visit_ExceptHandler(self, node): if node.name: self.aliases[node.name.id] = {node.name} self.generic_visit(node) class StrictAliases(Aliases): """ Gather aliasing informations across nodes, without adding unsure aliases. 
""" def get_unbound_value_set(self): return set() pythran-0.10.0+ds2/pythran/analyses/ancestors.py000066400000000000000000000045231416264035500216320ustar00rootroot00000000000000""" Ancestors computes the ancestors of each node """ from pythran.passmanager import ModuleAnalysis from pythran.utils import pushpop class Ancestors(ModuleAnalysis): ''' Associate each node with the list of its ancestors Based on the tree view of the AST: each node has the Module as parent. The result of this analysis is a dictionary with nodes as key, and list of nodes as values. ''' def __init__(self): self.result = dict() self.current = list() super(Ancestors, self).__init__() def generic_visit(self, node): self.result[node] = list(self.current) with pushpop(self.current, node): super(Ancestors, self).generic_visit(node) class AncestorsWithBody(Ancestors): def visit_metadata(self, node): if hasattr(node, 'metadata'): self.generic_visit(node.metadata) def visit_body(self, body): body_as_tuple = tuple(body) self.result[body_as_tuple] = list(self.current) with pushpop(self.current, body_as_tuple): for stmt in body: self.generic_visit(stmt) def visit_If(self, node): self.result[node] = list(self.current) with pushpop(self.current, node): self.generic_visit(node.test) self.visit_metadata(node) self.visit_body(node.body) self.visit_body(node.orelse) def visit_While(self, node): self.result[node] = list(self.current) with pushpop(self.current, node): self.generic_visit(node.test) self.visit_metadata(node) self.visit_body(node.body) self.visit_body(node.orelse) def visit_For(self, node): self.result[node] = list(self.current) with pushpop(self.current, node): self.generic_visit(node.target) self.generic_visit(node.iter) self.visit_metadata(node) self.visit_body(node.body) self.visit_body(node.orelse) def visit_Try(self, node): self.result[node] = list(self.current) with pushpop(self.current, node): self.visit_metadata(node) self.visit_body(node.body) for handler in node.handlers: 
self.generic_visit(handler) self.visit_body(node.orelse) self.visit_body(node.finalbody) pythran-0.10.0+ds2/pythran/analyses/argument_effects.py000066400000000000000000000170171416264035500231540ustar00rootroot00000000000000""" ArgumentEffects computes write effect on arguments. """ from pythran.analyses.aliases import Aliases from pythran.analyses.global_declarations import GlobalDeclarations from pythran.passmanager import ModuleAnalysis from pythran.tables import MODULES from pythran.graph import DiGraph # FIXME: investigate why we need to import it that way from pythran import intrinsic import gast as ast from functools import reduce class FunctionEffects(object): def __init__(self, node): self.func = node if isinstance(node, ast.FunctionDef): self.update_effects = [False] * len(node.args.args) elif isinstance(node, intrinsic.Intrinsic): self.update_effects = [isinstance(x, intrinsic.UpdateEffect) for x in node.argument_effects] elif isinstance(node, ast.alias): self.update_effects = [] elif isinstance(node, intrinsic.Class): self.update_effects = [] else: raise NotImplementedError # Compute the intrinsic effects only once IntrinsicArgumentEffects = {} def save_function_effect(module): """ Recursively save function effect for pythonic functions. 
""" for intr in module.values(): if isinstance(intr, dict): # Submodule case save_function_effect(intr) else: fe = FunctionEffects(intr) IntrinsicArgumentEffects[intr] = fe if isinstance(intr, intrinsic.Class): save_function_effect(intr.fields) for module in MODULES.values(): save_function_effect(module) class ArgumentEffects(ModuleAnalysis): """Gathers inter-procedural effects on function arguments.""" def __init__(self): self.result = DiGraph() self.node_to_functioneffect = IntrinsicArgumentEffects.copy() for fe in IntrinsicArgumentEffects.values(): self.result.add_node(fe) super(ArgumentEffects, self).__init__(Aliases, GlobalDeclarations) def prepare(self, node): """ Initialise arguments effects as this analyse is inter-procedural. Initialisation done for Pythonic functions and default value set for user defined functions. """ super(ArgumentEffects, self).prepare(node) for n in self.global_declarations.values(): fe = FunctionEffects(n) self.node_to_functioneffect[n] = fe self.result.add_node(fe) def run(self, node): result = super(ArgumentEffects, self).run(node) candidates = set(result) while candidates: function = candidates.pop() for ue in enumerate(function.update_effects): update_effect_idx, update_effect = ue if not update_effect: continue for pred in result.predecessors(function): edge = result.edges[pred, function] for fp in enumerate(edge["formal_parameters"]): i, formal_parameter_idx = fp # propagate the impurity backward if needed. 
# Afterward we may need another graph iteration ith_effectiv = edge["effective_parameters"][i] if(formal_parameter_idx == update_effect_idx and not pred.update_effects[ith_effectiv]): pred.update_effects[ith_effectiv] = True candidates.add(pred) self.result = {f.func: f.update_effects for f in result} return self.result def argument_index(self, node): while isinstance(node, ast.Subscript): node = node.value for node_alias in self.aliases[node]: while isinstance(node_alias, ast.Subscript): node_alias = node_alias.value if node_alias in self.current_arguments: return self.current_arguments[node_alias] if node_alias in self.current_subscripted_arguments: return self.current_subscripted_arguments[node_alias] return -1 def visit_FunctionDef(self, node): self.current_function = self.node_to_functioneffect[node] self.current_arguments = {arg: i for i, arg in enumerate(node.args.args)} self.current_subscripted_arguments = dict() assert self.current_function in self.result self.generic_visit(node) def visit_For(self, node): ai = self.argument_index(node.iter) if ai >= 0: self.current_subscripted_arguments[node.target] = ai self.generic_visit(node) def visit_AugAssign(self, node): n = self.argument_index(node.target) if n >= 0: self.current_function.update_effects[n] = True self.generic_visit(node) def visit_Assign(self, node): for t in node.targets: if isinstance(t, ast.Subscript): n = self.argument_index(t) if n >= 0: self.current_function.update_effects[n] = True self.generic_visit(node) def visit_Call(self, node): for i, arg in enumerate(node.args): n = self.argument_index(arg) if n >= 0: func_aliases = self.aliases[node.func] # pessimistic case: no alias found if func_aliases is None: self.current_function.update_effects[n] = True continue # expand argument if any func_aliases = reduce( lambda x, y: x + ( # all functions list(self.node_to_functioneffect.keys()) if (isinstance(y, ast.Name) and self.argument_index(y) >= 0) else [y]), func_aliases, list()) for func_alias 
in func_aliases: # special hook for binded functions if isinstance(func_alias, ast.Call): bound_name = func_alias.args[0].id func_alias = self.global_declarations[bound_name] if func_alias is intrinsic.UnboundValue: continue if func_alias not in self.node_to_functioneffect: continue if func_alias is MODULES['functools']['partial']: base_func_aliases = self.aliases[node.args[0]] fe = self.node_to_functioneffect[func_alias] if len(base_func_aliases) == 1: base_func_alias = next(iter(base_func_aliases)) fe = self.node_to_functioneffect.get( base_func_alias, fe) else: fe = self.node_to_functioneffect[func_alias] predecessors = self.result.predecessors(fe) if self.current_function not in predecessors: self.result.add_edge( self.current_function, fe, effective_parameters=[], formal_parameters=[]) edge = self.result.edges[self.current_function, fe] edge["effective_parameters"].append(n) edge["formal_parameters"].append(i) self.generic_visit(node) pythran-0.10.0+ds2/pythran/analyses/argument_read_once.py000066400000000000000000000214331416264035500234510ustar00rootroot00000000000000""" ArgumentReadOnce counts the usages of each argument of each function. """ from pythran.analyses.aliases import Aliases from pythran.analyses.global_declarations import GlobalDeclarations from pythran.passmanager import ModuleAnalysis from pythran.tables import MODULES import pythran.intrinsic as intrinsic import gast as ast from functools import reduce class ArgumentReadOnce(ModuleAnalysis): """ Counts the usages of each argument of each function. Attributes ---------- result : {FunctionEffects} Number of use for each argument of each function. node_to_functioneffect : {???: ???} FunctionDef ast node to function effect binding. 
""" class FunctionEffects(object): def __init__(self, node): self.func = node self.dependencies = lambda ctx: 0 if isinstance(node, ast.FunctionDef): self.read_effects = [-1] * len(node.args.args) elif isinstance(node, intrinsic.Intrinsic): self.read_effects = [ 1 if isinstance(x, intrinsic.ReadOnceEffect) else 2 for x in node.argument_effects] elif isinstance(node, ast.alias): self.read_effects = [] else: raise NotImplementedError class ConstructorEffects(object): def __init__(self, node): self.func = node self.dependencies = lambda ctx: 0 self.read_effects = [0] class Context(object): def __init__(self, function, index, path, global_dependencies): self.function = function self.index = index self.path = path self.global_dependencies = global_dependencies def __init__(self): """ Basic initialiser for class attributes. """ self.result = set() self.node_to_functioneffect = dict() super(ArgumentReadOnce, self).__init__(Aliases, GlobalDeclarations) def prepare(self, node): """ Initialise arguments effects as this analysis in inter-procedural. Initialisation done for Pythonic functions and default values set for user defined functions. """ super(ArgumentReadOnce, self).prepare(node) # global functions init for n in self.global_declarations.values(): fe = ArgumentReadOnce.FunctionEffects(n) self.node_to_functioneffect[n] = fe self.result.add(fe) # Pythonic functions init def save_effect(module): """ Recursively save read once effect for Pythonic functions. 
""" for intr in module.values(): if isinstance(intr, dict): # Submodule case save_effect(intr) else: fe = ArgumentReadOnce.FunctionEffects(intr) self.node_to_functioneffect[intr] = fe self.result.add(fe) if isinstance(intr, intrinsic.Class): # Class case save_effect(intr.fields) for module in MODULES.values(): save_effect(module) def run(self, node): result = super(ArgumentReadOnce, self).run(node) for fun in result: for i in range(len(fun.read_effects)): self.recursive_weight(fun, i, set()) self.result = {f.func: f.read_effects for f in result} return self.result def recursive_weight(self, function, index, predecessors): # TODO : Find out why it happens in some cases if len(function.read_effects) <= index: return 0 if function.read_effects[index] == -1: # In case of recursive/cyclic calls cycle = function in predecessors predecessors.add(function) if cycle: function.read_effects[index] = 2 * function.dependencies( ArgumentReadOnce.Context(function, index, predecessors, False)) else: function.read_effects[index] = function.dependencies( ArgumentReadOnce.Context(function, index, predecessors, True)) return function.read_effects[index] def argument_index(self, node): while isinstance(node, ast.Subscript): node = node.value if node in self.aliases: for n_alias in self.aliases[node]: try: return self.current_function.func.args.args.index(n_alias) except ValueError: pass return -1 def local_effect(self, node, effect): index = self.argument_index(node) return lambda ctx: effect if index == ctx.index else 0 def generic_visit(self, node): lambdas = [self.visit(child) for child in ast.iter_child_nodes(node)] return lambda ctx: sum(l(ctx) for l in lambdas) def visit_FunctionDef(self, node): self.current_function = self.node_to_functioneffect[node] assert self.current_function in self.result self.current_function.dependencies = self.generic_visit(node) def visit_Return(self, node): dep = self.generic_visit(node) if isinstance(node.value, ast.Name): local = 
self.local_effect(node.value, 2) return lambda ctx: dep(ctx) + local(ctx) else: return dep def visit_Assign(self, node): dep = self.generic_visit(node) local = [self.local_effect(t, 2) for t in node.targets if isinstance(t, ast.Subscript)] return lambda ctx: dep(ctx) + sum(l(ctx) for l in local) def visit_AugAssign(self, node): dep = self.generic_visit(node) local = self.local_effect(node.target, 2) return lambda ctx: dep(ctx) + local(ctx) def visit_For(self, node): iter_local = self.local_effect(node.iter, 1) iter_deps = self.visit(node.iter) body_deps = [self.visit(stmt) for stmt in node.body] else_deps = [self.visit(stmt) for stmt in node.orelse] return lambda ctx: iter_local(ctx) + iter_deps(ctx) + 2 * sum( l(ctx) for l in body_deps) + sum(l(ctx) for l in else_deps) def visit_While(self, node): test_deps = self.visit(node.test) body_deps = [self.visit(stmt) for stmt in node.body] else_deps = [self.visit(stmt) for stmt in node.orelse] return lambda ctx: test_deps(ctx) + 2 * sum( l(ctx) for l in body_deps) + sum(l(ctx) for l in else_deps) def visit_If(self, node): test_deps = self.visit(node.test) body_deps = [self.visit(stmt) for stmt in node.body] else_deps = [self.visit(stmt) for stmt in node.orelse] return lambda ctx: test_deps(ctx) + max(sum( l(ctx) for l in body_deps), sum(l(ctx) for l in else_deps)) def visit_Call(self, node): l0 = self.generic_visit(node) index_corres = dict() func = None for i, arg in enumerate(node.args): n = self.argument_index(arg) if n >= 0: func_aliases = self.aliases[node.func] # expand argument if any func_aliases = reduce( lambda x, y: x + ( # all functions list(self.node_to_functioneffect.keys()) if (isinstance(y, ast.Name) and self.argument_index(y) >= 0) else [y]), func_aliases, list()) for func_alias in func_aliases: # special hook for binded functions if isinstance(func_alias, ast.Call): bound_name = func_alias.args[0].id func_alias = self.global_declarations[bound_name] if func_alias is intrinsic.UnboundValue: continue if 
func_alias not in self.node_to_functioneffect: continue func = self.node_to_functioneffect[func_alias] index_corres[n] = i def merger(ctx): base = l0(ctx) if (ctx.index in index_corres) and ctx.global_dependencies: rec = self.recursive_weight(func, index_corres[ctx.index], ctx.path) else: rec = 0 return base + rec return merger def visit_Subscript(self, node): dep = self.generic_visit(node) local = self.local_effect(node.value, 2) return lambda ctx: dep(ctx) + local(ctx) def visit_comprehension(self, node): dep = self.generic_visit(node) local = self.local_effect(node.iter, 1) return lambda ctx: dep(ctx) + local(ctx) pythran-0.10.0+ds2/pythran/analyses/ast_matcher.py000066400000000000000000000163031416264035500221220ustar00rootroot00000000000000""" Module to looks for a specified pattern in a given AST. """ from gast import AST, iter_fields, NodeVisitor, Dict, Set from itertools import permutations from math import isnan MAX_UNORDERED_LENGTH = 10 class DamnTooLongPattern(Exception): """ Exception for long dict/set comparison to reduce compile time. """ class Placeholder(AST): """ Class to save information from ast while check for pattern. """ def __init__(self, identifier, type=None): """ Placehorder are identified using an identifier. """ self.id = identifier self.type = type super(Placeholder, self).__init__() class AST_any(AST): """ Class to specify we don't care about a field value in ast. """ class AST_or(AST): """ Class to specify multiple possibles value for a given field in ast. Attributes ---------- args: [ast field value] List of possible value for a field of an ast. """ def __init__(self, *args): """ Initialiser to keep track of arguments. """ self.args = args super(AST_or, self).__init__() class Check(NodeVisitor): """ Checker for ast <-> pattern. NodeVisitor is needed for specific behavior checker. Attributes ---------- node : AST node we want to compare with pattern placeholders : [AST] list of placeholder value for later comparison or replacement. 
""" def __init__(self, node, placeholders): """ Initialize attributes. """ self.node = node self.placeholders = placeholders def check_list(self, node_list, pattern_list): """ Check if list of node are equal. """ if len(node_list) != len(pattern_list): return False return all(Check(node_elt, self.placeholders).visit(pattern_elt) for node_elt, pattern_elt in zip(node_list, pattern_list)) def visit_Placeholder(self, pattern): """ Save matching node or compare it with the existing one. FIXME : What if the new placeholder is a better choice? """ if (pattern.id in self.placeholders and not Check(self.node, self.placeholders).visit( self.placeholders[pattern.id])): return False elif pattern.type is not None and not isinstance(self.node, pattern.type): return False else: self.placeholders[pattern.id] = self.node return True @staticmethod def visit_AST_any(_): """ Every node match with it. """ return True def visit_AST_or(self, pattern): """ Match if any of the or content match with the other node. """ return any(self.field_match(self.node, value_or) for value_or in pattern.args) def visit_Set(self, pattern): """ Set have unordered values. """ if not isinstance(self.node, Set): return False if len(pattern.elts) > MAX_UNORDERED_LENGTH: raise DamnTooLongPattern("Pattern for Set is too long") return any(self.check_list(self.node.elts, pattern_elts) for pattern_elts in permutations(pattern.elts)) def visit_Dict(self, pattern): """ Dict can match with unordered values. 
""" if not isinstance(self.node, Dict): return False if len(pattern.keys) > MAX_UNORDERED_LENGTH: raise DamnTooLongPattern("Pattern for Dict is too long") for permutation in permutations(range(len(self.node.keys))): for i, value in enumerate(permutation): if not self.field_match(self.node.keys[i], pattern.keys[value]): break else: pattern_values = [pattern.values[i] for i in permutation] return self.check_list(self.node.values, pattern_values) return False def field_match(self, node_field, pattern_field): """ Check if two fields match. Field match if: - If it is a list, all values have to match. - If if is a node, recursively check it. - Otherwise, check values are equal. """ if isinstance(pattern_field, list): return self.check_list(node_field, pattern_field) if isinstance(pattern_field, AST): return Check(node_field, self.placeholders).visit(pattern_field) return Check.strict_eq(pattern_field, node_field) @staticmethod def strict_eq(f0, f1): if f0 == f1: return True try: return isnan(f0) and isnan(f1) except TypeError: return False def generic_visit(self, pattern): """ Check if the pattern match with the checked node. a node match if: - type match - all field match """ if not isinstance(pattern, type(self.node)): return False return all(self.field_match(value, getattr(pattern, field)) for field, value in iter_fields(self.node)) class ASTMatcher(NodeVisitor): """ Visitor to gather node matching with a given pattern. Examples -------- >>> import gast as ast >>> code = "[(i, j) for i in range(a) for j in range(b)]" >>> pattern = ast.Call(func=ast.Name('range', ctx=ast.Load(), ... annotation=None, ... type_comment=None), ... args=AST_any(), keywords=[]) >>> len(ASTMatcher(pattern).search(ast.parse(code))) 2 >>> code = "[(i, j) for i in range(a) for j in range(b)]" >>> pattern = ast.Call(func=ast.Name(id=AST_or('range', 'range'), ... ctx=ast.Load(), ... annotation=None, ... type_comment=None), ... 
args=AST_any(), keywords=[]) >>> len(ASTMatcher(pattern).search(ast.parse(code))) 2 >>> code = "{1:2, 3:4}" >>> pattern = ast.Dict(keys=[ast.Constant(3, None), ast.Constant(1, None)], ... values=[ast.Constant(4, None), ... ast.Constant(2, None)]) >>> len(ASTMatcher(pattern).search(ast.parse(code))) 1 >>> code = "{1, 2, 3}" >>> pattern = ast.Set(elts=[ast.Constant(3, None), ... ast.Constant(2, None), ... ast.Constant(1, None)]) >>> len(ASTMatcher(pattern).search(ast.parse(code))) 1 """ def __init__(self, pattern): """ Basic initialiser saving pattern and initialising result set. """ self.pattern = pattern self.result = set() super(ASTMatcher, self).__init__() def visit(self, node): """ Visitor looking for matching between current node and pattern. If it match, save it but whatever happen, keep going. """ if Check(node, dict()).visit(self.pattern): self.result.add(node) self.generic_visit(node) def search(self, node): """ Facility to get values of the matcher for a given node. """ self.visit(node) return self.result pythran-0.10.0+ds2/pythran/analyses/cfg.py000066400000000000000000000137211416264035500203700ustar00rootroot00000000000000""" Computes the Control Flow Graph of a function. """ from pythran.passmanager import FunctionAnalysis from pythran.utils import isnum from pythran.graph import DiGraph import gast as ast def is_true_predicate(node): # FIXME: there may be more patterns here if isnum(node) and node.value: return True if isinstance(node, ast.Attribute) and node.attr == 'True': return True if isinstance(node, (ast.List, ast.Tuple, ast.Set)) and node.elts: return True if isinstance(node, ast.Dict) and node.keys: return True return False class CFG(FunctionAnalysis): """ Computes the Control Flow Graph of a function. The processing of a node yields a pair containing * the OUT nodes, to be linked with the IN nodes of the successor * the RAISE nodes, nodes that stop the control flow (exception/break/...) """ #: The sink node in the control flow graph. 
#: #: The predecessors of this node are those AST nodes that terminate #: control flow without a return statement. NIL = object() def __init__(self): self.result = DiGraph() super(CFG, self).__init__() def visit_FunctionDef(self, node): """OUT = node, RAISES = ()""" # the function itself is the entry point self.result.add_node(node) currs = (node,) for n in node.body: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, _ = self.visit(n) # add an edge to NIL for nodes that end the control flow # without a return self.result.add_node(CFG.NIL) for curr in currs: self.result.add_edge(curr, CFG.NIL) return (node,), () def visit_Pass(self, node): """OUT = node, RAISES = ()""" return (node,), () # All these nodes have the same behavior as pass visit_Assign = visit_AugAssign = visit_Import = visit_Pass visit_Expr = visit_Print = visit_ImportFrom = visit_Pass visit_Yield = visit_Delete = visit_Pass def visit_Return(self, node): """OUT = (), RAISES = ()""" return (), () def visit_For(self, node): """ OUT = (node,) + last body statements RAISES = body's that are not break or continue """ currs = (node,) break_currs = tuple() raises = () # handle body for n in node.body: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) for nraise in nraises: if isinstance(nraise, ast.Break): break_currs += (nraise,) elif isinstance(nraise, ast.Continue): self.result.add_edge(nraise, node) else: raises += (nraise,) # add the backward loop for curr in currs: self.result.add_edge(curr, node) # the else statement if needed if node.orelse: for n in node.orelse: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) else: currs = node, # while only if isinstance(node, ast.While): if is_true_predicate(node.test): return break_currs, raises else: return break_currs + currs, raises # for only return break_currs + currs, raises visit_While = visit_For def visit_If(self, 
node): """ OUT = true branch U false branch RAISES = true branch U false branch """ currs = (node,) raises = () # true branch for n in node.body: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) raises += nraises # false branch tcurrs = currs traises = raises currs = (node,) for n in node.orelse: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) raises = traises + nraises if is_true_predicate(node.test): return tcurrs, raises return tcurrs + currs, raises def visit_Raise(self, node): """OUT = (), RAISES = (node)""" return (), (node,) visit_Break = visit_Continue = visit_Raise def visit_Assert(self, node): """OUT = RAISES = (node)""" return (node,), (node,) def visit_Try(self, node): """ OUT = body's U handler's RAISES = handler's this equation is not has good has it could be... but we need type information to be more accurate """ currs = (node,) raises = () for handler in node.handlers: self.result.add_node(handler) for n in node.body: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) for nraise in nraises: if isinstance(nraise, ast.Raise): for handler in node.handlers: self.result.add_edge(nraise, handler) else: raises += (nraise,) for handler in node.handlers: ncurrs, nraises = self.visit(handler) currs += ncurrs raises += nraises return currs, raises def visit_ExceptHandler(self, node): """OUT = body's, RAISES = body's""" currs = (node,) raises = () for n in node.body: self.result.add_node(n) for curr in currs: self.result.add_edge(curr, n) currs, nraises = self.visit(n) raises += nraises return currs, raises pythran-0.10.0+ds2/pythran/analyses/constant_expressions.py000066400000000000000000000066651416264035500241350ustar00rootroot00000000000000""" ConstantExpressions gathers constant expression. 
""" from pythran.analyses.aliases import Aliases from pythran.analyses.globals_analysis import Globals from pythran.analyses.locals_analysis import Locals from pythran.analyses.pure_expressions import PureExpressions from pythran.intrinsic import FunctionIntr from pythran.passmanager import NodeAnalysis import gast as ast class ConstantExpressions(NodeAnalysis): """Identify constant expressions.""" def __init__(self): self.result = set() super(ConstantExpressions, self).__init__(Globals, Locals, PureExpressions, Aliases) def add(self, node): self.result.add(node) return True def visit_BoolOp(self, node): return all([self.visit(x) for x in node.values]) and self.add(node) def visit_BinOp(self, node): rec = all([self.visit(x) for x in (node.left, node.right)]) return rec and self.add(node) def visit_UnaryOp(self, node): return self.visit(node.operand) and self.add(node) def visit_IfExp(self, node): rec = all([self.visit(x) for x in (node.test, node.body, node.orelse)]) return rec and self.add(node) def visit_Compare(self, node): rec = all([self.visit(x) for x in ([node.left] + node.comparators)]) return rec and self.add(node) def visit_Call(self, node): rec = all([self.visit(x) for x in (node.args + [node.func])]) return rec and self.add(node) visit_Constant = add def visit_Subscript(self, node): rec = all([self.visit(x) for x in (node.value, node.slice)]) rec = isinstance(node.ctx, ast.Load) and rec return rec and self.add(node) def visit_Name(self, node): if node in self.aliases: # params and store are not constants if not isinstance(node.ctx, ast.Load): return False # if we can alias on multiple value, it is not constant elif len(self.aliases[node]) > 1: return False # if it is not a globals, it depends on variable so it is not # constant elif node.id not in self.globals: return False # if it is defined in the current function, it is not constant elif node.id in self.locals[node]: return False def is_function(x): return isinstance(x, (FunctionIntr, 
ast.FunctionDef, ast.alias)) pure_fun = all(alias in self.pure_expressions and is_function(alias) for alias in self.aliases[node]) return pure_fun else: return False def visit_Attribute(self, node): return Aliases.access_path(node).isconst() and self.add(node) def visit_Dict(self, node): rec = all([self.visit(x) for x in (node.keys + node.values)]) return rec and self.add(node) def visit_List(self, node): return all([self.visit(x) for x in node.elts]) and self.add(node) visit_Tuple = visit_List visit_Set = visit_List def visit_Slice(self, node): return all([x is None or self.visit(x) for x in (node.lower, node.upper, node.step)]) and self.add(node) def visit_Index(self, node): return self.visit(node.value) and self.add(node) pythran-0.10.0+ds2/pythran/analyses/dependencies.py000066400000000000000000000112271416264035500222560ustar00rootroot00000000000000""" Dependencies lists the functions and types required by a function """ from pythran.passmanager import ModuleAnalysis from pythran.conversion import demangle import gast as ast import math class Dependencies(ModuleAnalysis): OpMap = { # binop ast.Add: ('operator', 'add'), ast.Sub: ('operator', 'sub'), ast.Mult: ('operator', 'mul'), ast.Div: ('operator', 'div'), ast.Mod: ('operator', 'mod'), ast.Pow: ('operator', 'pow'), ast.LShift: ('operator', 'lshift'), ast.RShift: ('operator', 'rshift'), ast.BitOr: ('operator', 'or_'), ast.BitXor: ('operator', 'xor_'), ast.BitAnd: ('operator', 'and_'), ast.MatMult: ('operator', 'matmul'), ast.FloorDiv: ('operator', 'floordiv'), # unaryop ast.Invert: ('operator', 'invert'), ast.Not: ('operator', 'not_'), ast.UAdd: ('operator', 'pos'), ast.USub: ('operator', 'neg'), # cmpop ast.Eq: ('operator', 'eq'), ast.NotEq: ('operator', 'ne'), ast.Lt: ('operator', 'lt'), ast.LtE: ('operator', 'le'), ast.Gt: ('operator', 'gt'), ast.GtE: ('operator', 'ge'), ast.Is: ('operator', 'is_'), ast.IsNot: ('operator', 'is_not'), ast.In: ('operator', 'contains'), ast.NotIn: ('operator', 'contains'), } 
IOpMap = { ast.Add: ('operator', 'iadd'), ast.Sub: ('operator', 'isub'), ast.Mult: ('operator', 'imul'), ast.Div: ('operator', 'idiv'), ast.Mod: ('operator', 'imod'), ast.Pow: ('operator', 'ipow'), ast.LShift: ('operator', 'ilshift'), ast.RShift: ('operator', 'irshift'), ast.BitOr: ('operator', 'ior'), ast.BitXor: ('operator', 'ixor'), ast.BitAnd: ('operator', 'iand'), ast.MatMult: ('operator', 'imatmul'), ast.FloorDiv: ('operator', 'ifloordiv'), } def __init__(self): self.result = set() super(Dependencies, self).__init__() def visit_List(self, node): self.result.add(('builtins', 'list')) self.generic_visit(node) def visit_Tuple(self, node): self.result.add(('builtins', 'tuple')) self.generic_visit(node) def visit_Set(self, node): self.result.add(('builtins', 'set')) self.generic_visit(node) def visit_Dict(self, node): self.result.add(('builtins', 'dict')) self.generic_visit(node) def visit_Slice(self, node): self.result.add(('types', 'slice')) self.generic_visit(node) def visit_And(self, node): self.result.add(('builtins', 'pythran', 'and')) self.generic_visit(node) def visit_Or(self, node): self.result.add(('builtins', 'pythran', 'or')) self.generic_visit(node) def visit_BinOp(self, node): self.visit(node.left) self.result.add(Dependencies.OpMap[type(node.op)]) self.visit(node.right) def visit_UnaryOp(self, node): self.result.add(Dependencies.OpMap[type(node.op)]) self.visit(node.operand) def visit_Compare(self, node): self.visit(node.left) for op in node.ops: self.result.add(Dependencies.OpMap[type(op)]) for comparator in node.comparators: self.visit(comparator) def visit_AugAssign(self, node): self.visit(node.target) # because of the way type inference turns augassign into assign self.result.add(Dependencies.OpMap[type(node.op)]) self.result.add(Dependencies.IOpMap[type(node.op)]) self.visit(node.value) def visit_Print(self, node): self.result.add(('builtins', 'print')) self.generic_visit(node) def visit_Assert(self, node): self.result.add(('builtins', 
'assert')) self.generic_visit(node) def visit_Yield(self, node): self.result.add(('utils', 'yield')) self.generic_visit(node) def visit_Constant(self, node): if node.value is None: self.result.add(('builtins', 'None')) elif isinstance(node.value, str): self.result.add(('types', 'str')) elif isinstance(node.value, complex): self.result.add(('types', 'complex')) elif math.isnan(node.value): self.result.add(('numpy', 'nan')) elif math.isinf(node.value): self.result.add(('numpy', 'inf')) def visit_Attribute(self, node): def rec(n): if isinstance(n, ast.Name): return demangle(n.id), elif isinstance(n, ast.Attribute): return rec(n.value) + (n.attr,) attr = rec(node) attr and self.result.add(attr) pythran-0.10.0+ds2/pythran/analyses/extended_syntax_check.py000066400000000000000000000137331416264035500241770ustar00rootroot00000000000000""" ExtendedSyntaxCheck performs various syntax checks on the pythran AST. """ from pythran.passmanager import ModuleAnalysis from pythran.analyses import StrictAliases, ArgumentEffects from pythran.syntax import PythranSyntaxError from pythran.intrinsic import ConstantIntr, FunctionIntr from pythran import metadata import gast as ast def is_global_constant(node): if isinstance(node, ConstantIntr): return True if not isinstance(node, ast.FunctionDef): return False return metadata.get(node.body[0], metadata.StaticReturn) def is_global(node): return (isinstance(node, (FunctionIntr, ast.FunctionDef)) or is_global_constant(node)) class ExtendedSyntaxCheck(ModuleAnalysis): """ Perform advanced syntax checking, based on strict aliases analysis: - is there a function redefinition? - is there a function call that does not match the called expression arity? - is there an operation that updates a global variable? 
""" def __init__(self): self.result = None self.update = False self.inassert = False self.functions = set() ModuleAnalysis.__init__(self, StrictAliases, ArgumentEffects) def check_global_with_side_effect(self, node, arg): if not isinstance(arg, ast.Call): return try: aliases = self.strict_aliases[arg.func] except KeyError: return for alias in aliases: if is_global_constant(alias): raise PythranSyntaxError( ("Cannot modify '{}': global variables are constant " "in pythran.").format(alias.name), arg.func) def visit_FunctionDef(self, node): if node.name in self.functions: raise PythranSyntaxError("Function {} redefined".format( node.name), node) else: self.functions.add(node.name) self.generic_visit(node) def check_assert_with_side_effect(self, node, arg): if self.inassert: raise PythranSyntaxError("Cannot call a function with side effect " "in an assert", node) def visit_Assert(self, node): self.inassert = True self.generic_visit(node) self.inassert = False def is_immutable_constant(self, node): if isinstance(node, ast.Constant): return True if isinstance(node, ast.Tuple): return all(self.is_immutable_constant(elt) for elt in node.elts) if isinstance(node, ast.UnaryOp): return self.is_immutable_constant(node.operand) if isinstance(node, ast.Call): target = getattr(node, 'func', node) try: aliases = self.strict_aliases[target] except KeyError: return False if not aliases: return False if all(is_global_constant(alias) for alias in aliases): return True if isinstance(node, ast.Attribute): target = getattr(node, 'func', node) try: aliases = self.strict_aliases[target] except KeyError: return False if not aliases: return False if all(is_global(alias) for alias in aliases): return True if isinstance(node, ast.Name): try: aliases = self.strict_aliases[node] except KeyError: return False if all(isinstance(alias, ast.FunctionDef) for alias in aliases): return True return False def visit_arguments(self, node): self.generic_visit(node) for arg_default in node.defaults: if not 
self.is_immutable_constant(arg_default): raise PythranSyntaxError( "Pythran does not support mutable default values. Use a " "`None' default and set the value at runtime instead.", arg_default) def visit_Call(self, node): self.generic_visit(node) func = node.func try: aliases = self.strict_aliases[func] except KeyError: raise PythranSyntaxError( "Call to unknown function `{}`, it's a trap!" .format(getattr(func, 'id', None) or func), node) argument_effects = set() for alias in aliases: # look for effect on arguments to prepare check on globals try: func_aes = self.argument_effects[alias] for i, effect in enumerate(func_aes): if effect: argument_effects.add(i) except KeyError: pass if not isinstance(alias, ast.FunctionDef): continue ubound = len(alias.args.args) lbound = ubound - len(alias.args.defaults) call_args_count = len(node.args) + len(node.keywords) if lbound <= call_args_count <= ubound: continue if lbound == ubound: msg = 'Invalid call to {}: expected {} arguments, got {}' msg = msg.format(alias.name, len(alias.args.args), len(node.args) ) else: msg = ('Invalid {} call: ' 'expected between {} and {} arguments, got {}') msg = msg.format(alias.name, lbound, ubound, len(node.args) ) raise PythranSyntaxError(msg, node) # check for effects on globals for i, arg in enumerate(node.args): if i not in argument_effects: continue self.check_global_with_side_effect(node, arg) self.check_assert_with_side_effect(node, arg) pythran-0.10.0+ds2/pythran/analyses/fixed_size_list.py000066400000000000000000000051401416264035500230110ustar00rootroot00000000000000""" Whether a list usage makes it a candidate for fized-size-list This could be a type information, but it seems easier to implement it that way """ from pythran.passmanager import FunctionAnalysis from pythran.tables import MODULES import gast as ast class FixedSizeList(FunctionAnalysis): def __init__(self): self.result = set() from pythran.analyses import Aliases, DefUseChains, Ancestors from pythran.analyses import 
ArgumentEffects super(FixedSizeList, self).__init__(Aliases, DefUseChains, Ancestors, ArgumentEffects) def is_fixed_size_list_def(self, node): if isinstance(node, ast.List): return True if not isinstance(node, ast.Call): return False return all(alias == MODULES['builtins']['list'] for alias in self.aliases[node.func]) def is_safe_call(self, node, index): func_aliases = list(self.aliases[node]) for alias in func_aliases: if isinstance(alias, ast.Call): if not self.is_safe_call(alias.args[0], index + len(alias.args) - 1): return False if alias in self.argument_effects: func_aes = self.argument_effects[alias] if func_aes[index]: return False return True def is_safe_use(self, use): parent = self.ancestors[use.node][-1] OK = ast.Subscript, ast.BinOp if isinstance(parent, OK): return True if isinstance(parent, ast.Call): n = parent.args.index(use.node) return self.is_safe_call(parent.func, n) return False def visit_Assign(self, node): self.generic_visit(node) if not self.is_fixed_size_list_def(node.value): return for target in node.targets: def_ = self.def_use_chains.chains[target] if any(not self.is_safe_use(u) for u in def_.users()): break if not isinstance(target, ast.Name): continue if len([d for d in self.def_use_chains.locals[self.ctx.function] if d.name() == target.id]) > 1: break else: self.result.add(node.value) def visit_Call(self, node): self.generic_visit(node) for i, arg in enumerate(node.args): if not self.is_fixed_size_list_def(arg): continue if self.is_safe_call(node.func, i): self.result.add(arg) pythran-0.10.0+ds2/pythran/analyses/global_declarations.py000066400000000000000000000022711416264035500236170ustar00rootroot00000000000000""" GlobalDeclarations gathers top-level declarations. """ from pythran.passmanager import ModuleAnalysis from beniget import DefUseChains class SilentDefUseChains(DefUseChains): def unbound_identifier(self, name, node): pass class GlobalDeclarations(ModuleAnalysis): """ Gather all kind of identifier defined at global scope. 
>>> import gast as ast >>> from pythran import passmanager >>> from pythran.analyses import GlobalDeclarations >>> node = ast.parse(''' ... import math ... import math as maths ... from math import cos ... c = 12 ... def foo(a): ... b = a + 1''') >>> pm = passmanager.PassManager("test") >>> sorted(pm.gather(GlobalDeclarations, node).keys()) ['c', 'cos', 'foo', 'math', 'maths'] """ def __init__(self): """ Result is an identifier with matching definition. """ self.result = dict() super(GlobalDeclarations, self).__init__() def visit_Module(self, node): """ Import module define a new variable name. """ duc = SilentDefUseChains() duc.visit(node) self.result = {d.name(): d.node for d in duc.locals[node]} pythran-0.10.0+ds2/pythran/analyses/global_effects.py000066400000000000000000000104641416264035500225710ustar00rootroot00000000000000""" GlobalEffects computes function effect on global state. """ from pythran.analyses.aliases import Aliases from pythran.analyses.global_declarations import GlobalDeclarations from pythran.passmanager import ModuleAnalysis from pythran.tables import MODULES from pythran.graph import DiGraph import pythran.intrinsic as intrinsic import gast as ast from functools import reduce class GlobalEffects(ModuleAnalysis): """Add a flag on each function that updates a global variable.""" class FunctionEffect(object): def __init__(self, node): self.func = node if isinstance(node, ast.FunctionDef): self.global_effect = False elif isinstance(node, intrinsic.Intrinsic): self.global_effect = node.global_effects elif isinstance(node, ast.alias): self.global_effect = False elif isinstance(node, str): self.global_effect = False elif isinstance(node, intrinsic.Class): self.global_effect = False elif isinstance(node, intrinsic.UnboundValueType): self.global_effect = True # conservative choice else: print(type(node), node) raise NotImplementedError def __init__(self): self.result = DiGraph() self.node_to_functioneffect = dict() super(GlobalEffects, 
self).__init__(Aliases, GlobalDeclarations) def prepare(self, node): """ Initialise globals effects as this analyse is inter-procedural. Initialisation done for Pythonic functions and default value set for user defined functions. """ super(GlobalEffects, self).prepare(node) def register_node(module): """ Recursively save globals effect for all functions. """ for v in module.values(): if isinstance(v, dict): # Submodule case register_node(v) else: fe = GlobalEffects.FunctionEffect(v) self.node_to_functioneffect[v] = fe self.result.add_node(fe) if isinstance(v, intrinsic.Class): register_node(v.fields) register_node(self.global_declarations) for module in MODULES.values(): register_node(module) self.node_to_functioneffect[intrinsic.UnboundValue] = \ GlobalEffects.FunctionEffect(intrinsic.UnboundValue) def run(self, node): result = super(GlobalEffects, self).run(node) keep_going = True while keep_going: keep_going = False for function in result: if function.global_effect: for pred in self.result.predecessors(function): if not pred.global_effect: keep_going = pred.global_effect = True self.result = {f.func for f in result if f.global_effect} return self.result def visit_FunctionDef(self, node): self.current_function = self.node_to_functioneffect[node] assert self.current_function in self.result self.generic_visit(node) def visit_Print(self, _): self.current_function.global_effect = True def visit_Call(self, node): # try to get all aliases of the function, if possible # else use [] as a fallback func_aliases = self.aliases[node.func] # expand argument if any func_aliases = reduce( # all funcs lambda x, y: x + (list(self.node_to_functioneffect.keys()) if isinstance(y, ast.Name) else [y]), func_aliases, list()) for func_alias in func_aliases: # special hook for bound functions if isinstance(func_alias, ast.Call): fake_call = ast.Call(func_alias.args[0], func_alias.args[1:], []) self.visit(fake_call) continue # conservative choice if func_alias not in 
self.node_to_functioneffect: func_alias = intrinsic.UnboundValue func_alias = self.node_to_functioneffect[func_alias] self.result.add_edge(self.current_function, func_alias) self.generic_visit(node) pythran-0.10.0+ds2/pythran/analyses/globals_analysis.py000066400000000000000000000007101416264035500231510ustar00rootroot00000000000000""" Globals computes the value of globals(). """ from pythran.analyses.global_declarations import GlobalDeclarations from pythran.passmanager import ModuleAnalysis class Globals(ModuleAnalysis): def __init__(self): self.result = set() super(Globals, self).__init__(GlobalDeclarations) def visit_Module(self, node): self.result = {'builtins', '__dispatch__'}.union(self.global_declarations.keys()) pythran-0.10.0+ds2/pythran/analyses/has_return.py000066400000000000000000000017211416264035500220000ustar00rootroot00000000000000""" HasReturn detects if there's a return or yield statement HasBreak detects if there's a break statement HasContinue detects if there's a continue statement """ from pythran.passmanager import NodeAnalysis class HasReturn(NodeAnalysis): def __init__(self): self.result = False super(HasReturn, self).__init__() def visit_Return(self, _): self.result = True def visit_Yield(self, _): self.result = True class HasBreak(NodeAnalysis): def __init__(self): self.result = False super(HasBreak, self).__init__() def visit_For(self, _): return visit_While = visit_For def visit_Break(self, _): self.result = True class HasContinue(NodeAnalysis): def __init__(self): self.result = False super(HasContinue, self).__init__() def visit_For(self, _): return visit_While = visit_For def visit_Continue(self, _): self.result = True pythran-0.10.0+ds2/pythran/analyses/identifiers.py000066400000000000000000000013061416264035500221320ustar00rootroot00000000000000""" Identifiers gathers all identifiers used in a node """ from pythran.passmanager import NodeAnalysis class Identifiers(NodeAnalysis): """Gather all identifiers used throughout a node.""" 
def __init__(self): self.result = set() super(Identifiers, self).__init__() def visit_Name(self, node): self.result.add(node.id) def visit_FunctionDef(self, node): self.result.add(node.name) self.generic_visit(node) def visit_ImportFrom(self, node): self.generic_visit(node) self.result.add(node.module) def visit_alias(self, node): self.result.add(node.name) if node.asname: self.result.add(node.asname) pythran-0.10.0+ds2/pythran/analyses/immediates.py000066400000000000000000000022621416264035500217500ustar00rootroot00000000000000""" Immediates gathers immediates. For now, only integers within shape are considered as immediates """ import gast as ast from pythran.tables import MODULES from pythran.analyses import Aliases from pythran.passmanager import NodeAnalysis from pythran.utils import pythran_builtin, isnum _make_shape = pythran_builtin('make_shape') class Immediates(NodeAnalysis): def __init__(self): self.result = set() super(Immediates, self).__init__(Aliases) def visit_Call(self, node): func_aliases = self.aliases[node.func] for alias in func_aliases: if getattr(alias, "immediate_arguments", []): for i, arg in enumerate(node.args): if i in alias.immediate_arguments: if isinstance(arg, ast.Constant): self.result.add(arg) if len(func_aliases) == 1 and next(iter(func_aliases)) is _make_shape: self.result.update(a for a in node.args if isnum(a) and isinstance(a.value, int) and a.value >= 0) return return self.generic_visit(node) pythran-0.10.0+ds2/pythran/analyses/imported_ids.py000066400000000000000000000066341416264035500223200ustar00rootroot00000000000000""" ImportedIds gathers identifiers imported by a node. 
""" from pythran.analyses.globals_analysis import Globals from pythran.analyses.locals_analysis import Locals from pythran.passmanager import NodeAnalysis import pythran.metadata as md import gast as ast class ImportedIds(NodeAnalysis): """Gather ids referenced by a node and not declared locally.""" def __init__(self): self.result = set() self.current_locals = set() self.is_list = False self.in_augassign = False super(ImportedIds, self).__init__(Globals, Locals) def visit_Name(self, node): if isinstance(node.ctx, ast.Store) and not self.in_augassign: self.current_locals.add(node.id) elif (node.id not in self.visible_globals and node.id not in self.current_locals): self.result.add(node.id) def visit_FunctionDef(self, node): self.current_locals.add(node.name) current_locals = self.current_locals.copy() self.current_locals.update(arg.id for arg in node.args.args) for stmt in node.body: self.visit(stmt) self.current_locals = current_locals def visit_AnyComp(self, node): current_locals = self.current_locals.copy() for generator in node.generators: self.visit(generator) self.visit(node.elt) self.current_locals = current_locals visit_ListComp = visit_AnyComp visit_SetComp = visit_AnyComp visit_DictComp = visit_AnyComp visit_GeneratorExp = visit_AnyComp def visit_Assign(self, node): # order matter as an assignation # is evaluated before being assigned md.visit(self, node) self.visit(node.value) for target in node.targets: self.visit(target) def visit_AugAssign(self, node): self.in_augassign = True self.generic_visit(node) self.in_augassign = False def visit_Lambda(self, node): current_locals = self.current_locals.copy() self.current_locals.update(arg.id for arg in node.args.args) self.visit(node.body) self.current_locals = current_locals def visit_Import(self, node): self.current_locals.update(alias.name for alias in node.names) def visit_StoredTuple(self, node): for elt in node.elts: if isinstance(elt, ast.Name): self.current_locals.add(elt.id) continue if isinstance(elt, 
ast.Subscript): self.visit(elt) if isinstance(elt, ast.Tuple): self.visit_StoredTuple(node) def visit_Tuple(self, node): if isinstance(node.ctx, ast.Load): self.generic_visit(node) else: self.visit_StoredTuple(node) visit_List = visit_Tuple def visit_ImportFrom(self, node): self.current_locals.update(alias.name for alias in node.names) def visit_Attribute(self, node): pass def prepare(self, node): super(ImportedIds, self).prepare(node) if self.is_list: # so that this pass can be called on list node = node.body[0] self.visible_globals = set(self.globals) - self.locals[node] def run(self, node): if isinstance(node, list): # so that this pass can be called on list self.is_list = True node = ast.If(ast.Constant(1, None), node, []) return super(ImportedIds, self).run(node) pythran-0.10.0+ds2/pythran/analyses/inlinable.py000066400000000000000000000023051416264035500215620ustar00rootroot00000000000000""" Inlinable list function that may be inlined. """ from pythran.passmanager import ModuleAnalysis from pythran.analyses import Identifiers from pythran.analyses.pure_expressions import PureExpressions import pythran.metadata as metadata import gast as ast import copy class Inlinable(ModuleAnalysis): """ Determine set of inlinable function. A function can be inlined if it has only one statement and doesn't recurse on itself. """ def __init__(self): self.result = dict() super(Inlinable, self).__init__(PureExpressions) def visit_FunctionDef(self, node): """ Determine this function definition can be inlined. 
""" if len(node.body) != 1: return sbody = node.body[0] if not isinstance(sbody, (ast.Call, ast.Return)): return # only consider static return if they are pure if metadata.get(sbody, metadata.StaticReturn): if sbody not in self.pure_expressions: return ids = self.gather(Identifiers, sbody) # FIXME : It mark "not inlinable" def foo(foo): return foo if node.name not in ids: self.result[node.name] = copy.deepcopy(node) pythran-0.10.0+ds2/pythran/analyses/is_assigned.py000066400000000000000000000017331416264035500221210ustar00rootroot00000000000000""" Gathers variables that have value modification in the given node. """ from pythran.passmanager import NodeAnalysis import gast as ast class IsAssigned(NodeAnalysis): """ Gather variable that change in given node. It doesn't check constness as it is use for integer so we don't care about arguments effects as it is use by value. """ def __init__(self): """ Basic initialiser. """ self.result = list() super(IsAssigned, self).__init__() def visit_Name(self, node): """ Stored variable have new value. """ if isinstance(node.ctx, ast.Store): self.result.append(node) def visit_Tuple(self, node): if isinstance(node.ctx, ast.Store): def rec(n): if isinstance(n, ast.Name): self.result.append(n) elif isinstance(n, ast.Tuple): for elt in n.elts: rec(elt) rec(node) pythran-0.10.0+ds2/pythran/analyses/lazyness_analysis.py000066400000000000000000000340231416264035500234020ustar00rootroot00000000000000""" LazynessAnalysis returns number of time a name is use. 
""" from pythran.analyses.aliases import Aliases from pythran.analyses.argument_effects import ArgumentEffects from pythran.analyses.identifiers import Identifiers from pythran.analyses.pure_expressions import PureExpressions from pythran.passmanager import FunctionAnalysis from pythran.syntax import PythranSyntaxError from pythran.utils import get_variable, isattr import pythran.metadata as md import pythran.openmp as openmp import gast as ast import sys class LazynessAnalysis(FunctionAnalysis): """ Returns number of time a name is used. +inf if it is use in a loop, if a variable used to compute it is modify before its last use or if it is use in a function call (as it is not an interprocedural analysis) >>> import gast as ast, sys >>> from pythran import passmanager, backend >>> code = "def foo(): c = 1; a = c + 2; c = 2; b = c + c + a; return b" >>> node = ast.parse(code) >>> pm = passmanager.PassManager("test") >>> res = pm.gather(LazynessAnalysis, node) >>> res['a'], res['b'], res['c'] (inf, 1, 2) >>> code = ''' ... def foo(): ... k = 2 ... for i in [1, 2]: ... builtins.print(k) ... k = i ... builtins.print(k)''' >>> node = ast.parse(code) >>> res = pm.gather(LazynessAnalysis, node) >>> (res['i'], res['k']) == (sys.maxsize, 1) True >>> code = ''' ... def foo(): ... k = 2 ... for i in [1, 2]: ... builtins.print(k) ... k = i ... builtins.print(k)''' >>> node = ast.parse(code) >>> res = pm.gather(LazynessAnalysis, node) >>> (res['i'], res['k']) == (sys.maxsize, 2) True >>> code = ''' ... def foo(): ... d = 0 ... for i in [0, 1]: ... for j in [0, 1]: ... k = 1 ... d += k * 2 ... return d''' >>> node = ast.parse(code) >>> res = pm.gather(LazynessAnalysis, node) >>> res['k'] 1 >>> code = ''' ... def foo(): ... k = 2 ... for i in [1, 2]: ... builtins.print(k)''' >>> node = ast.parse(code) >>> res = pm.gather(LazynessAnalysis, node) >>> res['k'] == sys.maxsize True >>> code = ''' ... def foo(): ... k = builtins.sum ... 
builtins.print(k([1, 2]))''' >>> node = ast.parse(code) >>> res = pm.gather(LazynessAnalysis, node) >>> res['k'] 1 """ INF = float('inf') MANY = sys.maxsize def __init__(self): # map variable with maximum count of use in the programm self.result = dict() # map variable with current count of use self.name_count = dict() # map variable to variables needed to compute it self.use = dict() # gather variables which can't be compute later. (variables used # to compute it have changed self.dead = set() # count use of variable before first assignation in the loop # {variable: (count, is_assigned)} self.pre_loop_count = dict() # prevent any form of Forward Substitution at omp frontier self.in_omp = set() self.name_to_nodes = dict() super(LazynessAnalysis, self).__init__(ArgumentEffects, Aliases, PureExpressions) def modify(self, name): # if we modify a variable, all variables that needed it # to be compute are dead and its aliases too dead_vars = [var for var, deps in self.use.items() if name in deps] self.dead.update(dead_vars) for var in dead_vars: dead_aliases = [alias.id for alias in self.name_to_nodes[var] if isinstance(alias, ast.Name)] self.dead.update(dead_aliases) def assign_to(self, node, from_): if isinstance(node, ast.Name): self.name_to_nodes.setdefault(node.id, set()).add(node) # a reassigned variable is not dead anymore if node.id in self.dead: self.dead.remove(node.id) # we keep the bigger possible number of use self.result[node.id] = max(self.result.get(node.id, 0), self.name_count.get(node.id, 0)) # assign variable don't come from before omp pragma anymore self.in_omp.discard(node.id) # count number of use in the loop before first reassign pre_loop = self.pre_loop_count.setdefault(node.id, (0, True)) if not pre_loop[1]: self.pre_loop_count[node.id] = (pre_loop[0], True) # note this variable as modified self.modify(node.id) # prepare a new variable count self.name_count[node.id] = 0 self.use[node.id] = set(from_) def visit(self, node): old_omp = self.in_omp 
omp_nodes = md.get(node, openmp.OMPDirective) if omp_nodes: self.in_omp = set(self.name_count.keys()) super(LazynessAnalysis, self).visit(node) if omp_nodes: new_nodes = set(self.name_count).difference(self.in_omp) for omp_node in omp_nodes: for n in omp_node.deps: if isinstance(n, ast.Name): self.result[n.id] = LazynessAnalysis.INF self.dead.update(new_nodes) self.in_omp = old_omp def visit_FunctionDef(self, node): self.ids = self.gather(Identifiers, node) self.generic_visit(node) def visit_Assign(self, node): md.visit(self, node) self.visit(node.value) ids = self.gather(Identifiers, node.value) for target in node.targets: if isinstance(target, ast.Name): self.assign_to(target, ids) if node.value not in self.pure_expressions: self.result[target.id] = LazynessAnalysis.INF elif isinstance(target, (ast.Subscript)) or isattr(target): # if we modify just a part of a variable, it can't be lazy var_name = get_variable(target) if isinstance(var_name, ast.Name): # variable is modified so other variables that use it dies self.modify(var_name.id) # and this variable can't be lazy self.result[var_name.id] = LazynessAnalysis.INF else: raise PythranSyntaxError("Assign to unknown node", node) def visit_AugAssign(self, node): md.visit(self, node) # augassigned variable can't be lazy self.visit(node.value) if isinstance(node.target, ast.Name): # variable is modified so other variables that use it dies self.modify(node.target.id) # and this variable can't be lazy self.result[node.target.id] = LazynessAnalysis.INF elif isinstance(node.target, ast.Subscript) or isattr(node.target): var_name = get_variable(node.target) # variable is modified so other variables that use it dies self.modify(var_name.id) # and this variable can't be lazy self.result[var_name.id] = LazynessAnalysis.INF else: raise PythranSyntaxError("AugAssign to unknown node", node) def visit_Name(self, node): if isinstance(node.ctx, ast.Load) and node.id in self.use: # we only care about variable local to the function 
def is_loc_var(x): return isinstance(x, ast.Name) and x.id in self.ids alias_names = [var for var in self.aliases[node] if is_loc_var(var)] alias_names = {x.id for x in alias_names} alias_names.add(node.id) for alias in alias_names: if (node.id in self.dead or node.id in self.in_omp): self.result[alias] = LazynessAnalysis.INF elif alias in self.name_count: self.name_count[alias] += 1 # init value as pre_use variable and count it pre_loop = self.pre_loop_count.setdefault(alias, (0, False)) if not pre_loop[1]: self.pre_loop_count[alias] = (pre_loop[0] + 1, False) else: # a variable may alias to assigned value (with a = b, 'b' # alias on 'a' as modifying 'a' will modify 'b' too) pass elif isinstance(node.ctx, ast.Param): self.name_count[node.id] = 0 self.use[node.id] = set() elif isinstance(node.ctx, ast.Store): # Store is only for exception self.name_count[node.id] = LazynessAnalysis.INF self.use[node.id] = set() else: # we ignore globals pass def visit_If(self, node): md.visit(self, node) self.visit(node.test) old_count = dict(self.name_count) old_dead = set(self.dead) old_deps = {a: set(b) for a, b in self.use.items()} # wrap body in a list if we come from an ifExp body = node.body if isinstance(node.body, list) else [node.body] for stmt in body: self.visit(stmt) mid_count = self.name_count mid_dead = self.dead mid_deps = self.use self.name_count = old_count self.dead = old_dead self.use = old_deps # wrap orelse in a list if we come from an ifExp orelse = (node.orelse if isinstance(node.orelse, list) else [node.orelse]) for stmt in orelse: self.visit(stmt) # merge use variable for key in self.use: if key in mid_deps: self.use[key].update(mid_deps[key]) for key in mid_deps: if key not in self.use: self.use[key] = set(mid_deps[key]) # value is the worse case of both branches names = set(self.name_count.keys()).union(mid_count.keys()) for name in names: val_body = mid_count.get(name, 0) val_else = self.name_count.get(name, 0) self.name_count[name] = max(val_body, 
val_else) # dead var are still dead self.dead.update(mid_dead) visit_IfExp = visit_If def visit_loop(self, body): # we start a new loop so we init the "at start of loop use" counter old_pre_count = self.pre_loop_count self.pre_loop_count = dict() # do visit body for stmt in body: self.visit(stmt) # variable use in loop but not assigned are no lazy no_assign = [n for n, (_, a) in self.pre_loop_count.items() if not a] self.result.update(zip(no_assign, [LazynessAnalysis.MANY] * len(no_assign))) # lazyness value is the max of previous lazyness and lazyness for one # iteration in the loop for k, v in self.pre_loop_count.items(): loop_value = v[0] + self.name_count[k] self.result[k] = max(self.result.get(k, 0), loop_value) # variable dead at the end of the loop but use at the beginning of it # can't be lazy dead = self.dead.intersection(self.pre_loop_count) self.result.update(zip(dead, [LazynessAnalysis.INF] * len(dead))) # merge previous count of "use at start of loop" and current state. for k, v in old_pre_count.items(): if v[1] or k not in self.pre_loop_count: self.pre_loop_count[k] = v else: self.pre_loop_count[k] = (v[0] + self.pre_loop_count[k][0], self.pre_loop_count[k][1]) def visit_For(self, node): md.visit(self, node) ids = self.gather(Identifiers, node.iter) if isinstance(node.target, ast.Name): self.assign_to(node.target, ids) self.result[node.target.id] = LazynessAnalysis.INF else: err = "Assignation in for loop not to a Name" raise PythranSyntaxError(err, node) self.visit_loop(node.body) for stmt in node.orelse: self.visit(stmt) def visit_While(self, node): md.visit(self, node) self.visit(node.test) self.visit_loop(node.body) for stmt in node.orelse: self.visit(stmt) def func_args_lazyness(self, func_name, args, node): for fun in self.aliases[func_name]: if isinstance(fun, ast.Call): # call to partial functions self.func_args_lazyness(fun.args[0], fun.args[1:] + args, node) elif fun in self.argument_effects: # when there is an argument effect, apply 
"modify" to the arg for i, arg in enumerate(self.argument_effects[fun]): # check len of args as default is 11 args if arg and len(args) > i: if isinstance(args[i], ast.Name): self.modify(args[i].id) elif isinstance(fun, ast.Name): # it may be a variable to a function. Lazyness will be compute # correctly thanks to aliasing continue else: # conservative choice for arg in args: self.modify(arg) def visit_Call(self, node): """ Compute use of variables in a function call. Each arg is use once and function name too. Information about modified arguments is forwarded to func_args_lazyness. """ md.visit(self, node) for arg in node.args: self.visit(arg) self.func_args_lazyness(node.func, node.args, node) self.visit(node.func) def run(self, node): result = super(LazynessAnalysis, self).run(node) # update result with last name_count values for name, val in self.name_count.items(): old_val = result.get(name, 0) result[name] = max(old_val, val) self.result = result return self.result pythran-0.10.0+ds2/pythran/analyses/literals.py000066400000000000000000000014031416264035500214420ustar00rootroot00000000000000""" Literals lists nodes that are only literals """ from pythran.passmanager import FunctionAnalysis import gast as ast class Literals(FunctionAnalysis): """ Store variable that save only Literals (with no construction cost) """ def __init__(self): self.result = set() super(Literals, self).__init__() def visit_Assign(self, node): # list, dict, set and other are not considered as Literals as they have # a constructor which may be costly and they can be updated using # function call if isinstance(node.value, (ast.Constant, ast.Lambda)): targets = [target for target in node.targets if isinstance(target, ast.Name)] self.result.update(targets) pythran-0.10.0+ds2/pythran/analyses/local_declarations.py000066400000000000000000000042301416264035500234460ustar00rootroot00000000000000""" LocalNameDeclarations gathers name of declarations local to a node. 
LocalNodeDeclarations gathers node of declarations local to a node. """ from pythran.passmanager import NodeAnalysis import gast as ast class LocalNodeDeclarations(NodeAnalysis): """ Gathers all local symbols from a function. It should not be use from outside a function, but can be used on a function (but in that case, parameters are not taken into account) >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(a): ... b = a + 1''') >>> pm = passmanager.PassManager("test") >>> [name.id for name in pm.gather(LocalNodeDeclarations, node)] ['b'] >>> node = ast.parse(''' ... for c in range(n): ... b = a + 1''') >>> pm = passmanager.PassManager("test") >>> sorted([name.id for name in pm.gather(LocalNodeDeclarations, node)]) ['b', 'c'] """ def __init__(self): """ Initialize empty set as the result. """ self.result = set() super(LocalNodeDeclarations, self).__init__() def visit_Name(self, node): """ Any node with Store context is a new declaration. """ if isinstance(node.ctx, ast.Store): self.result.add(node) class LocalNameDeclarations(NodeAnalysis): """ Gathers all local identifiers from a node. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(a): ... b = a + 1''') >>> pm = passmanager.PassManager("test") >>> sorted(pm.gather(LocalNameDeclarations, node)) ['a', 'b', 'foo'] """ def __init__(self): """ Initialize empty set as the result. """ self.result = set() super(LocalNameDeclarations, self).__init__() def visit_Name(self, node): """ Any node with Store or Param context is a new identifier. """ if isinstance(node.ctx, (ast.Store, ast.Param)): self.result.add(node.id) def visit_FunctionDef(self, node): """ Function name is a possible identifier. 
""" self.result.add(node.name) self.generic_visit(node) pythran-0.10.0+ds2/pythran/analyses/locals_analysis.py000066400000000000000000000075471416264035500230220ustar00rootroot00000000000000""" Locals computes the value of locals() """ from pythran.passmanager import ModuleAnalysis import pythran.metadata as md import gast as ast class Locals(ModuleAnalysis): """ Statically compute the value of locals() before each statement Yields a dictionary binding every node to the set of variable names defined *before* this node. Following snippet illustrates its behavior: >>> import gast as ast >>> from pythran import passmanager >>> pm = passmanager.PassManager('test') >>> code = ''' ... def b(n): ... m = n + 1 ... def b(n): ... return n + 1 ... return b(m)''' >>> tree = ast.parse(code) >>> l = pm.gather(Locals, tree) >>> sorted(l[tree.body[0].body[0]]) ['n'] >>> sorted(l[tree.body[0].body[1]]) ['b', 'm', 'n'] """ def __init__(self): self.result = dict() self.locals = set() self.nesting = 0 super(Locals, self).__init__() def generic_visit(self, node): super(Locals, self).generic_visit(node) if node not in self.result: self.result[node] = self.result[self.expr_parent] def store_and_visit(self, node): self.expr_parent = node self.result[node] = self.locals.copy() self.generic_visit(node) def visit_Module(self, node): self.expr_parent = node self.result[node] = self.locals self.generic_visit(node) def visit_FunctionDef(self, node): # special case for nested functions if self.nesting: self.locals.add(node.name) self.nesting += 1 self.expr_parent = node self.result[node] = self.locals.copy() parent_locals = self.locals.copy() for default in node.args.defaults: self.visit(default) for arg in node.args.args: if arg.annotation: self.visit(arg.annotation) if node.returns: self.visit(node.returns) self.locals.update(arg.id for arg in node.args.args) for stmt in node.body: self.visit(stmt) self.locals = parent_locals self.nesting -= 1 def visit_Assign(self, node): self.expr_parent = 
node self.result[node] = self.locals.copy() md.visit(self, node) self.visit(node.value) self.locals.update(t.id for t in node.targets if isinstance(t, ast.Name)) for target in node.targets: self.visit(target) def visit_For(self, node): self.expr_parent = node self.result[node] = self.locals.copy() md.visit(self, node) self.visit(node.iter) self.locals.add(node.target.id) for stmt in node.body: self.visit(stmt) for stmt in node.orelse: self.visit(stmt) def visit_Import(self, node): self.result[node] = self.locals.copy() self.locals.update(alias.name for alias in node.names) def visit_ImportFrom(self, node): self.result[node] = self.locals.copy() self.locals.update(alias.name for alias in node.names) def visit_ExceptHandler(self, node): self.expr_parent = node self.result[node] = self.locals.copy() if node.name: self.locals.add(node.name.id) node.type and self.visit(node.type) for stmt in node.body: self.visit(stmt) # statements that do not define a new variable visit_Return = store_and_visit visit_Yield = store_and_visit visit_Try = store_and_visit visit_AugAssign = store_and_visit visit_Print = store_and_visit visit_While = store_and_visit visit_If = store_and_visit visit_Raise = store_and_visit visit_Assert = store_and_visit visit_Expr = store_and_visit visit_Pass = store_and_visit visit_Break = store_and_visit visit_Continue = store_and_visit pythran-0.10.0+ds2/pythran/analyses/node_count.py000066400000000000000000000013621416264035500217640ustar00rootroot00000000000000""" NodeCount counts the number of nodes in a node """ from pythran.passmanager import NodeAnalysis class NodeCount(NodeAnalysis): """ Count the number of nodes included in a node This has nothing to do with execution time or whatever, its mainly use is to prevent the AST from growing too much when unrolling >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("if 1: return 3") >>> pm = passmanager.PassManager("test") >>> print(pm.gather(NodeCount, node)) 5 """ 
def __init__(self): self.result = 0 super(NodeCount, self).__init__() def generic_visit(self, node): self.result += 1 super(NodeCount, self).generic_visit(node) pythran-0.10.0+ds2/pythran/analyses/optimizable_comprehension.py000066400000000000000000000017601416264035500251010ustar00rootroot00000000000000""" OptimizableComp finds whether a comprehension can be optimized. """ from pythran.analyses.identifiers import Identifiers from pythran.passmanager import NodeAnalysis class OptimizableComprehension(NodeAnalysis): """Find whether a comprehension can be optimized.""" def __init__(self): self.result = set() super(OptimizableComprehension, self).__init__(Identifiers) def check_comprehension(self, iters): targets = {gen.target.id for gen in iters} optimizable = True for it in iters: ids = self.gather(Identifiers, it) optimizable &= all(((ident == it.target.id) | (ident not in targets)) for ident in ids) return optimizable def visit_ListComp(self, node): if (self.check_comprehension(node.generators)): self.result.add(node) def visit_GeneratorExp(self, node): if (self.check_comprehension(node.generators)): self.result.add(node) pythran-0.10.0+ds2/pythran/analyses/ordered_global_declarations.py000066400000000000000000000035571416264035500253330ustar00rootroot00000000000000""" OrderedGlobalDeclarations orders all global functions. 
""" from pythran.analyses.aliases import StrictAliases from pythran.analyses.global_declarations import GlobalDeclarations from pythran.passmanager import ModuleAnalysis import gast as ast class OrderedGlobalDeclarations(ModuleAnalysis): '''Order all global functions according to their callgraph depth''' def __init__(self): self.result = dict() super(OrderedGlobalDeclarations, self).__init__( StrictAliases, GlobalDeclarations) def visit_FunctionDef(self, node): self.curr = node self.result[node] = set() self.generic_visit(node) def visit_Name(self, node): if node in self.strict_aliases: for alias in self.strict_aliases[node]: if isinstance(alias, ast.FunctionDef): self.result[self.curr].add(alias) elif isinstance(alias, ast.Call): # this is a bind for alias in self.strict_aliases[alias.args[0]]: if alias in self.global_declarations: self.result[self.curr].add(alias) def run(self, node): # compute the weight of each function # the weight of a function is the number functions it references result = super(OrderedGlobalDeclarations, self).run(node) old_count = -1 new_count = 0 # iteratively propagate weights while new_count != old_count: for v in result.values(): v.update(*[result[f] for f in v]) old_count = new_count new_count = sum(len(value) for value in result.values()) # return functions, the one with the greatest weight first self.result = sorted(self.result.keys(), reverse=True, key=lambda s: len(self.result[s])) return self.result pythran-0.10.0+ds2/pythran/analyses/parallel_maps.py000066400000000000000000000017071416264035500224460ustar00rootroot00000000000000""" ParallelMaps detects parallel map(...). 
""" from pythran.analyses.aliases import Aliases from pythran.analyses.pure_expressions import PureExpressions from pythran.passmanager import ModuleAnalysis from pythran.tables import MODULES class ParallelMaps(ModuleAnalysis): """Yields the est of maps that could be parallel.""" def __init__(self): self.result = set() super(ParallelMaps, self).__init__(PureExpressions, Aliases) def visit_Call(self, node): if all(alias == MODULES['builtins']['map'] for alias in self.aliases[node.func]): if all(f in self.pure_expressions for f in self.aliases[node.args[0]]): self.result.add(node) def display(self, data): for node in data: print("I:", "{0} {1}".format( "call to the `map' intrinsic could be parallel", "(line {0})".format(node.lineno) )) pythran-0.10.0+ds2/pythran/analyses/potential_iterator.py000066400000000000000000000021461416264035500235400ustar00rootroot00000000000000""" PotentialIterator finds if it is possible to use an iterator. """ from pythran.analyses.aliases import Aliases from pythran.analyses.argument_read_once import ArgumentReadOnce from pythran.passmanager import NodeAnalysis import gast as ast class PotentialIterator(NodeAnalysis): """Find whether an expression can be replaced with an iterator.""" def __init__(self): self.result = set() NodeAnalysis.__init__(self, Aliases, ArgumentReadOnce) def visit_For(self, node): self.result.add(node.iter) self.generic_visit(node) def visit_Compare(self, node): if isinstance(node.ops[0], (ast.In, ast.NotIn)): self.result.update(node.comparators) self.generic_visit(node) def visit_Call(self, node): for i, arg in enumerate(node.args): def isReadOnce(f, i): return (f in self.argument_read_once and self.argument_read_once[f][i] <= 1) if all(isReadOnce(alias, i) for alias in self.aliases[node.func]): self.result.add(arg) self.generic_visit(node) pythran-0.10.0+ds2/pythran/analyses/pure_expressions.py000066400000000000000000000050431416264035500232440ustar00rootroot00000000000000""" PureExpressions detects functions 
without side-effects. """ from pythran.analyses.aliases import Aliases from pythran.analyses.argument_effects import ArgumentEffects from pythran.analyses.global_effects import GlobalEffects from pythran.passmanager import ModuleAnalysis from pythran.intrinsic import Intrinsic import gast as ast class PureExpressions(ModuleAnalysis): '''Yields the set of pure expressions''' def __init__(self): self.result = set() super(PureExpressions, self).__init__(ArgumentEffects, GlobalEffects, Aliases) def visit_FunctionDef(self, node): # do not visit arguments for stmt in node.body: self.visit(stmt) # Pure functions are already compute, we don't need to add them again return False def generic_visit(self, node): is_pure = all([self.visit(x) for x in ast.iter_child_nodes(node)]) if is_pure: self.result.add(node) return is_pure def visit_Call(self, node): # check if all arguments are Pures is_pure = all([self.visit(arg) for arg in node.args]) # check all possible function called func_aliases = self.aliases[node.func] if func_aliases: for func_alias in func_aliases: # does the function have a global effect? if isinstance(func_alias, Intrinsic): is_pure &= not func_alias.global_effects else: is_pure &= func_alias in self.result # does the function have an argument effect ? 
# trivial arguments can be ignored if func_alias in self.argument_effects: func_aes = self.argument_effects[func_alias] for arg, ae in zip(node.args, func_aes): if ae: try: ast.literal_eval(arg) except ValueError: is_pure = False else: is_pure = False else: is_pure = False # conservative choice # check for chained call is_pure &= self.visit(node.func) if is_pure: self.result.add(node) return is_pure def prepare(self, node): super(PureExpressions, self).prepare(node) self.result = {func for func, ae in self.argument_effects.items() if func not in self.global_effects and not any(ae)} pythran-0.10.0+ds2/pythran/analyses/range_values.py000066400000000000000000001161261416264035500223070ustar00rootroot00000000000000""" Module Analysing code to extract positive subscripts from code. """ # TODO check bound of while and if for more accurate values. import gast as ast from collections import defaultdict from contextlib import contextmanager from functools import reduce from pythran.analyses import Aliases, CFG from pythran.intrinsic import Intrinsic from pythran.passmanager import ModuleAnalysis from pythran.interval import Interval, IntervalTuple, UNKNOWN_RANGE from pythran.tables import MODULES, attributes class UnsupportedExpression(NotImplementedError): pass class RangeValueTooCostly(RuntimeError): pass def combine(op, node0, node1): key = '__{}__'.format(op.__class__.__name__.lower()) try: return getattr(type(node0), key)(node0, node1) except AttributeError: return UNKNOWN_RANGE def negate(node): if isinstance(node, ast.Name): # Not type info, could be anything :( raise UnsupportedExpression() if isinstance(node, ast.UnaryOp): # !~x <> ~x == 0 <> x == ~0 <> x == -1 if isinstance(node.op, ast.Invert): return ast.Compare(node.operand, [ast.Eq()], [ast.Constant(-1, None)]) # !!x <> x if isinstance(node.op, ast.Not): return node.operand # !+x <> +x == 0 <> x == 0 <> !x if isinstance(node.op, ast.UAdd): return node.operand # !-x <> -x == 0 <> x == 0 <> !x if 
isinstance(node.op, ast.USub): return node.operand if isinstance(node, ast.BoolOp): new_values = [ast.UnaryOp(ast.Not(), v) for v in node.values] # !(x or y) <> !x and !y if isinstance(node.op, ast.Or): return ast.BoolOp(ast.And(), new_values) # !(x and y) <> !x or !y if isinstance(node.op, ast.And): return ast.BoolOp(ast.Or(), new_values) if isinstance(node, ast.Compare): cmps = [ast.Compare(x, [negate(o)], [y]) for x, o, y in zip([node.left] + node.comparators[:-1], node.ops, node.comparators)] if len(cmps) == 1: return cmps[0] return ast.BoolOp(ast.Or(), cmps) if isinstance(node, ast.Eq): return ast.NotEq() if isinstance(node, ast.NotEq): return ast.Eq() if isinstance(node, ast.Gt): return ast.LtE() if isinstance(node, ast.GtE): return ast.Lt() if isinstance(node, ast.Lt): return ast.GtE() if isinstance(node, ast.LtE): return ast.Gt() if isinstance(node, ast.In): return ast.NotIn() if isinstance(node, ast.NotIn): return ast.In() if isinstance(node, ast.Attribute): if node.attr == 'False': return ast.Constant(True, None) if node.attr == 'True': return ast.Constant(False, None) raise UnsupportedExpression() def bound_range(mapping, aliases, node, modified=None): """ Bound the idenifier in `mapping' with the expression in `node'. `aliases' is the result of aliasing analysis and `modified' is updated with the set of identifiers possibly `bounded' as the result of the call. Returns `modified' or a fresh set of modified identifiers. 
""" if modified is None: modified = set() if isinstance(node, ast.Name): # could be anything not just an integral pass elif isinstance(node, ast.UnaryOp): try: negated = negate(node.operand) bound_range(mapping, aliases, negated, modified) except UnsupportedExpression: pass elif isinstance(node, ast.BoolOp): if isinstance(node.op, ast.And): for value in node.values: bound_range(mapping, aliases, value, modified) elif isinstance(node.op, ast.Or): mappings = [mapping.copy() for _ in node.values] for value, mapping_cpy in zip(node.values, mappings): bound_range(mapping_cpy, aliases, value, modified) for k in modified: mapping[k] = reduce(lambda x, y: x.union(y[k]), mappings[1:], mappings[0][k]) elif isinstance(node, ast.Compare): left = node.left if isinstance(node.left, ast.Name): modified.add(node.left.id) for op, right in zip(node.ops, node.comparators): if isinstance(right, ast.Name): modified.add(right.id) if isinstance(left, ast.Name): left_interval = mapping[left.id] else: left_interval = mapping[left] if isinstance(right, ast.Name): right_interval = mapping[right.id] else: right_interval = mapping[right] l_l, l_h = left_interval.low, left_interval.high r_l, r_h = right_interval.low, right_interval.high r_i = l_i = None if isinstance(op, ast.Eq): low, high = max(l_l, r_l), min(l_h, r_h) if low <= high: l_i = r_i = Interval(max(l_l, r_l), min(l_h, r_h)) elif isinstance(op, ast.Lt): # l < r => l.low < r.high & l.high < r.high l_i = Interval(min(l_l, r_h - 1), min(l_h, r_h - 1)) # l < r => r.low < l.low & r.high < l.low r_i = Interval(max(r_l, l_l + 1), max(r_h, l_l + 1)) elif isinstance(op, ast.LtE): # l <= r => l.low <= r.high & l.high <= r.high l_i = Interval(min(l_l, r_h), min(l_h, r_h)) # l <= r => r.low <= l.low & r.high <= l.low r_i = Interval(max(r_l, l_l), max(r_h, l_l)) elif isinstance(op, ast.Gt): # l > r => l.low > r.low & l.high > r.low l_i = Interval(max(l_l, r_l + 1), max(l_h, r_l + 1)) # l > r => r.low > l.high & r.high > l.high r_i = 
Interval(min(r_l, l_h - 1), min(r_h, l_h - 1)) elif isinstance(op, ast.GtE): # l >= r => l.high >= r.low & l.low >= r.low l_i = Interval(max(l_l, r_l), max(l_h, r_l)) # l >= r => r.low > l.high & r.high >= l.high r_i = Interval(min(r_l, l_h), min(r_h, l_h)) elif isinstance(op, ast.In): if isinstance(right, (ast.List, ast.Tuple, ast.Set)): if right.elts: low = min(mapping[elt].low for elt in right.elts) high = max(mapping[elt].high for elt in right.elts) l_i = Interval(low, high) elif isinstance(right, ast.Call): for alias in aliases[right.func]: if not hasattr(alias, 'return_range_content'): l_i = None break rrc = alias.return_range_content([mapping[arg] for arg in right.args]) if l_i is None: l_i = rrc else: l_i = l_i.union(alias.return_range(right)) if l_i is not None and isinstance(left, ast.Name): mapping[left.id] = l_i if r_i is not None and isinstance(right, ast.Name): mapping[right.id] = r_i left = right class RangeValuesBase(ModuleAnalysis): ResultHolder = object() def __init__(self): """Initialize instance variable and gather globals name information.""" self.result = defaultdict(lambda: UNKNOWN_RANGE) from pythran.analyses import UseOMP super(RangeValuesBase, self).__init__(Aliases, CFG, UseOMP) self.parent = self def add(self, variable, range_): """ Add a new low and high bound for a variable. As it is flow insensitive, it compares it with old values and update it if needed. """ if variable not in self.result: self.result[variable] = range_ else: self.result[variable] = self.result[variable].union(range_) return self.result[variable] def unionify(self, other): for k, v in other.items(): if k in self.result: self.result[k] = self.result[k].union(v) else: self.result[k] = v def widen(self, curr, other): self.result = curr for k, v in other.items(): w = self.result.get(k, None) if w is None: self.result[k] = v elif v is not w: self.result[k] = w.widen(v) def visit_BoolOp(self, node): """ Merge right and left operands ranges. 
TODO : We could exclude some operand with this range information... >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 ... c = 3 ... d = a or c''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['d'] Interval(low=2, high=3) """ res = list(zip(*[self.visit(elt).bounds() for elt in node.values])) return self.add(node, Interval(min(res[0]), max(res[1]))) def visit_BinOp(self, node): """ Combine operands ranges for given operator. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 ... c = 3 ... d = a - c''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['d'] Interval(low=-1, high=-1) """ res = combine(node.op, self.visit(node.left), self.visit(node.right)) return self.add(node, res) def visit_UnaryOp(self, node): """ Update range with given unary operation. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 ... c = -a ... d = ~a ... f = +a ... e = not a''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['f'] Interval(low=2, high=2) >>> res['c'] Interval(low=-2, high=-2) >>> res['d'] Interval(low=-3, high=-3) >>> res['e'] Interval(low=0, high=1) """ res = self.visit(node.operand) if isinstance(node.op, ast.Not): res = Interval(0, 1) elif(isinstance(node.op, ast.Invert) and isinstance(res.high, int) and isinstance(res.low, int)): res = Interval(~res.high, ~res.low) elif isinstance(node.op, ast.UAdd): pass elif isinstance(node.op, ast.USub): res = Interval(-res.high, -res.low) else: res = UNKNOWN_RANGE return self.add(node, res) def visit_IfExp(self, node): """ Use worst case for both possible values. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 or 3 ... b = 4 or 5 ... 
c = a if a else b''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['c'] Interval(low=2, high=5) """ self.visit(node.test) body_res = self.visit(node.body) orelse_res = self.visit(node.orelse) return self.add(node, orelse_res.union(body_res)) def visit_Compare(self, node): """ Boolean are possible index. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 or 3 ... b = 4 or 5 ... c = a < b ... d = b < 3 ... e = b == 4''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['c'] Interval(low=1, high=1) >>> res['d'] Interval(low=0, high=0) >>> res['e'] Interval(low=0, high=1) """ if any(isinstance(op, (ast.In, ast.NotIn, ast.Is, ast.IsNot)) for op in node.ops): self.generic_visit(node) return self.add(node, Interval(0, 1)) curr = self.visit(node.left) res = [] for op, comparator in zip(node.ops, node.comparators): comparator = self.visit(comparator) fake = ast.Compare(ast.Name('x', ast.Load(), None, None), [op], [ast.Name('y', ast.Load(), None, None)]) fake = ast.Expression(fake) ast.fix_missing_locations(fake) expr = compile(ast.gast_to_ast(fake), '', 'eval') res.append(eval(expr, {'x': curr, 'y': comparator})) if all(res): return self.add(node, Interval(1, 1)) elif any(r.low == r.high == 0 for r in res): return self.add(node, Interval(0, 0)) else: return self.add(node, Interval(0, 1)) def visit_Call(self, node): """ Function calls are not handled for now. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... 
a = builtins.range(10)''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=-inf, high=inf) """ for alias in self.aliases[node.func]: if alias is MODULES['builtins']['getattr']: attr_name = node.args[-1].value attribute = attributes[attr_name][-1] self.add(node, attribute.return_range(None)) elif isinstance(alias, Intrinsic): alias_range = alias.return_range( [self.visit(n) for n in node.args]) self.add(node, alias_range) elif isinstance(alias, ast.FunctionDef): if alias not in self.result: state = self.save_state() self.parent.visit(alias) self.restore_state(state) self.add(node, self.result[alias]) else: self.result.pop(node, None) return self.generic_visit(node) return self.result[node] def visit_Constant(self, node): """ Handle literals integers values. """ if isinstance(node.value, (bool, int)): return self.add(node, Interval(node.value, node.value)) return UNKNOWN_RANGE def visit_Name(self, node): """ Get range for parameters for examples or false branching. 
""" return self.add(node, self.result[node.id]) def visit_Tuple(self, node): return self.add(node, IntervalTuple(self.visit(elt) for elt in node.elts)) def visit_Index(self, node): return self.add(node, self.visit(node.value)) def visit_Subscript(self, node): if isinstance(node.value, ast.Call): for alias in self.aliases[node.value.func]: if alias is MODULES['builtins']['getattr']: attr_name = node.value.args[-1].value attribute = attributes[attr_name][-1] self.add(node, attribute.return_range_content(None)) elif isinstance(alias, Intrinsic): self.add(node, alias.return_range_content( [self.visit(n) for n in node.value.args])) else: return self.generic_visit(node) if not self.aliases[node.value.func]: return self.generic_visit(node) self.visit(node.slice) return self.result[node] else: value = self.visit(node.value) slice = self.visit(node.slice) return self.add(node, value[slice]) def visit_FunctionDef(self, node): """ Set default range value for globals and attributes. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(a, b): pass") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=-inf, high=inf) """ if node in self.result: return if self.use_omp: return self.result[node] = UNKNOWN_RANGE # Set this prematurely to avoid infinite callgraph loop prev_result = self.result.get(RangeValuesBase.ResultHolder, None) self.function_visitor(node) del self.result[node] self.add(node, self.result[RangeValuesBase.ResultHolder]) if prev_result is not None: self.result[RangeValuesBase.ResultHolder] = prev_result else: del self.result[RangeValuesBase.ResultHolder] class RangeValuesSimple(RangeValuesBase): """ This analyse extract positive subscripts from code. It is flow sensitive and aliasing is not taken into account as integer doesn't create aliasing in Python. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(a): ... 
for i in builtins.range(1, 10): ... c = i // 2''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValuesSimple, node) >>> res['c'], res['i'] (Interval(low=0, high=5), Interval(low=1, high=10)) """ def __init__(self, parent=None): if parent is not None: self.parent = parent self.ctx = parent.ctx self.deps = parent.deps self.result = parent.result self.aliases = parent.aliases self.passmanager = parent.passmanager else: super(RangeValuesSimple, self).__init__() def generic_visit(self, node): """ Other nodes are not known and range value neither. """ super(RangeValuesSimple, self).generic_visit(node) return self.add(node, UNKNOWN_RANGE) def save_state(self): return self.aliases, def restore_state(self, state): self.aliases, = state def function_visitor(self, node): for stmt in node.body: self.visit(stmt) def visit_Return(self, node): if node.value: return_range = self.visit(node.value) return self.add(RangeValues.ResultHolder, return_range) else: return self.generic_visit(node) def visit_Assert(self, node): """ Constraint the range of variables >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(a): assert a >= 1; b = a + 1") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValuesSimple, node) >>> res['a'] Interval(low=1, high=inf) >>> res['b'] Interval(low=2, high=inf) """ self.generic_visit(node) bound_range(self.result, self.aliases, node.test) def visit_Assign(self, node): """ Set range value for assigned variable. We do not handle container values. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a = b = 2") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValuesSimple, node) >>> res['a'] Interval(low=2, high=2) >>> res['b'] Interval(low=2, high=2) """ assigned_range = self.visit(node.value) for target in node.targets: if isinstance(target, ast.Name): # Make sure all Interval doesn't alias for multiple variables. 
self.add(target.id, assigned_range) else: self.visit(target) def visit_AugAssign(self, node): """ Update range value for augassigned variables. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a = 2; a -= 1") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValuesSimple, node) >>> res['a'] Interval(low=1, high=1) """ self.generic_visit(node) if isinstance(node.target, ast.Name): name = node.target.id res = combine(node.op, self.result[name], self.result[node.value]) self.result[name] = res def visit_For(self, node): """ Handle iterate variable in for loops. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = b = c = 2 ... for i in builtins.range(1): ... a -= 1 ... b += 1''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValuesSimple, node) >>> res['a'] Interval(low=-inf, high=2) >>> res['b'] Interval(low=2, high=inf) >>> res['c'] Interval(low=2, high=2) >>> node = ast.parse(''' ... def foo(): ... for i in (1, 2, 4): ... a = i''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValuesSimple, node) >>> res['a'] Interval(low=1, high=4) """ assert isinstance(node.target, ast.Name), "For apply on variables." self.visit(node.iter) if isinstance(node.iter, ast.Call): for alias in self.aliases[node.iter.func]: if isinstance(alias, Intrinsic): self.add(node.target.id, alias.return_range_content( [self.visit(n) for n in node.iter.args])) self.visit_loop(node, ast.Compare(node.target, [ast.In()], [node.iter])) def visit_loop(self, node, cond=None): """ Handle incremented variables in loop body. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = b = c = 2 ... while a > 0: ... a -= 1 ... 
b += 1''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValuesSimple, node) >>> res['a'] Interval(low=0, high=2) >>> res['b'] Interval(low=2, high=inf) >>> res['c'] Interval(low=2, high=2) """ if cond is not None: init_range = self.result self.result = self.result.copy() bound_range(self.result, self.aliases, cond) # visit once to gather newly declared vars for stmt in node.body: self.visit(stmt) # freeze current state old_range = self.result.copy() # extra round for stmt in node.body: self.visit(stmt) # widen any change for expr, range_ in old_range.items(): self.result[expr] = self.result[expr].widen(range_) # propagate the new informations again if cond is not None: bound_range(self.result, self.aliases, cond) for stmt in node.body: self.visit(stmt) self.unionify(init_range) self.visit(cond) for stmt in node.orelse: self.visit(stmt) def visit_While(self, node): self.visit(node.test) return self.visit_loop(node, node.test) def visit_If(self, node): """ Handle iterate variable across branches >>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> node = ast.parse(''' ... def foo(a): ... if a > 1: b = 1 ... else: b = 3''') >>> res = pm.gather(RangeValuesSimple, node) >>> res['b'] Interval(low=1, high=3) >>> node = ast.parse(''' ... def foo(a): ... if a > 1: b = a ... else: b = 3''') >>> res = pm.gather(RangeValuesSimple, node) >>> res['b'] Interval(low=2, high=inf) >>> node = ast.parse(''' ... def foo(a): ... if 0 < a < 4: b = a ... else: b = 3''') >>> res = pm.gather(RangeValuesSimple, node) >>> res['b'] Interval(low=1, high=3) >>> node = ast.parse(''' ... def foo(a): ... if (0 < a) and (a < 4): b = a ... else: b = 3''') >>> res = pm.gather(RangeValuesSimple, node) >>> res['b'] Interval(low=1, high=3) >>> node = ast.parse(''' ... def foo(a): ... if (a == 1) or (a == 2): b = a ... 
else: b = 3''') >>> res = pm.gather(RangeValuesSimple, node) >>> res['b'] Interval(low=1, high=3) """ self.visit(node.test) old_range = self.result self.result = old_range.copy() bound_range(self.result, self.aliases, node.test) for stmt in node.body: self.visit(stmt) body_range = self.result self.result = old_range.copy() for stmt in node.orelse: self.visit(stmt) orelse_range = self.result self.result = body_range self.unionify(orelse_range) def visit_Try(self, node): init_range = self.result self.result = init_range.copy() for stmt in node.body: self.visit(stmt) self.unionify(init_range) init_range = self.result.copy() for handler in node.handlers: self.result, prev_state = init_range.copy(), self.result for stmt in handler.body: self.visit(stmt) self.unionify(prev_state) self.result, prev_state = init_range, self.result for stmt in node.orelse: self.visit(stmt) self.unionify(prev_state) for stmt in node.finalbody: self.visit(stmt) class RangeValues(RangeValuesBase): """ This analyse extract positive subscripts from code. It is flow sensitive and aliasing is not taken into account as integer doesn't create aliasing in Python. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(a): ... for i in builtins.range(1, 10): ... c = i // 2 ... return''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['c'], res['i'] (Interval(low=0, high=5), Interval(low=1, high=10)) """ def __init__(self): super(RangeValues, self).__init__() def generic_visit(self, node): """ Other nodes are not known and range value neither. 
""" super(RangeValues, self).generic_visit(node) if isinstance(node, ast.stmt): if node in self.cfg: return self.cfg.successors(node) else: return self.add(node, UNKNOWN_RANGE) def cfg_visit(self, node, skip=None): successors = [node] visited = set() if skip is None else skip.copy() while successors: successor = successors.pop() if successor in visited: continue visited.add(successor) nexts = self.visit(successor) if nexts: successors.extend((n for n in nexts if n is not CFG.NIL)) def save_state(self): return (self.cfg, self.aliases, self.use_omp, self.no_backward, self.no_if_split) def restore_state(self, state): (self.cfg, self.aliases, self.use_omp, self.no_backward, self.no_if_split) = state def function_visitor(self, node): parent_result = self.result self.result = defaultdict(lambda: UNKNOWN_RANGE) for k, v in parent_result.items(): if isinstance(k, ast.FunctionDef): self.result[k] = v # try to visit the cfg, it's greedy but more accurate try: self.no_backward = 0 self.no_if_split = 0 self.cfg_visit(next(self.cfg.successors(node))) for k, v in self.result.items(): parent_result[k] = v self.result = parent_result # too greedy? 
Never mind, we know how to be fast and simple :-) except RangeValueTooCostly: self.result = parent_result rvs = RangeValuesSimple(self) rvs.visit(node) def visit_Return(self, node): if node.value: return_range = self.visit(node.value) self.add(RangeValues.ResultHolder, return_range) return self.cfg.successors(node) def visit_Assert(self, node): """ Constraint the range of variables >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(a): assert a >= 1; b = a + 1") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=1, high=inf) >>> res['b'] Interval(low=2, high=inf) """ self.visit(node.test) bound_range(self.result, self.aliases, node.test) return self.cfg.successors(node) def visit_Assign(self, node): """ Set range value for assigned variable. We do not handle container values. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a = b = 2") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=2, high=2) >>> res['b'] Interval(low=2, high=2) """ assigned_range = self.visit(node.value) for target in node.targets: if isinstance(target, ast.Name): # Make sure all Interval doesn't alias for multiple variables. self.result[target.id] = assigned_range else: self.visit(target) return self.cfg.successors(node) def visit_AugAssign(self, node): """ Update range value for augassigned variables. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a = 2; a -= 1") >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=1, high=1) """ self.generic_visit(node) if isinstance(node.target, ast.Name): name = node.target.id res = combine(node.op, self.result[name], self.result[node.value]) self.result[name] = res return self.cfg.successors(node) def visit_loop_successor(self, node): for successor in self.cfg.successors(node): if successor is not node.body[0]: if isinstance(node, ast.While): bound_range(self.result, self.aliases, ast.UnaryOp(ast.Not(), node.test)) return [successor] def visit_For(self, node): """ Handle iterate variable in for loops. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = b = c = 2 ... for i in builtins.range(1): ... a -= 1 ... b += 1 ... return''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=-inf, high=2) >>> res['b'] Interval(low=2, high=inf) >>> res['c'] Interval(low=2, high=2) >>> node = ast.parse(''' ... def foo(): ... for i in (1, 2, 4): ... a = i ... return''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=1, high=4) """ assert isinstance(node.target, ast.Name), "For apply on variables." 
self.visit(node.iter) init_state = self.result.copy() bound_range(self.result, self.aliases, ast.Compare(node.target, [ast.In()], [node.iter])) # visit body skip = {x for x in self.cfg.successors(node) if x is not node.body[0]} skip.add(node) next_ = self.cfg_visit(node.body[0], skip=skip) if self.no_backward: return self.visit_loop_successor(node) else: pass #self.no_backward += 1 prev_state = self.result self.result = prev_state.copy() self.cfg_visit(node.body[0], skip=skip) self.widen(self.result, prev_state) self.cfg_visit(node.body[0], skip=skip) self.unionify(init_state) pass #self.no_backward -= 1 return self.visit_loop_successor(node) def visit_While(self, node): """ Handle incremented variables in loop body. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = b = c = 10 ... while a > 0: ... a -= 1 ... b += 1 ... return''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['a'] Interval(low=-inf, high=0) >>> res['b'] Interval(low=11, high=inf) >>> res['c'] Interval(low=10, high=10) """ test_range = self.visit(node.test) init_state = self.result.copy() skip = {x for x in self.cfg.successors(node) if x is not node.body[0]} skip.add(node) # if the test may be false, visit the tail if 0 in test_range: for successor in list(self.cfg.successors(node)): if successor is not node.body[0]: self.cfg_visit(successor, skip=skip) bound_range(self.result, self.aliases, node.test) # visit body self.cfg_visit(node.body[0], skip=skip) if self.no_backward: if 0 in test_range: self.unionify(init_state) return self.visit_loop_successor(node) else: pass #self.no_backward += 1 prev_state = self.result self.result = prev_state.copy() self.cfg_visit(node.body[0], skip=skip) self.widen(self.result, prev_state) # propagate the result of the widening self.cfg_visit(node.body[0], skip=skip) if 0 in test_range: self.unionify(init_state) else: self.unionify(prev_state) 
self.visit(node.test) pass #self.no_backward -= 1 # exit from the while test return self.visit_loop_successor(node) def visit_If(self, node): """ Handle iterate variable across branches >>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> node = ast.parse(''' ... def foo(a): ... if a > 1: b = 1 ... else: b = 3 ... pass''') >>> res = pm.gather(RangeValues, node) >>> res['b'] Interval(low=1, high=3) >>> node = ast.parse(''' ... def foo(a): ... if a > 1: b = a ... else: b = 3 ... pass''') >>> res = pm.gather(RangeValues, node) >>> res['b'] Interval(low=2, high=inf) >>> node = ast.parse(''' ... def foo(a): ... if 0 < a < 4: b = a ... else: b = 3 ... pass''') >>> res = pm.gather(RangeValues, node) >>> res['b'] Interval(low=1, high=3) >>> node = ast.parse(''' ... def foo(a): ... if (0 < a) and (a < 4): b = a ... else: b = 3 ... pass''') >>> res = pm.gather(RangeValues, node) >>> res['b'] Interval(low=1, high=3) >>> node = ast.parse(''' ... def foo(a): ... if (a == 1) or (a == 2): b = a ... else: b = 3 ... pass''') >>> res = pm.gather(RangeValues, node) >>> res['b'] Interval(low=1, high=3) >>> node = ast.parse(''' ... def foo(a): ... b = 5 ... if a > 0: b = a ... pass''') >>> res = pm.gather(RangeValues, node) >>> res['a'], res['b'] (Interval(low=-inf, high=inf), Interval(low=1, high=inf)) >>> node = ast.parse(''' ... def foo(a): ... if a > 3: b = 1 ... else: b = 2 ... if a > 1: b = 2 ... pass''') >>> res = pm.gather(RangeValues, node) >>> res['b'] Interval(low=2, high=2) """ # handling each branch becomes too costly, opt for a simpler, # less accurate algorithm. 
if self.no_if_split == 4: raise RangeValueTooCostly() self.no_if_split += 1 test_range = self.visit(node.test) init_state = self.result.copy() if 1 in test_range: bound_range(self.result, self.aliases, node.test) self.cfg_visit(node.body[0]) visited_successors = {node.body[0]} if node.orelse: if 0 in test_range: prev_state = self.result self.result = init_state.copy() bound_range(self.result, self.aliases, ast.UnaryOp(ast.Not(), node.test)) self.cfg_visit(node.orelse[0]) self.unionify(prev_state) visited_successors.add(node.orelse[0]) elif 0 in test_range: successors = self.cfg.successors(node) for successor in list(successors): # no else branch if successor not in visited_successors: self.result, prev_state = init_state.copy(), self.result bound_range(self.result, self.aliases, ast.UnaryOp(ast.Not(), node.test)) self.cfg_visit(successor) self.unionify(prev_state) self.no_if_split -= 1 def visit_Try(self, node): init_range = self.result self.result = init_range.copy() self.cfg_visit(node.body[0]) self.unionify(init_range) init_range = self.result.copy() for handler in node.handlers: self.result, prev_state = init_range.copy(), self.result self.cfg_visit(handler.body[0]) self.unionify(prev_state) # Un comment the line below to test RangeValuesSimple # RangeValues = RangeValuesSimple # RangeValues.__name__ = 'RangeValues' pythran-0.10.0+ds2/pythran/analyses/scope.py000066400000000000000000000071401416264035500207400ustar00rootroot00000000000000""" Scope computes scope information """ from pythran.analyses.ancestors import AncestorsWithBody from pythran.analyses.use_def_chain import DefUseChains from pythran.passmanager import FunctionAnalysis from collections import defaultdict import gast as ast class Scope(FunctionAnalysis): ''' Associate each variable declaration with the node that defines it Whenever possible, associate the variable declaration to an assignment, otherwise to a node that defines a bloc (e.g. a For) This takes OpenMP information into accounts! 
The result is a dictionary with nodes as key and set of names as values ''' def __init__(self): self.result = defaultdict(set) self.decl_holders = (ast.FunctionDef, ast.For, ast.excepthandler, ast.While, ast.If, tuple) super(Scope, self).__init__(AncestorsWithBody, DefUseChains) def visit_OMPDirective(self, node): for dep in node.deps: if dep in node.private_deps: continue if isinstance(dep, ast.Name): self.openmp_deps.setdefault(dep.id, []).append(dep) def visit_FunctionDef(self, node): self.ancestors = self.ancestors_with_body # first gather some info about OpenMP declarations self.openmp_deps = dict() self.generic_visit(node) name_to_defs = dict() for def_ in self.def_use_chains.locals[node]: name_to_defs.setdefault(def_.name(), []).append(def_) # then compute scope informations # unlike use-def chains, this takes OpenMP annotations into account for name, defs in name_to_defs.items(): # get all refs to that name refs = [d.node for d in defs] + [u.node for d in defs for u in d.users()] # add OpenMP refs (well, the parent of the holding stmt) refs.extend(self.ancestors[d][-3] # -3 to get the right parent for d in self.openmp_deps.get(name, [])) # get their ancestors ancestors = [self.ancestors[ref] for ref in refs] # common ancestors prefixes = [p for p in zip(*ancestors) if len(set(p)) == 1] common = prefixes[-1][0] # the last common ancestor # now try to attach the scope to an assignment. 
# This will be the first assignment found in the bloc if isinstance(common, self.decl_holders): # get all refs that define that name refs = [d.node for d in defs] refs.extend(self.openmp_deps.get(name, [])) # get their parent prefs = set() for r in refs: ancestor = r # walk up the ancestor tree until we find the one # right before common while self.ancestors[ancestor][-1] is not common: ancestor = self.ancestors[ancestor][-1] prefs.add(ancestor) # set the defining statement to the first assign in the body # unless another statements uses it before # or the common itselfs holds a dependency if common not in prefs: body = common if isinstance(common, tuple) else common.body for c in body: if c in prefs: if isinstance(c, ast.Assign): common = c break self.result[common].add(name) pythran-0.10.0+ds2/pythran/analyses/static_expressions.py000066400000000000000000000040711416264035500235600ustar00rootroot00000000000000""" StaticExpressions gathers constant expression that involve types. """ from pythran.passmanager import NodeAnalysis class HasStaticExpression(NodeAnalysis): def __init__(self): self.result = False super(HasStaticExpression, self).__init__() def visit_Attribute(self, node): self.generic_visit(node) self.result |= node.attr == 'is_none' class StaticExpressions(NodeAnalysis): """Identify constant expressions.""" def __init__(self): self.result = set() self.constant_expressions = set() super(StaticExpressions, self).__init__() def add(self, node): self.result.add(node) return True def not_add(self, _): return False def match_all(self, *args): assert len(args) > 1, "at least two arguments" static = False const = True for value in args: if self.visit(value): static = True else: const &= value in self.constant_expressions return static and const def visit_BoolOp(self, node): return self.match_all(*node.values) and self.add(node) def visit_BinOp(self, node): return self.match_all(node.left, node.right) and self.add(node) def visit_UnaryOp(self, node): return 
self.visit(node.operand) and self.add(node) def visit_IfExp(self, node): return (self.match_all(node.test, node.body, node.orelse) and self.add(node)) def visit_Compare(self, node): return self.match_all(node.left, *node.comparators) and self.add(node) def visit_Call(self, node): return self.visit(node.func)and self.add(node) # very limited def visit_Attribute(self, node): return node.attr in ('is_none', 'isinstance') def visit_Constant(self, node): self.constant_expressions.add(node) visit_Subscript = not_add visit_Name = not_add visit_Dict = not_add visit_List = not_add visit_Tuple = not_add visit_Set = not_add visit_Slice = not_add visit_Index = not_add pythran-0.10.0+ds2/pythran/analyses/use_def_chain.py000066400000000000000000000022011416264035500223740ustar00rootroot00000000000000""" UsedDefChain build used-define chains analysis for each variable. """ from pythran.passmanager import ModuleAnalysis import pythran.metadata as md import beniget class ExtendedDefUseChains(beniget.DefUseChains): def unbound_identifier(self, name, node): # don't warn on unbound identifier pass def visit(self, node): # be aware of metadata md.visit(self, node) return super(ExtendedDefUseChains, self).visit(node) class UseDefChains(ModuleAnalysis): """ Build use-define chains analysis for each variable. """ def __init__(self): self.result = None super(UseDefChains, self).__init__(DefUseChains) def visit_Module(self, node): udc = beniget.UseDefChains(self.def_use_chains) self.result = udc.chains class DefUseChains(ModuleAnalysis): """ Build define-use-define chains analysis for each variable. 
""" def __init__(self): self.result = None super(DefUseChains, self).__init__() def visit_Module(self, node): duc = ExtendedDefUseChains() duc.visit(node) self.result = duc pythran-0.10.0+ds2/pythran/analyses/use_omp.py000066400000000000000000000005131416264035500212730ustar00rootroot00000000000000""" UseOMP detects if a function use OpenMP """ from pythran.passmanager import FunctionAnalysis class UseOMP(FunctionAnalysis): """Detects if a function use openMP""" def __init__(self): self.result = False super(UseOMP, self).__init__() def visit_OMPDirective(self, _): self.result = True pythran-0.10.0+ds2/pythran/analyses/yield_points.py000066400000000000000000000005611416264035500223310ustar00rootroot00000000000000""" YieldPoints gathers all yield points from a node """ from pythran.passmanager import FunctionAnalysis class YieldPoints(FunctionAnalysis): '''Gathers all yield points of a generator, if any.''' def __init__(self): self.result = list() super(YieldPoints, self).__init__() def visit_Yield(self, node): self.result.append(node) pythran-0.10.0+ds2/pythran/backend.py000066400000000000000000001430401416264035500173770ustar00rootroot00000000000000''' This module contains all pythran backends. 
* Cxx dumps the AST into C++ code * Python dumps the AST into Python code ''' from pythran.analyses import LocalNodeDeclarations, GlobalDeclarations, Scope from pythran.analyses import YieldPoints, IsAssigned, ASTMatcher, AST_any from pythran.analyses import RangeValues, PureExpressions, Dependencies from pythran.analyses import Immediates, Ancestors from pythran.cxxgen import Template, Include, Namespace, CompilationUnit from pythran.cxxgen import Statement, Block, AnnotatedStatement, Typedef, Label from pythran.cxxgen import Value, FunctionDeclaration, EmptyStatement, Nop from pythran.cxxgen import FunctionBody, Line, ReturnStatement, Struct, Assign from pythran.cxxgen import For, While, TryExcept, ExceptHandler, If, AutoFor from pythran.openmp import OMPDirective from pythran.passmanager import Backend from pythran.syntax import PythranSyntaxError from pythran.tables import operator_to_lambda, update_operator_to_lambda from pythran.tables import pythran_ward from pythran.types.conversion import PYTYPE_TO_CTYPE_TABLE, TYPE_TO_SUFFIX from pythran.types.types import Types from pythran.utils import attr_to_path, pushpop, cxxid, isstr, isnum from pythran.utils import isextslice, ispowi from pythran import metadata, unparse from math import isnan, isinf import gast as ast import os from functools import reduce import io class Python(Backend): ''' Produces a Python representation of the AST. 
>>> import gast as ast, pythran.passmanager as passmanager >>> node = ast.parse("print('hello world')") >>> pm = passmanager.PassManager('test') >>> print(pm.dump(Python, node)) print('hello world') ''' def __init__(self): self.result = '' super(Python, self).__init__() def visit(self, node): output = io.StringIO() unparse.Unparser(node, output) self.result = output.getvalue() def templatize(node, types, default_types=None): if not default_types: default_types = [None] * len(types) if types: return Template( ["typename {0} {1}".format(t, "= {0}".format(d) if d else "") for t, d in zip(types, default_types)], node) else: return node def cxx_loop(visit): """ Decorator for loop node (For and While) to handle "else" branching. Decorated node will save flags for a goto statement used instead of usual break and add this flag at the end of the else statements. Examples -------- >> for i in range(12): >> if i == 5: >> break >> else: >> ... some code ... Becomes >> for(type i : range(12)) >> if(i==5) >> goto __no_breaking0; >> ... some code ... >> __no_breaking0; """ def loop_visitor(self, node): """ New decorate function. It push the breaking flag, run the visitor and add "else" statements. 
""" if not node.orelse: with pushpop(self.break_handlers, None): res = visit(self, node) return res break_handler = "__no_breaking{0}".format(id(node)) with pushpop(self.break_handlers, break_handler): res = visit(self, node) # handle the body of the for loop orelse = [self.visit(stmt) for stmt in node.orelse] if break_handler in self.used_break: orelse_label = [Label(break_handler)] else: orelse_label = [] return Block([res] + orelse + orelse_label) return loop_visitor class CachedTypeVisitor: def __init__(self, other=None): if other is None: self.cache = dict() self.rcache = dict() self.mapping = dict() else: self.cache = other.cache.copy() self.rcache = other.rcache.copy() self.mapping = other.mapping.copy() def __call__(self, node): if node not in self.mapping: t = node.generate(self) if node not in self.mapping: if t in self.rcache: self.mapping[node] = self.mapping[self.rcache[t]] self.cache[node] = self.cache[self.rcache[t]] else: self.rcache[t] = node self.mapping[node] = len(self.mapping) self.cache[node] = t return "__type{0}".format(self.mapping[node]) def typedefs(self): kv = sorted(self.mapping.items(), key=lambda x: x[1]) L = list() visited = set() # the same value must not be typedefed twice for k, v in kv: if v not in visited: typename = "__type" + str(v) L.append(Typedef(Value(self.cache[k], typename))) visited.add(v) return L def make_default(d): return "= {0}".format(d) if d else "" def make_function_declaration(self, node, rtype, name, ftypes, fargs, defaults=None, attributes=None): if defaults is None: defaults = [None] * len(ftypes) if attributes is None: attributes = [] arguments = list() first_default = len(node.args.args) - len(node.args.defaults) for i, (t, a, d) in enumerate(zip(ftypes, fargs, defaults)): # because universal reference and default don't get on well if isinstance(self, CxxGenerator) or i >= first_default: rvalue_ref = "" else: rvalue_ref = "&&" argument = Value(t + rvalue_ref, "{0}{1}".format(a, make_default(d))) 
arguments.append(argument) return FunctionDeclaration(Value(rtype, name), arguments, *attributes) def make_const_function_declaration(self, node, rtype, name, ftypes, fargs, defaults=None): return make_function_declaration(self, node, rtype, name, ftypes, fargs, defaults, ["const"]) class CxxFunction(ast.NodeVisitor): ''' Attributes ---------- ldecls : {str} set of local declarations. break_handler : [str] It contains flags for goto statements to jump on break in case of orelse statement in loop. None means there are no orelse statement so no jump are requiered. (else in loop means : don't execute if loop is terminated with a break) ''' def __init__(self, parent): """ Basic initialiser gathering analysis informations. """ self.parent = parent self.break_handlers = [] self.used_break = set() self.ldecls = None self.openmp_deps = set() def __getattr__(self, attr): return getattr(self.parent, attr) # local declaration processing def process_locals(self, node, node_visited, *skipped): """ Declare variable local to node and insert declaration before. Not possible for function yielding values. """ local_vars = self.scope[node].difference(skipped) local_vars = local_vars.difference(self.openmp_deps) if not local_vars: return node_visited # no processing locals_visited = [] for varname in local_vars: vartype = self.typeof(varname) decl = Statement("{} {}".format(vartype, varname)) locals_visited.append(decl) self.ldecls.difference_update(local_vars) return Block(locals_visited + [node_visited]) def visit_OMPDirective(self, node): self.openmp_deps.update(d.id for d in node.private_deps) self.openmp_deps.update(d.id for d in node.shared_deps) def visit(self, node): metadata.visit(self, node) return super(CxxFunction, self).visit(node) def process_omp_attachements(self, node, stmt, index=None): """ Add OpenMP pragma on the correct stmt in the correct order. stmt may be a list. On this case, index have to be specify to add OpenMP on the correct statement. 
""" omp_directives = metadata.get(node, OMPDirective) if omp_directives: directives = list() for directive in omp_directives: directive.deps = [self.visit(dep) for dep in directive.deps] directives.append(directive) if index is None: stmt = AnnotatedStatement(stmt, directives) else: stmt[index] = AnnotatedStatement(stmt[index], directives) return stmt def typeof(self, node): if isinstance(node, str): return self.typeof(self.local_names[node]) else: return self.types[node].generate(self.lctx) def prepare_functiondef_context(self, node): # prepare context and visit function body fargs = node.args.args formal_args = [cxxid(arg.id) for arg in fargs] formal_types = ["argument_type" + str(i) for i in range(len(fargs))] local_decls = set(self.gather(LocalNodeDeclarations, node)) self.local_names = {sym.id: sym for sym in local_decls} self.local_names.update({arg.id: arg for arg in fargs}) self.lctx = CachedTypeVisitor() self.ldecls = {n.id for n in local_decls} body = [self.visit(stmt) for stmt in node.body] return body, formal_types, formal_args def prepare_types(self, node): # compute arg dump dflt_argv = ( [None] * (len(node.args.args) - len(node.args.defaults)) + [self.visit(n) for n in node.args.defaults]) dflt_argt = ( [None] * (len(node.args.args) - len(node.args.defaults)) + [self.types[n] for n in node.args.defaults]) # compute type dump result_type = self.types[node][0] callable_type = Typedef(Value("void", "callable")) pure_type = (Typedef(Value("void", "pure")) if node in self.pure_expressions else EmptyStatement()) return dflt_argv, dflt_argt, result_type, callable_type, pure_type # stmt def visit_FunctionDef(self, node): self.fname = cxxid(node.name) tmp = self.prepare_functiondef_context(node) operator_body, formal_types, formal_args = tmp tmp = self.prepare_types(node) dflt_argv, dflt_argt, result_type, callable_type, pure_type = tmp # a function has a call operator to be called # and a default constructor to create instances fscope = 
"type{0}::".format("<{0}>".format(", ".join(formal_types)) if formal_types else "") ffscope = "{0}::{1}".format(self.fname, fscope) operator_declaration = [ templatize( make_const_function_declaration( self, node, "typename {0}result_type".format(fscope), "operator()", formal_types, formal_args, dflt_argv), formal_types, dflt_argt), EmptyStatement() ] operator_signature = make_const_function_declaration( self, node, "typename {0}result_type".format(ffscope), "{0}::operator()".format(self.fname), formal_types, formal_args) ctx = CachedTypeVisitor(self.lctx) operator_local_declarations = ( [Statement("{0} {1}".format( self.types[self.local_names[k]].generate(ctx), cxxid(k))) for k in self.ldecls] ) dependent_typedefs = ctx.typedefs() operator_definition = FunctionBody( templatize(operator_signature, formal_types), Block(dependent_typedefs + operator_local_declarations + operator_body) ) ctx = CachedTypeVisitor() extra_typedefs = ( [Typedef(Value(t.generate(ctx), t.name)) for t in self.types[node][1]] + [Typedef(Value( result_type.generate(ctx), "result_type"))] ) extra_typedefs = ctx.typedefs() + extra_typedefs return_declaration = [ templatize( Struct("type", extra_typedefs), formal_types, dflt_argt ) ] topstruct = Struct(self.fname, [callable_type, pure_type] + return_declaration + operator_declaration) return [topstruct], [operator_definition] def visit_Return(self, node): value = self.visit(node.value) if metadata.get(node, metadata.StaticReturn): # don't rely on auto because we want to make sure there's no # conversion each time we return # this happens for variant because the variant param # order may differ from the init order (because of the way we # do type inference rtype = "typename {}::type::result_type".format(self.fname) stmt = Block([Assign("static %s tmp_global" % rtype, value), ReturnStatement("tmp_global")]) else: stmt = ReturnStatement(value) return self.process_omp_attachements(node, stmt) def visit_Delete(self, _): return Nop() # nothing to do in 
there def visit_Assign(self, node): """ Create Assign node for final Cxx representation. It tries to handle multi assignment like: >> a = b = c = 2 If only one local variable is assigned, typing is added: >> int a = 2; TODO: Handle case of multi-assignement for some local variables. Finally, process OpenMP clause like #pragma omp atomic """ if not all(isinstance(n, (ast.Name, ast.Subscript)) for n in node.targets): raise PythranSyntaxError( "Must assign to an identifier or a subscript", node) value = self.visit(node.value) targets = [self.visit(t) for t in node.targets] alltargets = "= ".join(targets) islocal = (len(targets) == 1 and isinstance(node.targets[0], ast.Name) and node.targets[0].id in self.scope[node] and node.targets[0].id not in self.openmp_deps) if islocal: # remove this decls from local decls self.ldecls.difference_update(t.id for t in node.targets) # add a local declaration if self.types[node.targets[0]].iscombined(): alltargets = '{} {}'.format(self.typeof(node.targets[0]), alltargets) elif isinstance(self.types[node.targets[0]], self.types.builder.Assignable): alltargets = '{} {}'.format( self.types.builder.AssignableNoEscape( self.types.builder.NamedType( 'decltype({})'.format(value))), alltargets) else: assert isinstance(self.types[node.targets[0]], self.types.builder.Lazy) alltargets = '{} {}'.format( self.types.builder.Lazy( self.types.builder.NamedType( 'decltype({})'.format(value))), alltargets) stmt = Assign(alltargets, value) return self.process_omp_attachements(node, stmt) def visit_AugAssign(self, node): value = self.visit(node.value) target = self.visit(node.target) op = update_operator_to_lambda[type(node.op)] stmt = Statement(op(target, value)[1:-1]) # strip spurious parenthesis return self.process_omp_attachements(node, stmt) def visit_Print(self, node): values = [self.visit(n) for n in node.values] stmt = Statement("pythonic::builtins::print{0}({1})".format( "" if node.nl else "_nonl", ", ".join(values)) ) return 
self.process_omp_attachements(node, stmt) def is_in_collapse(self, loop, node): for ancestor in reversed(self.ancestors[loop]): if not isinstance(ancestor, ast.For): return False for directive in metadata.get(ancestor, OMPDirective): if 'collapse' in directive.s: # FIXME: check loop depth and range canonicalization if node not in self.pure_expressions: raise PythranSyntaxError( "not pure expression used as loop target inside a " "collapse clause", loop) return True assert False, "unreachable state" def gen_for(self, node, target, local_iter, local_iter_decl, loop_body): """ Create For representation on iterator for Cxx generation. Examples -------- >> "omp parallel for" >> for i in range(10): >> ... do things ... Becomes >> "omp parallel for shared(__iterX)" >> for(decltype(__iterX)::iterator __targetX = __iterX.begin(); __targetX < __iterX.end(); ++__targetX) >> auto&& i = *__targetX; >> ... do things ... It the case of not local variable, typing for `i` disappear and typing is removed for iterator in case of yields statement in function. 
""" # Choose target variable for iterator (which is iterator type) local_target = "__target{0}".format(id(node)) local_target_decl = self.types.builder.IteratorOfType(local_iter_decl) islocal = (node.target.id not in self.openmp_deps and node.target.id in self.scope[node] and not hasattr(self, 'yields')) # If variable is local to the for body it's a ref to the iterator value # type if islocal: local_type = "auto&&" self.ldecls.remove(node.target.id) else: local_type = "" # Assign iterable value loop_body_prelude = Statement("{} {}= *{}".format(local_type, target, local_target)) # Create the loop assign = self.make_assign(local_target_decl, local_target, local_iter) loop = For("{}.begin()".format(assign), "{0} < {1}.end()".format(local_target, local_iter), "++{0}".format(local_target), Block([loop_body_prelude, loop_body])) return [self.process_omp_attachements(node, loop)] def handle_real_loop_comparison(self, args, target, upper_bound): """ Handle comparison for real loops. Add the correct comparison operator if possible. """ # order is 1 for increasing loop, -1 for decreasing loop and 0 if it is # not known at compile time if len(args) <= 2: order = 1 elif isnum(args[2]): order = -1 + 2 * (int(args[2].value) > 0) elif isnum(args[1]) and isnum(args[0]): order = -1 + 2 * (int(args[1].value) > int(args[0].value)) else: order = 0 comparison = "{} < {}" if order == 1 else "{} > {}" comparison = comparison.format(target, upper_bound) return comparison def gen_c_for(self, node, local_iter, loop_body): """ Create C For representation for Cxx generation. Examples -------- >> for i in range(10): >> ... do things ... Becomes >> for(long i = 0, __targetX = 10; i < __targetX; i += 1) >> ... do things ... Or >> for i in range(10, 0, -1): >> ... do things ... Becomes >> for(long i = 10, __targetX = 0; i > __targetX; i += -1) >> ... do things ... 
It the case of not local variable, typing for `i` disappear """ args = node.iter.args step = "1L" if len(args) <= 2 else self.visit(args[2]) if len(args) == 1: lower_bound = "0L" upper_arg = 0 else: lower_bound = self.visit(args[0]) upper_arg = 1 upper_type = iter_type = "long " upper_value = self.visit(args[upper_arg]) if self.is_in_collapse(node, args[upper_arg]): upper_bound = upper_value # compatible with collapse else: upper_bound = "__target{0}".format(id(node)) islocal = (node.target.id not in self.openmp_deps and node.target.id in self.scope[node] and not hasattr(self, 'yields')) # If variable is local to the for body keep it local... if islocal: loop = list() self.ldecls.remove(node.target.id) else: # For yield function, upper_bound is globals. iter_type = "" # Back one step to keep Python behavior (except for break) loop = [If("{} == {}".format(local_iter, upper_bound), Statement("{} -= {}".format(local_iter, step)))] comparison = self.handle_real_loop_comparison(args, local_iter, upper_bound) forloop = For("{0} {1}={2}".format(iter_type, local_iter, lower_bound), comparison, "{0} += {1}".format(local_iter, step), loop_body) loop.insert(0, self.process_omp_attachements(node, forloop)) # Store upper bound value if needed if upper_bound is upper_value: header = [] else: assgnt = self.make_assign(upper_type, upper_bound, upper_value) header = [Statement(assgnt)] return header, loop def handle_omp_for(self, node, local_iter): """ Fix OpenMP directives on For loops. Add the target as private variable as a new variable may have been introduce to handle cxx iterator. Also, add the iterator as shared variable as all 'parallel for chunck' have to use the same iterator. 
""" for directive in metadata.get(node, OMPDirective): if any(key in directive.s for key in (' parallel ', ' task ')): # Eventually add local_iter in a shared clause as iterable is # shared in the for loop (for every clause with datasharing) directive.s += ' shared({})' directive.deps.append(ast.Name(local_iter, ast.Load(), None, None)) directive.shared_deps.append(directive.deps[-1]) target = node.target assert isinstance(target, ast.Name) hasfor = 'for' in directive.s nodefault = 'default' not in directive.s noindexref = all(isinstance(x, ast.Name) and x.id != target.id for x in directive.deps) if (hasfor and nodefault and noindexref and target.id not in self.scope[node]): # Target is private by default in omp but iterator use may # introduce an extra variable directive.s += ' private({})' directive.deps.append(ast.Name(target.id, ast.Load(), None, None)) directive.private_deps.append(directive.deps[-1]) def can_use_autofor(self, node): """ Check if given for Node can use autoFor syntax. To use auto_for: - iterator should have local scope - yield should not be use - OpenMP pragma should not be use TODO : Yield should block only if it is use in the for loop, not in the whole function. """ auto_for = (isinstance(node.target, ast.Name) and node.target.id in self.scope[node] and node.target.id not in self.openmp_deps) auto_for &= not metadata.get(node, OMPDirective) return auto_for def can_use_c_for(self, node): """ Check if a for loop can use classic C syntax. 
To use C syntax: - target should not be assign in the loop - range should be use as iterator - order have to be known at compile time """ assert isinstance(node.target, ast.Name) pattern_range = ast.Call(func=ast.Attribute( value=ast.Name('builtins', ast.Load(), None, None), attr='range', ctx=ast.Load()), args=AST_any(), keywords=[]) is_assigned = set() for stmt in node.body: is_assigned.update({n.id for n in self.gather(IsAssigned, stmt)}) nodes = ASTMatcher(pattern_range).search(node.iter) if node.iter not in nodes or node.target.id in is_assigned: return False args = node.iter.args if len(args) < 3: return True if isnum(args[2]): return True return False def make_assign(self, local_iter_decl, local_iter, iterable): return "{0} {1} = {2}".format(local_iter_decl, local_iter, iterable) @cxx_loop def visit_For(self, node): """ Create For representation for Cxx generation. Examples -------- >> for i in range(10): >> ... work ... Becomes >> typename returnable::type __iterX = builtins.range(10); >> ... possible container size reservation ... >> for (auto&& i: __iterX) >> ... the work ... This function also handle assignment for local variables. We can notice that three kind of loop are possible: - Normal for loop on iterator - Autofor loop. - Normal for loop using integer variable iteration Kind of loop used depend on OpenMP, yield use and variable scope. 
""" if not isinstance(node.target, ast.Name): raise PythranSyntaxError( "Using something other than an identifier as loop target", node.target) target = self.visit(node.target) # Handle the body of the for loop loop_body = Block([self.visit(stmt) for stmt in node.body]) # Declare local variables at the top of the loop body loop_body = self.process_locals(node, loop_body, node.target.id) iterable = self.visit(node.iter) if self.can_use_c_for(node): header, loop = self.gen_c_for(node, target, loop_body) else: if self.can_use_autofor(node): header = [] self.ldecls.remove(node.target.id) autofor = AutoFor(target, iterable, loop_body) loop = [self.process_omp_attachements(node, autofor)] else: # Iterator declaration local_iter = "__iter{0}".format(id(node)) local_iter_decl = self.types.builder.Assignable( self.types[node.iter]) self.handle_omp_for(node, local_iter) # Assign iterable # For C loop, it avoids issues # if the upper bound is assigned in the loop asgnt = self.make_assign(local_iter_decl, local_iter, iterable) header = [Statement(asgnt)] loop = self.gen_for(node, target, local_iter, local_iter_decl, loop_body) # For xxxComprehension, it is replaced by a for loop. In this case, # pre-allocate size of container. for comp in metadata.get(node, metadata.Comprehension): header.append(Statement("pythonic::utils::reserve({0},{1})".format( comp.target, iterable))) return Block(header + loop) @cxx_loop def visit_While(self, node): """ Create While node for Cxx generation. It is a cxx_loop to handle else clause. 
""" test = self.visit(node.test) body = [self.visit(n) for n in node.body] stmt = While(test, Block(body)) return self.process_omp_attachements(node, stmt) def visit_Try(self, node): body = [self.visit(n) for n in node.body] except_ = list() for n in node.handlers: except_.extend(self.visit(n)) return TryExcept(Block(body), except_) def visit_ExceptHandler(self, node): name = self.visit(node.name) if node.name else None body = [self.visit(m) for m in node.body] if isinstance(node.type, ast.Tuple): return [ExceptHandler(p.attr, Block(body), name) for p in node.type.elts] else: return [ExceptHandler( node.type and node.type.attr, Block(body), name)] def visit_If(self, node): test = self.visit(node.test) body = [self.visit(n) for n in node.body] orelse = [self.visit(n) for n in node.orelse] # compound statement required for some OpenMP Directives if isnum(node.test) and node.test.value == 1: stmt = Block(body) else: stmt = If(test, Block(body), Block(orelse) if orelse else None) return self.process_locals(node, self.process_omp_attachements(node, stmt)) def visit_Raise(self, node): exc = node.exc and self.visit(node.exc) return Statement("throw {0}".format(exc or "")) def visit_Assert(self, node): params = [self.visit(node.test), node.msg and self.visit(node.msg)] sparams = ", ".join(_f for _f in params if _f) return Statement("pythonic::pythran_assert({0})".format(sparams)) def visit_Import(self, _): return Nop() # everything is already #included def visit_ImportFrom(self, _): assert False, "should be filtered out by the expand_import pass" def visit_Expr(self, node): stmt = Statement(self.visit(node.value)) return self.process_locals(node, self.process_omp_attachements(node, stmt)) def visit_Pass(self, node): stmt = EmptyStatement() return self.process_omp_attachements(node, stmt) def visit_Break(self, _): """ Generate break statement in most case and goto for orelse clause. 
See Also : cxx_loop """ if self.break_handlers and self.break_handlers[-1]: self.used_break.add(self.break_handlers[-1]) return Statement("goto {0}".format(self.break_handlers[-1])) else: return Statement("break") def visit_Continue(self, _): return Statement("continue") # expr def visit_BoolOp(self, node): values = [self.visit(value) for value in node.values] op = operator_to_lambda[type(node.op)] return reduce(op, values) def visit_BinOp(self, node): left = self.visit(node.left) right = self.visit(node.right) # special case pow for positive integral exponent if ispowi(node): right = 'std::integral_constant{{}}'.format( node.right.value) if isstr(node.left): left = "pythonic::types::str({})".format(left) elif isstr(node.right): right = "pythonic::types::str({})".format(right) return operator_to_lambda[type(node.op)](left, right) def visit_UnaryOp(self, node): operand = self.visit(node.operand) return operator_to_lambda[type(node.op)](operand) def visit_IfExp(self, node): test = self.visit(node.test) body = self.visit(node.body) orelse = self.visit(node.orelse) return ( "(((bool){0}) " "? 
typename __combined::type({1}) " ": typename __combined::type({2}))" ).format(test, body, orelse) def visit_List(self, node): if not node.elts: # empty list return '{}(pythonic::types::empty_list())'.format(self.types[node]) else: elts = [self.visit(n) for n in node.elts] node_type = self.types[node] # constructor disambiguation, clang++ workaround if len(elts) == 1: return "{0}({1}, pythonic::types::single_value())".format( self.types.builder.Assignable(node_type).generate(self.lctx), elts[0]) else: return "{0}({{{1}}})".format( self.types.builder.Assignable(node_type).generate(self.lctx), ", ".join(elts)) def visit_Set(self, node): if not node.elts: # empty set return '{}(pythonic::types::empty_set())'.format(self.types[node]) else: elts = [self.visit(n) for n in node.elts] node_type = self.types.builder.Assignable(self.types[node]) # constructor disambiguation, clang++ workaround if len(elts) == 1: return "{0}({1}, pythonic::types::single_value())".format( self.types.builder.Assignable(node_type).generate(self.lctx), elts[0]) else: return "{0}{{{{{1}}}}}".format( node_type, ", ".join("static_cast<{}::value_type>({})" .format(node_type, elt) for elt in elts)) def visit_Dict(self, node): if not node.keys: # empty dict return '{}(pythonic::types::empty_dict())'.format(self.types[node]) else: keys = [self.visit(n) for n in node.keys] values = [self.visit(n) for n in node.values] return "{0}{{{{{1}}}}}".format( self.types.builder.Assignable(self.types[node]), ", ".join("{{ {0}, {1} }}".format(k, v) for k, v in zip(keys, values))) def visit_Tuple(self, node): elts = [self.visit(elt) for elt in node.elts] tuple_type = self.types[node] result = "pythonic::types::make_tuple({0})".format(", ".join(elts)) if isinstance(tuple_type, self.types.builder.CombinedTypes): return '({}){}'.format(tuple_type.generate(self.lctx), result) else: return result def visit_Compare(self, node): left = self.visit(node.left) ops = [operator_to_lambda[type(n)] for n in node.ops] comparators = 
[self.visit(n) for n in node.comparators] all_cmps = zip([left] + comparators[:-1], ops, comparators) return " and ".join(op(x, y) for x, op, y in all_cmps) def visit_Call(self, node): args = [self.visit(n) for n in node.args] func = self.visit(node.func) # special hook for getattr, as we cannot represent it in C++ if func == 'pythonic::builtins::functor::getattr{}': return ('pythonic::builtins::getattr({}{{}}, {})' .format('pythonic::types::attr::' + node.args[1].value.upper(), args[0])) else: return "{}({})".format(func, ", ".join(args)) def visit_Constant(self, node): if node.value is None: ret = 'pythonic::builtins::None' elif isinstance(node.value, bool): ret = str(node.value).lower() elif isinstance(node.value, str): quoted = node.value.replace('"', r'\"').replace('\n', r'\n') if len(node.value) == 1: quoted = quoted.replace("'", r"\'") ret = 'pythonic::types::chr(\'' + quoted + '\')' else: ret = 'pythonic::types::str("' + quoted + '")' elif isinstance(node.value, complex): ret = "{0}({1}, {2})".format( PYTYPE_TO_CTYPE_TABLE[complex], node.value.real, node.value.imag) elif isnan(node.value): ret = 'pythonic::numpy::nan' elif isinf(node.value): ret = ('+' if node.value >= 0 else '-') + 'pythonic::numpy::inf' else: ret = repr(node.value) + TYPE_TO_SUFFIX.get(type(node.value), "") if node in self.immediates: assert isinstance(node.value, int) return "std::integral_constant<%s, %s>{}" % ( PYTYPE_TO_CTYPE_TABLE[type(node.value)], str(node.value).lower()) return ret def visit_Attribute(self, node): obj, path = attr_to_path(node) sattr = '::'.join(map(cxxid, path)) if not obj.isliteral(): sattr += '{}' return sattr def all_positive(self, node): if isinstance(node, ast.Tuple): return all(self.range_values[elt].low >= 0 for elt in node.elts) return self.range_values[node].low >= 0 def visit_Subscript(self, node): value = self.visit(node.value) # we cannot overload the [] operator in that case if isstr(node.value): value = 'pythonic::types::str({})'.format(value) # 
positive static index case if (isnum(node.slice) and (node.slice.value >= 0) and isinstance(node.slice.value, int)): return "std::get<{0}>({1})".format(node.slice.value, value) # positive indexing case elif self.all_positive(node.slice): slice_ = self.visit(node.slice) return "{1}.fast({0})".format(slice_, value) # extended slice case elif isextslice(node.slice): slices = [self.visit(elt) for elt in node.slice.elts] return "{1}({0})".format(','.join(slices), value) # standard case else: slice_ = self.visit(node.slice) return "{1}[{0}]".format(slice_, value) def visit_Name(self, node): if node.id in self.local_names: return cxxid(node.id) elif node.id in self.global_declarations: return "{0}()".format(cxxid(node.id)) else: return cxxid(node.id) # other def visit_Slice(self, node): args = [] for field in ('lower', 'upper', 'step'): nfield = getattr(node, field) arg = (self.visit(nfield) if nfield else 'pythonic::builtins::None') args.append(arg) if node.step is None or (isnum(node.step) and node.step.value == 1): if self.all_positive(node.lower) and self.all_positive(node.upper): builder = "pythonic::types::fast_contiguous_slice({},{})" else: builder = "pythonic::types::contiguous_slice({},{})" return builder.format(args[0], args[1]) else: return "pythonic::types::slice({},{},{})".format(*args) class CxxGenerator(CxxFunction): # recover previous generator state StateHolder = "__generator_state" StateValue = "__generator_value" # flags the last statement of a generator FinalStatement = "that_is_all_folks" # local declaration processing def process_locals(self, node, node_visited, *skipped): return node_visited # no processing def prepare_functiondef_context(self, node): self.extra_declarations = [] # 0 is used as initial_state, thus the +1 self.yields = {k: (1 + v, "yield_point{0}".format(1 + v)) for (v, k) in enumerate(self.gather(YieldPoints, node))} return super(CxxGenerator, self).prepare_functiondef_context(node) # stmt def visit_FunctionDef(self, node): 
self.returns = False tmp = self.prepare_functiondef_context(node) operator_body, formal_types, formal_args = tmp tmp = self.prepare_types(node) dflt_argv, dflt_argt, result_type, callable_type, pure_type = tmp # a generator has a call operator that returns the iterator next_name = "__generator__{0}".format(cxxid(node.name)) instanciated_next_name = "{0}{1}".format( next_name, "<{0}>".format(", ".join(formal_types)) if formal_types else "") if self.returns: operator_body.append(Label(CxxGenerator.FinalStatement)) operator_body.append(Statement("return result_type()")) next_declaration = [ FunctionDeclaration(Value("result_type", "next"), []), EmptyStatement()] # empty statement to force a comma ... # the constructors next_constructors = [ FunctionBody( FunctionDeclaration(Value("", next_name), []), Line(': pythonic::yielder() {}') )] if formal_types: # If all parameters have a default value, we don't need default # constructor if dflt_argv and all(dflt_argv): next_constructors = list() next_constructors.append(FunctionBody( make_function_declaration(self, node, "", next_name, formal_types, formal_args, dflt_argv), Line(": {0} {{ }}".format( ", ".join(["pythonic::yielder()"] + ["{0}({0})".format(arg) for arg in formal_args]))) )) next_iterator = [ FunctionBody( FunctionDeclaration(Value("void", "operator++"), []), Block([Statement("next()")])), FunctionBody( FunctionDeclaration( Value("typename {0}::result_type".format( instanciated_next_name), "operator*"), [], "const"), Block([ ReturnStatement( CxxGenerator.StateValue)])), FunctionBody( FunctionDeclaration( Value("pythonic::types::generator_iterator<{0}>" .format(next_name), "begin"), []), Block([Statement("next()"), ReturnStatement( "pythonic::types::generator_iterator<{0}>" "(*this)".format(next_name))])), FunctionBody( FunctionDeclaration( Value("pythonic::types::generator_iterator<{0}>" .format(next_name), "end"), []), Block([ReturnStatement( "pythonic::types::generator_iterator<{0}>()" .format(next_name))])) ] 
next_signature = templatize( FunctionDeclaration( Value( "typename {0}::result_type".format( instanciated_next_name), "{0}::next".format(instanciated_next_name)), []), formal_types) next_body = operator_body # the dispatch table at the entry point next_body.insert(0, Statement("switch({0}) {{ {1} }}".format( CxxGenerator.StateHolder, " ".join("case {0}: goto {1};".format(num, where) for (num, where) in sorted( self.yields.values(), key=lambda x: x[0]))))) ctx = CachedTypeVisitor(self.lctx) next_members = ([Statement("{0} {1}".format(ft, fa)) for (ft, fa) in zip(formal_types, formal_args)] + [Statement("{0} {1}".format( self.types[self.local_names[k]].generate(ctx), k)) for k in self.ldecls] + [Statement("{0} {1}".format(v, k)) for k, v in self.extra_declarations] + [Statement( "typename {0}::result_type {1}".format( instanciated_next_name, CxxGenerator.StateValue))]) extern_typedefs = [Typedef(Value(t.generate(ctx), t.name)) for t in self.types[node][1]] iterator_typedef = [ Typedef( Value("pythonic::types::generator_iterator<{0}>".format( "{0}<{1}>".format(next_name, ", ".join(formal_types)) if formal_types else next_name), "iterator")), Typedef(Value(result_type.generate(ctx), "value_type"))] result_typedef = [ Typedef(Value(result_type.generate(ctx), "result_type"))] extra_typedefs = (ctx.typedefs() + extern_typedefs + iterator_typedef + result_typedef) next_struct = templatize( Struct(next_name, extra_typedefs + next_members + next_constructors + next_iterator + next_declaration, "pythonic::yielder"), formal_types) next_definition = FunctionBody(next_signature, Block(next_body)) operator_declaration = [ templatize( make_const_function_declaration( self, node, instanciated_next_name, "operator()", formal_types, formal_args, dflt_argv), formal_types, dflt_argt), EmptyStatement()] operator_signature = make_const_function_declaration( self, node, instanciated_next_name, "{0}::operator()".format(cxxid(node.name)), formal_types, formal_args) operator_definition = 
FunctionBody( templatize(operator_signature, formal_types), Block([ReturnStatement("{0}({1})".format( instanciated_next_name, ", ".join(formal_args)))]) ) topstruct_type = templatize( Struct("type", extra_typedefs), formal_types) topstruct = Struct( cxxid(node.name), [topstruct_type, callable_type, pure_type] + operator_declaration) return [next_struct, topstruct], [next_definition, operator_definition] def visit_Return(self, node): self.returns = True return Block([Statement("{0} = -1".format(CxxGenerator.StateHolder)), Statement("goto {0}".format(CxxGenerator.FinalStatement)) ]) def visit_Yield(self, node): num, label = self.yields[node] return "".join(n for n in Block([ Assign(CxxGenerator.StateHolder, num), ReturnStatement("{0} = {1}".format(CxxGenerator.StateValue, self.visit(node.value))), Statement("{0}:".format(label)) ]).generate()) def visit_Assign(self, node): value = self.visit(node.value) targets = [self.visit(t) for t in node.targets] alltargets = "= ".join(targets) stmt = Assign(alltargets, value) return self.process_omp_attachements(node, stmt) def can_use_autofor(self, node): """ TODO : Yield should block only if it is use in the for loop, not in the whole function. """ return False def make_assign(self, local_iter_decl, local_iter, iterable): # For yield function, iterable is globals. self.extra_declarations.append((local_iter, local_iter_decl,)) return super(CxxGenerator, self).make_assign("", local_iter, iterable) class Cxx(Backend): """ Produces a C++ representation of the AST. 
>>> import gast as ast, pythran.passmanager as passmanager, os >>> node = ast.parse("def foo(): return 'hello world'") >>> pm = passmanager.PassManager('test') >>> r = pm.dump(Cxx, node) >>> print(str(r).replace(os.sep, '/')) #include #include namespace __pythran_test { struct foo { typedef void callable; typedef void pure; struct type { typedef typename pythonic::returnable::type \ result_type; } ; inline typename type::result_type operator()() const; ; } ; inline typename foo::type::result_type foo::operator()() const { return pythonic::types::str("hello world"); } } """ def __init__(self): """ Basic initialiser gathering analysis informations. """ self.result = None super(Cxx, self).__init__(Dependencies, GlobalDeclarations, Types, Scope, RangeValues, PureExpressions, Immediates, Ancestors) # mod def visit_Module(self, node): """ Build a compilation unit. """ # build all types header_deps = sorted(self.dependencies) headers = [Include(os.path.join("pythonic", "include", *map(cxxid, t)) + ".hpp") for t in header_deps] headers += [Include(os.path.join("pythonic", *map(cxxid, t)) + ".hpp") for t in header_deps] decls_n_defns = list(filter(None, (self.visit(stmt) for stmt in node.body))) decls, defns = zip(*decls_n_defns) if decls_n_defns else ([], []) nsbody = [s for ls in decls + defns for s in ls] ns = Namespace(pythran_ward + self.passmanager.module_name, nsbody) self.result = CompilationUnit(headers + [ns]) def visit_FunctionDef(self, node): yields = self.gather(YieldPoints, node) visitor = (CxxGenerator if yields else CxxFunction)(self) return visitor.visit(node) pythran-0.10.0+ds2/pythran/config.py000066400000000000000000000316471416264035500172660ustar00rootroot00000000000000try: # python3 vs. 
python2 from configparser import ConfigParser except ImportError: from ConfigParser import SafeConfigParser as ConfigParser import io import logging import numpy.distutils.system_info as numpy_sys import numpy import os import sys logger = logging.getLogger('pythran') def get_include(): # using / as separator as advised in the distutils doc return (os.path.dirname(os.path.dirname(__file__)) or '.') + '/pythran' class silent(object): ''' Silent sys.stderr at the system level ''' def __enter__(self): try: self.prevfd = os.dup(sys.stderr.fileno()) os.close(sys.stderr.fileno()) except io.UnsupportedOperation: self.prevfd = None self.prevstream = sys.stderr sys.stderr = open(os.devnull, 'r') def __exit__(self, exc_type, exc_value, traceback): sys.stderr.close() sys.stderr = self.prevstream if self.prevfd: os.dup2(self.prevfd, sys.stderr.fileno()) os.close(self.prevfd) def get_paths_cfg( sys_file='pythran.cfg', platform_file='pythran-{}.cfg'.format(sys.platform), user_file='.pythranrc' ): sys_config_dir = os.path.dirname(__file__) sys_config_path = os.path.join(sys_config_dir, sys_file) platform_config_path = os.path.join(sys_config_dir, platform_file) if not os.path.exists(platform_config_path): platform_config_path = os.path.join(sys_config_dir, "pythran-default.cfg") user_config_path = os.environ.get('PYTHRANRC', None) if not user_config_path: user_config_dir = os.environ.get('XDG_CONFIG_HOME', None) if not user_config_dir: user_config_dir = os.environ.get('HOME', None) if not user_config_dir: user_config_dir = '~' user_config_path = os.path.expanduser( os.path.join(user_config_dir, user_file)) return {"sys": sys_config_path, "platform": platform_config_path, "user": user_config_path} def init_cfg(sys_file, platform_file, user_file, config_args=None): paths = get_paths_cfg(sys_file, platform_file, user_file) sys_config_path = paths["sys"] platform_config_path = paths["platform"] user_config_path = paths["user"] cfgp = ConfigParser() for required in (sys_config_path, 
platform_config_path): cfgp.read([required]) cfgp.read([user_config_path]) if config_args is not None: update_cfg(cfgp, config_args) return cfgp def update_cfg(cfgp, config_args): # Override the config options with those provided on the command line # e.g. compiler.blas=pythran-openblas. for arg in config_args: try: lhs, rhs = arg.split('=', maxsplit=1) section, item = lhs.split('.') if not cfgp.has_section(section): cfgp.add_section(section) cfgp.set(section, item, rhs) except Exception: pass def lint_cfg(cfgp, **paths): if not paths: paths = get_paths_cfg() # Use configuration from sys and platform as "reference" cfgp_ref = ConfigParser() cfgp_ref.read([paths["sys"], paths["platform"]]) # Check if pythran configuration files exists for loc, path in paths.items(): exists = os.path.exists(path) msg = " ".join([ "{} file".format(loc).rjust(13), "exists:" if exists else "does not exist:", path ]) logger.info(msg) if exists else logger.warning(msg) for section in cfgp.sections(): # Check if section in the current configuration exists in the # reference configuration if cfgp_ref.has_section(section): options = set(cfgp.options(section)) options_ref = set(cfgp_ref.options(section)) # Check if the options in the section are supported by the # reference configuration if options.issubset(options_ref): logger.info( ( "pythranrc section [{}] is valid and options are " "correct" ).format(section) ) else: logger.warning( ( "pythranrc section [{}] is valid but options {} " "are incorrect!" ).format(section, options.difference(options_ref)) ) else: logger.warning("pythranrc section [{}] is invalid!" 
.format(section)) def make_extension(python, **extra): # load platform specific configuration then user configuration cfg = init_cfg('pythran.cfg', 'pythran-{}.cfg'.format(sys.platform), '.pythranrc', extra.get('config', None)) if 'config' in extra: extra.pop('config') def parse_define(define): index = define.find('=') if index < 0: return (define, None) else: return define[:index], define[index + 1:] extension = { "language": "c++", # forcing str conversion to handle Unicode case (the default on MS) "define_macros": [str(x) for x in cfg.get('compiler', 'defines').split()], "undef_macros": [str(x) for x in cfg.get('compiler', 'undefs').split()], "include_dirs": [str(x) for x in cfg.get('compiler', 'include_dirs').split()], "library_dirs": [str(x) for x in cfg.get('compiler', 'library_dirs').split()], "libraries": [str(x) for x in cfg.get('compiler', 'libs').split()], "extra_compile_args": [str(x) for x in cfg.get('compiler', 'cflags').split()], "extra_link_args": [str(x) for x in cfg.get('compiler', 'ldflags').split()], "extra_objects": [] } if python: extension['define_macros'].append('ENABLE_PYTHON_MODULE') extension['define_macros'].append( '__PYTHRAN__={}'.format(sys.version_info.major)) pythonic_dir = get_include() extension["include_dirs"].append(pythonic_dir) extra.pop('language', None) # forced to c++ anyway cxx = extra.pop('cxx', None) cc = extra.pop('cc', None) if cxx is None: cxx = compiler() if cxx is not None: extension['cxx'] = cxx extension['cc'] = cc or cxx # Honor CXXFLAGS (note: Pythran calls this `cflags` everywhere, however the # standard environment variable is `CXXFLAGS` not `CFLAGS`). 
cflags = os.environ.get('CXXFLAGS', None) if cflags is not None: extension['extra_compile_args'].extend(cflags.split()) # Honor LDFLAGS ldflags = os.environ.get('LDFLAGS', None) if ldflags is not None: extension['extra_link_args'].extend(ldflags.split()) for k, w in extra.items(): extension[k].extend(w) if cfg.getboolean('pythran', 'complex_hook'): # the patch is *not* portable extension["include_dirs"].append(pythonic_dir + '/pythonic/patch') # numpy specific if python: extension['include_dirs'].append(numpy.get_include()) # blas dependency reserved_blas_entries = 'pythran-openblas', 'none' user_blas = cfg.get('compiler', 'blas') if user_blas == 'pythran-openblas': try: import pythran_openblas as openblas # required to cope with atlas missing extern "C" extension['define_macros'].append('PYTHRAN_BLAS_OPENBLAS') extension['include_dirs'].extend(openblas.include_dirs) extension['extra_objects'].append( os.path.join(openblas.library_dir, openblas.static_library) ) except ImportError: logger.warning("Failed to find 'pythran-openblas' package. " "Please install it or change the compiler.blas " "setting. Defaulting to 'blas'") user_blas = 'blas' elif user_blas == 'none': extension['define_macros'].append('PYTHRAN_BLAS_NONE') if user_blas not in reserved_blas_entries: # Numpy can pollute stdout with checks with silent(): numpy_blas = numpy_sys.get_info(user_blas) # required to cope with atlas missing extern "C" extension['define_macros'].append('PYTHRAN_BLAS_{}' .format(user_blas.upper())) extension['libraries'].extend(numpy_blas.get('libraries', [])) extension['library_dirs'].extend( numpy_blas.get('library_dirs', [])) extension['include_dirs'].extend( numpy_blas.get('include_dirs', [])) # final macro normalization extension["define_macros"] = [ dm if isinstance(dm, tuple) else parse_define(dm) for dm in extension["define_macros"]] return extension def compiler(): """Get compiler to use for C++ to binary process. 
The precedence for choosing the compiler is as follows:: 1. `CXX` environment variable 2. User configuration (~/.pythranrc) Returns None if none is set or if it's set to the empty string """ cfg_cxx = str(cfg.get('compiler', 'CXX')) if not cfg_cxx: cfg_cxx = None return os.environ.get('CXX', cfg_cxx) or None # load platform specific configuration then user configuration cfg = init_cfg('pythran.cfg', 'pythran-{}.cfg'.format(sys.platform), '.pythranrc') def run(): ''' Dump on stdout the config flags required to compile pythran-generated code. ''' import argparse import distutils.ccompiler import distutils.sysconfig import pythran import numpy parser = argparse.ArgumentParser( prog='pythran-config', description='output build options for pythran-generated code', epilog="It's a megablast!" ) parser.add_argument('--compiler', action='store_true', help='print default compiler') parser.add_argument('--cflags', action='store_true', help='print compilation flags') parser.add_argument('--libs', action='store_true', help='print linker flags') parser.add_argument('--no-python', action='store_true', help='do not include Python-related flags') parser.add_argument('--verbose', '-v', action='count', default=0, help=( 'verbose mode: [-v] prints warnings if pythranrc ' 'has an invalid configuration; use ' '[-vv] for more information') ) args = parser.parse_args(sys.argv[1:]) args.python = not args.no_python output = [] extension = pythran.config.make_extension(python=args.python) if args.verbose >= 1: if args.verbose == 1: logger.setLevel(logging.WARNING) else: logger.setLevel(logging.INFO) lint_cfg(cfg) if args.compiler or args.verbose >= 2: cxx = compiler() or 'c++' logger.info('CXX = '.rjust(10) + cxx) if args.compiler: output.append(cxx) compiler_obj = distutils.ccompiler.new_compiler() distutils.sysconfig.customize_compiler(compiler_obj) if args.cflags or args.verbose >= 2: def fmt_define(define): name, value = define if value is None: return '-D' + name else: return '-D' + name 
+ '=' + value cflags = [] cflags.extend(fmt_define(define) for define in extension['define_macros']) cflags.extend(('-I' + include) for include in extension['include_dirs']) if args.python: cflags.append('-I' + numpy.get_include()) cflags.append('-I' + distutils.sysconfig.get_python_inc()) logger.info('CXXFLAGS = '.rjust(10) + ' '.join(cflags)) if args.cflags: output.extend(cflags) if args.libs or args.verbose >= 2: ldflags = [] ldflags.extend((compiler_obj.library_dir_option(include)) for include in extension['library_dirs']) ldflags.extend((compiler_obj.library_option(include)) for include in extension['libraries']) if args.python: ldflags.append(compiler_obj.library_dir_option(distutils.sysconfig.get_config_var('LIBPL'))) ldflags.extend(distutils.sysconfig.get_config_var('LIBS').split()) ldflags.append(compiler_obj.library_option('python') + distutils.sysconfig.get_config_var('VERSION')) logger.info('LDFLAGS = '.rjust(10) + ' '.join(ldflags)) if args.libs: output.extend(ldflags) if output: print(' '.join(output)) if __name__ == '__main__': run() pythran-0.10.0+ds2/pythran/conversion.py000066400000000000000000000122731416264035500202000ustar00rootroot00000000000000""" This module provides way to convert a Python value into an ast. """ import gast as ast import numpy as np import numbers # Maximum length of folded sequences # Containers larger than this are not unfolded to limit code size growth MAX_LEN = 2 ** 8 class ConversionError(Exception): """ Exception raised when conversion from value to ast can't be done. """ class ToNotEval(Exception): """ Exception raised when we don't want to evaluate the value. It is case of too long expression for example. 
""" def totuple(l): try: return tuple(map(totuple, l)) except TypeError: return l def dtype_to_ast(name): if name in ('bool',): return ast.Attribute( ast.Name('builtins', ast.Load(), None, None), name, ast.Load()) else: return ast.Attribute( ast.Name(mangle('numpy'), ast.Load(), None, None), name, ast.Load()) def size_container_folding(value): """ Convert value to ast expression if size is not too big. Converter for sized container. """ def size(x): return len(getattr(x, 'flatten', lambda: x)()) if size(value) < MAX_LEN: if isinstance(value, list): return ast.List([to_ast(elt) for elt in value], ast.Load()) elif isinstance(value, tuple): return ast.Tuple([to_ast(elt) for elt in value], ast.Load()) elif isinstance(value, set): if value: return ast.Set([to_ast(elt) for elt in value]) else: return ast.Call(func=ast.Attribute( ast.Name(mangle('builtins'), ast.Load(), None, None), 'set', ast.Load()), args=[], keywords=[]) elif isinstance(value, dict): keys = [to_ast(elt) for elt in value.keys()] values = [to_ast(elt) for elt in value.values()] return ast.Dict(keys, values) elif isinstance(value, np.ndarray): if len(value) == 0: return ast.Call(func=ast.Attribute( ast.Name(mangle('numpy'), ast.Load(), None, None), 'empty', ast.Load()), args=[to_ast(value.shape), dtype_to_ast(value.dtype.name)], keywords=[]) else: return ast.Call(func=ast.Attribute( ast.Name(mangle('numpy'), ast.Load(), None, None), 'array', ast.Load()), args=[to_ast(totuple(value.tolist())), dtype_to_ast(value.dtype.name)], keywords=[]) else: raise ConversionError() else: raise ToNotEval() def builtin_folding(value): """ Convert builtin function to ast expression. """ if isinstance(value, (type(None), bool)): name = str(value) else: name = value.__name__ return ast.Attribute(ast.Name('builtins', ast.Load(), None, None), name, ast.Load()) def to_ast(value): """ Turn a value into ast expression. 
>>> a = 1 >>> print(ast.dump(to_ast(a))) Num(n=1) >>> a = [1, 2, 3] >>> print(ast.dump(to_ast(a))) List(elts=[Num(n=1), Num(n=2), Num(n=3)], ctx=Load()) """ if any(value is t for t in (bool, int, float)): iinfo = np.iinfo(int) if isinstance(value, int) and not (iinfo.min <= value <= iinfo.max): from pythran.syntax import PythranSyntaxError raise PythranSyntaxError("constant folding results in big int") return builtin_folding(value) elif isinstance(value, np.generic): return to_ast(value.item()) elif isinstance(value, (numbers.Number, str, bool, type(None))): return ast.Constant(value, None) elif isinstance(value, (list, tuple, set, dict, np.ndarray)): return size_container_folding(value) elif hasattr(value, "__module__") and value.__module__ == "builtins": # TODO Can be done the same way for others modules return builtin_folding(value) # only meaningful for python3 elif isinstance(value, (filter, map, zip)): return to_ast(list(value)) elif isinstance(value, np._globals._NoValueType): return ast.Attribute(ast.Attribute(ast.Name('numpy', ast.Load(), None, None), '_globals', ast.Load()), '_NoValueType', ast.Load()) raise ToNotEval() PYTHRAN_IMPORT_MANGLING = '__pythran_import_' def mangle(name): ''' Mangle a module name, except the builtins module >>> mangle('numpy') __pythran_import_numpy >>> mangle('builtins') builtins ''' if name == 'builtins': return name else: return PYTHRAN_IMPORT_MANGLING + name def demangle(name): ''' Demangle a module name, if needed >>> demangle('__pythran_import_numpy') numpy >>> mangle('numpy') numpy ''' if name.startswith(PYTHRAN_IMPORT_MANGLING): return name[len(PYTHRAN_IMPORT_MANGLING):] else: return name pythran-0.10.0+ds2/pythran/cxxgen.py000066400000000000000000000541701416264035500173110ustar00rootroot00000000000000""" Generator for C/C++. 
""" # Serge Guelton: The licensing terms are not set in the source package, but # pypi[1] says the software is under the MIT license, so I reproduce it here # [1] http://pypi.python.org/pypi/cgen # # Copyright (C) 2008 Andreas Kloeckner # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # from textwrap import dedent from pythran.tables import pythran_ward from pythran.spec import signatures_to_string __copyright__ = "Copyright (C) 2008 Andreas Kloeckner" class Nop(object): def generate(self, with_semicolon=True): yield '' class Declarator(object): def generate(self, with_semicolon=True): tp_lines, tp_decl = self.get_decl_pair() tp_lines = list(tp_lines) for line in tp_lines[:-1]: yield line sc = ";" if with_semicolon else "" if tp_decl is None: yield "%s%s" % (tp_lines[-1], sc) else: yield "%s %s%s" % (tp_lines[-1], tp_decl, sc) def get_decl_pair(self): """Return a tuple ``(type_lines, rhs)``. 
*type_lines* is a non-empty list of lines (most often just a single one) describing the type of this declarator. *rhs* is the right- hand side that actually contains the function/array/constness notation making up the bulk of the declarator syntax. """ def inline(self): """Return the declarator as a single line.""" tp_lines, tp_decl = self.get_decl_pair() tp_lines = " ".join(tp_lines) if tp_decl is None: return tp_lines else: return "%s %s" % (tp_lines, tp_decl) class Value(Declarator): """A simple declarator: *typename* and *name* are given as strings.""" def __init__(self, typename, name): self.typename = typename self.name = name def get_decl_pair(self): return [self.typename], self.name class NestedDeclarator(Declarator): def __init__(self, subdecl): self.subdecl = subdecl @property def name(self): return self.subdecl.name def get_decl_pair(self): return self.subdecl.get_decl_pair() class DeclSpecifier(NestedDeclarator): def __init__(self, subdecl, spec, sep=' '): NestedDeclarator.__init__(self, subdecl) self.spec = spec self.sep = sep def get_decl_pair(self): def add_spec(sub_it): it = iter(sub_it) try: yield "%s%s%s" % (self.spec, self.sep, next(it)) except StopIteration: pass for line in it: yield line sub_tp, sub_decl = self.subdecl.get_decl_pair() return add_spec(sub_tp), sub_decl class Typedef(DeclSpecifier): def __init__(self, subdecl): DeclSpecifier.__init__(self, subdecl, "typedef") class FunctionDeclaration(NestedDeclarator): def __init__(self, subdecl, arg_decls, *attributes): NestedDeclarator.__init__(self, subdecl) self.inline = True self.arg_decls = arg_decls self.attributes = attributes def get_decl_pair(self): sub_tp, sub_decl = self.subdecl.get_decl_pair() if self.inline: sub_tp = ['inline'] + sub_tp return sub_tp, ("%s(%s) %s" % ( sub_decl, ", ".join(ad.inline() for ad in self.arg_decls), " ".join(self.attributes))) class Struct(Declarator): """ A structure declarator. Attributes ---------- tpname : str Name of the structure. 
(None for unnamed struct) fields : [Declarator] Content of the structure. inherit : str Parent class of current structure. """ def __init__(self, tpname, fields, inherit=None): """Initialize the structure declarator. """ self.tpname = tpname self.fields = fields self.inherit = inherit def get_decl_pair(self): """ See Declarator.get_decl_pair.""" def get_tp(): """ Iterator generating lines for struct definition. """ decl = "struct " if self.tpname is not None: decl += self.tpname if self.inherit is not None: decl += " : " + self.inherit yield decl yield "{" for f in self.fields: for f_line in f.generate(): yield " " + f_line yield "} " return get_tp(), "" # template -------------------------------------------------------------------- class Template(NestedDeclarator): def __init__(self, template_spec, subdecl): super(Template, self).__init__(subdecl) self.template_spec = template_spec def generate(self, with_semicolon=False): yield "template <%s>" % ", ".join(self.template_spec) for i in self.subdecl.generate(with_semicolon): yield i if not isinstance(self.subdecl, (Template, FunctionDeclaration)): yield ";" # control flow/statement stuff ------------------------------------------------ class ExceptHandler(object): def __init__(self, name, body, alias=None): self.name = name self.body = body self.alias = alias def generate(self): if self.name is None: yield "catch(...)" else: yield "catch (pythonic::types::%s const& %s)" % (self.name, self.alias or '') for line in self.body.generate(): yield line class TryExcept(object): def __init__(self, try_, except_): self.try_ = try_ self.except_ = except_ def generate(self): yield "try" for line in self.try_.generate(): yield line for exception in self.except_: for line in exception.generate(): yield " " + line class If(object): def __init__(self, condition, then_, else_=None): self.condition = condition self.then_ = then_ self.else_ = else_ def generate(self): yield "if (%s)" % self.condition for line in self.then_.generate(): 
yield line if self.else_ is not None: yield "else" for line in self.else_.generate(): yield line class Loop(object): def __init__(self, body): self.body = body def generate(self): yield self.intro_line() for line in self.body.generate(): yield line class While(Loop): def __init__(self, condition, body): super(While, self).__init__(body) self.condition = condition def intro_line(self): return "while (%s)" % self.condition class For(Loop): def __init__(self, start, condition, update, body): super(For, self).__init__(body) self.start = start self.condition = condition self.update = update def intro_line(self): return "for (%s; %s; %s)" % (self.start, self.condition, self.update) class AutoFor(Loop): def __init__(self, target, iter_, body): super(AutoFor, self).__init__(body) self.target = target self.iter = iter_ def intro_line(self): if self.target == '_': return "for (PYTHRAN_UNUSED auto&& {0}: {1})".format(self.target, self.iter) else: return "for (auto&& {0}: {1})".format(self.target, self.iter) # simple statements ----------------------------------------------------------- class Define(object): def __init__(self, symbol, value): self.symbol = symbol self.value = value def generate(self): yield "#define %s %s" % (self.symbol, self.value) class Include(object): def __init__(self, filename, system=True): self.filename = filename self.system = system def generate(self): if self.system: yield "#include <%s>" % self.filename else: yield "#include \"%s\"" % self.filename class Label(object): def __init__(self, label): self.label = label def generate(self): yield self.label + ':;' class Statement(object): def __init__(self, text): self.text = text def generate(self): yield self.text + ";" class AnnotatedStatement(object): def __init__(self, stmt, annotations): self.stmt = stmt self.annotations = annotations def generate(self): for directive in self.annotations: pragma = "#pragma " + directive.s yield pragma.format(*directive.deps) for s in self.stmt.generate(): yield s 
class ReturnStatement(Statement): def generate(self): yield "return " + self.text + ";" class EmptyStatement(Statement): def __init__(self): Statement.__init__(self, "") class Assign(object): def __init__(self, lvalue, rvalue): self.lvalue = lvalue self.rvalue = rvalue def generate(self): yield "%s = %s;" % (self.lvalue, self.rvalue) class Line(object): def __init__(self, text=""): self.text = text def generate(self): yield self.text # initializers ---------------------------------------------------------------- class FunctionBody(object): def __init__(self, fdecl, body): """Initialize a function definition. *fdecl* is expected to be a :class:`FunctionDeclaration` instance, while *body* is a :class:`Block`. """ self.fdecl = fdecl self.body = body def generate(self): for f_line in self.fdecl.generate(with_semicolon=False): yield f_line for b_line in self.body.generate(): yield b_line # block ----------------------------------------------------------------------- class Block(object): def __init__(self, contents=None): if contents is None: contents = [] self.contents = contents def generate(self): yield "{" for item in self.contents: for item_line in item.generate(): yield " " + item_line yield "}" class Module(Block): def generate(self): for c in self.contents: for line in c.generate(): yield line class Namespace(Block): def __init__(self, name, contents=None): Block.__init__(self, contents) self.name = name def generate(self): yield "namespace " + self.name yield "{" for item in self.contents: for item_line in item.generate(): yield " " + item_line yield "}" # copy-pasted from codepy.bpl, which is a real mess... # the original code was under MIT License # cf. 
http://pypi.python.org/pypi/codepy # so I reproduce it here # # Copyright (C) 2008 Andreas Kloeckner # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# class PythonModule(object): ''' Wraps the creation of a Pythran module wrapped a Python native Module ''' def __init__(self, name, docstrings, metadata): ''' Builds an empty PythonModule ''' self.name = name self.preamble = [] self.includes = [] self.functions = {} self.global_vars = [] self.implems = [] self.capsules = [] self.python_implems = [] self.wrappers = [] self.docstrings = docstrings self.metadata = metadata moduledoc = self.docstring(self.docstrings.get(None, "")) self.metadata['moduledoc'] = moduledoc def docstring(self, doc): return self.splitstring(dedent(doc).replace('"', '\\"') .replace('\n', '\\n') .replace('\r', '\\r')) def splitstring(self, doc): return '"{}"'.format('\\n""'.join(doc.split('\\n'))) def add_to_preamble(self, *pa): self.preamble.extend(pa) def add_to_includes(self, *incl): self.includes.extend(incl) def add_pyfunction(self, func, name, types, signature): self.add_function_to(self.python_implems, func, name, types, signature) def add_capsule(self, func, ptrname, sig): self.capsules.append((ptrname, sig)) self.implems.append(func) def add_function(self, func, name, types, signature): self.add_function_to(self.implems, func, name, types, signature) def add_function_to(self, to, func, name, ctypes, signature): """ Add a function to be exposed. *func* is expected to be a :class:`cgen.FunctionBody`. 
Because a function can have several signatures exported, this method actually creates a wrapper for each specialization and a global wrapper that checks the argument types and runs the correct candidate, if any """ to.append(func) args_unboxing = [] # turns PyObject to c++ object args_checks = [] # check if the above conversion is valid wrapper_name = pythran_ward + 'wrap_' + func.fdecl.name for i, t in enumerate(ctypes): args_unboxing.append('from_python<{}>(args_obj[{}])'.format(t, i)) args_checks.append('is_convertible<{}>(args_obj[{}])'.format(t, i)) arg_decls = func.fdecl.arg_decls[:len(ctypes)] keywords = "".join('"{}", '.format(arg.name) for arg in arg_decls) wrapper = dedent(''' static PyObject * {wname}(PyObject *self, PyObject *args, PyObject *kw) {{ PyObject* args_obj[{size}+1]; {silent_warning} char const* keywords[] = {{{keywords} nullptr}}; if(! PyArg_ParseTupleAndKeywords(args, kw, "{fmt}", (char**)keywords {objs})) return nullptr; if({checks}) return to_python({name}({args})); else {{ return nullptr; }} }}''') self.wrappers.append( wrapper.format(name=func.fdecl.name, silent_warning= '' if ctypes else '(void)args_obj;', size=len(ctypes), fmt="O" * len(ctypes), objs=''.join(', &args_obj[%d]' % i for i in range(len(ctypes))), args=', '.join(args_unboxing), checks=' && '.join(args_checks) or '1', wname=wrapper_name, keywords=keywords, ) ) func_descriptor = wrapper_name, ctypes, signature self.functions.setdefault(name, []).append(func_descriptor) def add_global_var(self, name, init): self.global_vars.append(name) self.python_implems.append(Assign('static PyObject* ' + name, 'to_python({})'.format(init))) def __str__(self): """Generate (i.e. yield) the source code of the module line-by-line. 
""" themethods = [] theextraobjects = [] theoverloads = [] for vname in self.global_vars: theextraobjects.append( 'PyModule_AddObject(theModule, "{0}", {0});'.format(vname)) for fname, overloads in self.functions.items(): tryall = [] signatures = [] for overload, ctypes, signature in overloads: try_ = dedent(""" if(PyObject* obj = {name}(self, args, kw)) return obj; PyErr_Clear(); """.format(name=overload)) tryall.append(try_) signatures.append(signature) candidates = signatures_to_string(fname, signatures) wrapper_name = pythran_ward + 'wrapall_' + fname candidate = dedent(''' static PyObject * {wname}(PyObject *self, PyObject *args, PyObject *kw) {{ return pythonic::handle_python_exception([self, args, kw]() -> PyObject* {{ {tryall} return pythonic::python::raise_invalid_argument( "{name}", {candidates}, args, kw); }}); }} '''.format(name=fname, tryall="\n".join(tryall), candidates=self.splitstring( candidates.replace('\n', '\\n') ), wname=wrapper_name)) fdoc = self.docstring(self.docstrings.get(fname, '')) themethod = dedent('''{{ "{name}", (PyCFunction){wname}, METH_VARARGS | METH_KEYWORDS, {doc}}}'''.format(name=fname, wname=wrapper_name, doc=fdoc)) themethods.append(themethod) theoverloads.append(candidate) for ptrname, sig in self.capsules: capsule = ''' PyModule_AddObject(theModule, "{ptrname}", PyCapsule_New((void*)&{ptrname}, "{sig}", NULL) );'''.format(ptrname=ptrname, sig=sig) theextraobjects.append(capsule) methods = dedent(''' static PyMethodDef Methods[] = {{ {methods} {{NULL, NULL, 0, NULL}} }}; '''.format(methods="".join(m + "," for m in themethods))) module = dedent(''' #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = {{ PyModuleDef_HEAD_INIT, "{name}", /* m_name */ {moduledoc}, /* m_doc */ -1, /* m_size */ Methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }}; #define PYTHRAN_RETURN return theModule #define PYTHRAN_MODULE_INIT(s) PyInit_##s #else #define PYTHRAN_RETURN return 
#define PYTHRAN_MODULE_INIT(s) init##s #endif PyMODINIT_FUNC PYTHRAN_MODULE_INIT({name})(void) #ifndef _WIN32 __attribute__ ((visibility("default"))) #if defined(GNUC) && !defined(__clang__) __attribute__ ((externally_visible)) #endif #endif ; PyMODINIT_FUNC PYTHRAN_MODULE_INIT({name})(void) {{ import_array() #if PY_MAJOR_VERSION >= 3 PyObject* theModule = PyModule_Create(&moduledef); #else PyObject* theModule = Py_InitModule3("{name}", Methods, {moduledoc} ); #endif if(! theModule) PYTHRAN_RETURN; PyObject * theDoc = Py_BuildValue("(sss)", "{version}", "{date}", "{hash}"); if(! theDoc) PYTHRAN_RETURN; PyModule_AddObject(theModule, "__pythran__", theDoc); {extraobjects} PYTHRAN_RETURN; }} '''.format(name=self.name, extraobjects='\n'.join(theextraobjects), **self.metadata)) body = (self.preamble + self.includes + self.implems + [Line('#ifdef ENABLE_PYTHON_MODULE')] + self.python_implems + [Line(code) for code in self.wrappers + theoverloads] + [Line(methods), Line(module), Line('#endif')]) return "\n".join(Module(body).generate()) class CompilationUnit(object): def __init__(self, body): self.body = body def __str__(self): return '\n'.join('\n'.join(s.generate()) for s in self.body) pythran-0.10.0+ds2/pythran/cxxtypes.py000066400000000000000000000431671416264035500177100ustar00rootroot00000000000000''' This module defines classes needed to manipulate c++ types from pythran. 
''' from inspect import isclass class ordered_set(object): def __init__(self, elements=None): self.values = list() self.unique_values = set() if elements is not None: for elt in elements: self.append(elt) def append(self, value): if value not in self.unique_values: self.values.append(value) self.unique_values.add(value) def __iter__(self): return iter(self.values) def __len__(self): return len(self.values) def __getitem__(self, index): return self.values[index] class TypeBuilder(object): ''' >>> builder = TypeBuilder() >>> builder.NamedType('long long') long long >>> l_ty = builder.NamedType('long') >>> i_ty = builder.NamedType('int') >>> f_ty = builder.NamedType('float') >>> l_ty + builder.NamedType('long') long >>> builder.NamedType('long') + builder.NamedType('char') typename __combined::type >>> builder.ArgumentType(4) typename std::remove_cv::\ type>::type >>> builder.Assignable(builder.NamedType("long")) typename pythonic::assignable::type >>> builder.Returnable(builder.NamedType("long")) typename pythonic::returnable::type >>> builder.Lazy(builder.NamedType("long")) typename pythonic::lazy::type >>> builder.DeclType("toto") typename std::remove_cv<\ typename std::remove_reference::type>::type >>> builder.IteratorOfType(builder.NamedType('some')) typename some::iterator >>> builder.IteratorOfType(builder.NamedType('typename some::stuff')) typename some::stuff::iterator >>> builder.IteratorContentType(builder.NamedType('str')) typename std::remove_cv::type::iterator>::value_type>::type >>> builder.GetAttr(builder.NamedType('complex'), 'real') decltype(pythonic::builtins::getattr(\ pythonic::types::attr::REAL{}, std::declval())) >>> builder.ReturnType(builder.NamedType('math::cos'), [f_ty]) decltype(std::declval()(std::declval())) >>> t = builder.TupleType([i_ty, builder.NamedType('str')]) >>> builder.ElementType(1, t) typename std::tuple_element<1,typename std::remove_reference<\ decltype(pythonic::types::make_tuple(std::declval(), \ 
std::declval()))>::type>::type >>> builder.ListType(builder.NamedType('int')) pythonic::types::list::type> >>> builder.SetType(builder.NamedType('int')) pythonic::types::set >>> builder.TupleType([i_ty, builder.NamedType('bool')]) decltype(pythonic::types::make_tuple(std::declval(), \ std::declval())) >>> builder.DictType(builder.NamedType('int'), builder.NamedType('float')) pythonic::types::dict >>> builder.ContainerType(builder.NamedType('int')) container::type> >>> builder.IndexableType(builder.NamedType('int')) indexable >>> op = lambda x,y: x + '+' + y >>> builder.ExpressionType(op, [l_ty, i_ty]) decltype(std::declval()+std::declval()) ''' def __init__(builder): builder._instances = dict() class Type(object): """ A generic type object to be sub-classed The keyword arguments are used to built the internal representation one attribute per key with the associated value """ def __new__(cls, *args, **kwargs): # no memoization for PType if cls.__name__ == 'PType': return super(Type, cls).__new__(cls) key = cls, for v in args + tuple(v for k, v in sorted(kwargs.items())): if isinstance(v, list): v = tuple(v) key += v, if key not in builder._instances: builder._instances[key] = super(Type, cls).__new__(cls) return builder._instances[key] def __init__(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, list): v = tuple(v) setattr(self, k, v) def iscombined(self): return False def __add__(self, other): if self is other: return self return CombinedTypes(self, other) def __repr__(self): return self.generate(str) class NamedType(Type): """ A generic type object, to hold scalar types and such """ def __init__(self, srepr): super(NamedType, self).__init__(srepr=srepr) def generate(self, _): return self.srepr class PType(Type): """ A generic parametric type """ prefix = "__ptype{0}" count = 0 def __init__(self, fun, ptype): super(PType, self).__init__(fun=fun, type=ptype, name=PType.prefix.format( PType.count)) PType.count += 1 def generate(self, ctx): return 
ctx(self.type) def instanciate(self, caller, arguments): if self.fun is caller: return builder.UnknownType else: return InstantiatedType(self.fun, self.name, arguments) class LType(Type): def __init__(self, base, node): super(LType, self).__init__(node=node) self.isrec = False self.orig = base self.final_type = base def generate(self, ctx): if self.isrec: return self.orig.generate(ctx) else: self.isrec = True return self.final_type.generate(ctx) class InstantiatedType(Type): """ A type instantiated from a parametric type """ def __init__(self, fun, name, arguments): super(InstantiatedType, self).__init__(fun=fun, name=name, arguments=arguments) def generate(self, ctx): if self.arguments: args = ", ".join(ctx(arg) for arg in self.arguments) template_params = "<{0}>".format(args) else: template_params = "" return "typename {0}::type{1}::{2}".format(self.fun.name, template_params, self.name) class CombinedTypes(Type): """ type resulting from the combination of other types """ def __init__(self, *types): super(CombinedTypes, self).__init__(types=types) def iscombined(self): return True def __add__(self, other): worklist = list(self.types) visited = set() while worklist: item = worklist.pop() if item is other: return self if item in visited: continue visited.add(item) if isinstance(item, CombinedTypes): worklist.extend(item.types) return Type.__add__(self, other) def __radd__(self, other): return self.__add__(other) def generate(self, ctx): import sys current_recursion_limit = sys.getrecursionlimit() try: return 'typename __combined<{}>::type'.format( ','.join(ctx(t) for t in self.types)) except RuntimeError: # this is a situation where we accept to somehow extend # the recursion limit, because of degenerated trees sys.setrecursionlimit(current_recursion_limit * 2) res = self.generate(ctx) sys.setrecursionlimit(current_recursion_limit) return res class ArgumentType(Type): """ A type to hold function arguments """ def __init__(self, num): super(ArgumentType, 
self).__init__(num=num) def generate(self, _): argtype = "argument_type{0}".format(self.num) noref = "typename std::remove_reference<{0}>::type".format( argtype) return "typename std::remove_cv<{0}>::type".format(noref) class DependentType(Type): """ A class to be sub-classed by any type that depends on another type """ def __init__(self, of): assert of is not None super(DependentType, self).__init__(of=of) def iscombined(self): return self.of.iscombined() class Assignable(DependentType): """ A type which can be assigned It is used to make the difference between * transient types (e.g. generated from expression template) * assignable types (typically type of a variable) """ def generate(self, ctx): return 'typename pythonic::assignable<{0}>::type'.format( self.of.generate(ctx)) class AssignableNoEscape(DependentType): """ Similar to Assignable, but it doesn't escape it's declaration scope """ def generate(self, ctx): return 'typename pythonic::assignable_noescape<{0}>::type'.format( self.of.generate(ctx)) class Returnable(DependentType): """ A type which can be returned It is used to make the difference between * returned types (that cannot hold a reference to avoid dangling reference) * assignable types (local to a function) """ def generate(self, ctx): return 'typename pythonic::returnable<{0}>::type'.format( self.of.generate(ctx)) class Lazy(DependentType): """ A type which can be a reference It is used to make a lazy evaluation of numpy expressions """ def generate(self, ctx): return 'typename pythonic::lazy<{}>::type'.format(ctx(self.of)) class DeclType(NamedType): """ Gather the type of a variable """ def generate(self, _): return ('typename std::remove_cv<' 'typename std::remove_reference<' 'decltype({0})>::type>::type'.format(self.srepr)) class IteratorOfType(DependentType): ''' Type of an Iterator of a container ''' def generate(self, ctx): container_type = ctx(self.of) if container_type.startswith('typename'): return container_type + '::iterator' else: 
return 'typename ' + container_type + '::iterator' class IteratorContentType(DependentType): ''' Type of an iterator over the content of a container ''' def generate(self, ctx): iterator_value_type = ctx(self.of) return 'typename std::remove_cv<{0}>::type'.format( 'typename std::iterator_traits<{0}>::value_type'.format( 'typename std::remove_reference<{0}>::type::iterator' .format(iterator_value_type) ) ) class GetAttr(Type): ''' Type of a named attribute ''' def __init__(self, param, attr): super(GetAttr, self).__init__(param=param, attr=attr) def generate(self, ctx): return ('decltype(pythonic::builtins::getattr({}{{}}, {}))' .format('pythonic::types::attr::' + self.attr.upper(), 'std::declval<' + ctx(self.param) + '>()')) class ReturnType(Type): ''' Return type of a call with arguments ''' def __init__(self, ftype, args): super(ReturnType, self).__init__(ftype=ftype, args=args) def generate(self, ctx): # the return type of a constructor is obvious cg = 'std::declval<{0}>()'.format(ctx(self.ftype)) args = ("std::declval<{0}>()".format(ctx(arg)) for arg in self.args) return 'decltype({0}({1}))'.format(cg, ", ".join(args)) class ElementType(Type): ''' Type of the ith element of a tuple or container ''' def __init__(self, index, of): super(ElementType, self).__init__(of=of, index=index) def iscombined(self): return self.of.iscombined() def generate(self, ctx): return 'typename std::tuple_element<{0},{1}>::type'.format( self.index, 'typename std::remove_reference<{0}>::type'.format( ctx(self.of) ) ) class ListType(DependentType): ''' Type holding a list of stuff of the same type ''' def generate(self, ctx): return 'pythonic::types::list<{}>'.format( 'typename std::remove_reference<{0}>::type'.format( ctx(self.of))) class SetType(DependentType): ''' Type holding a set of stuff of the same type ''' def generate(self, ctx): return 'pythonic::types::set<{0}>'.format(ctx(self.of)) class TupleType(Type): ''' Type holding a tuple of stuffs of various types ''' def 
__init__(self, ofs): super(TupleType, self).__init__(ofs=ofs) def iscombined(self): return any(of.iscombined() for of in self.ofs) def generate(self, ctx): elts = (ctx(of) for of in self.ofs) telts = ('std::declval<{0}>()'.format(elt) for elt in elts) return 'decltype(pythonic::types::make_tuple({0}))'.format( ", ".join(telts)) class DictType(Type): ''' Type holding a dict of stuff of the same key and value type ''' def __init__(self, of_key, of_val): super(DictType, self).__init__(of_key=of_key, of_val=of_val) def iscombined(self): return any((of.iscombined() for of in (self.of_key, self.of_val))) def generate(self, ctx): return 'pythonic::types::dict<{},{}>'.format(ctx(self.of_key), ctx(self.of_val)) class ContainerType(DependentType): ''' Type of any container of stuff of the same type ''' def generate(self, ctx): return ('container::type>' .format(ctx(self.of))) class IndexableType(DependentType): ''' Type of any container indexed by the same type ''' def generate(self, ctx): return 'indexable<{0}>'.format(ctx(self.of)) class IndexableContainerType(Type): ''' Type of any container of stuff of the same type, indexable by another type ''' def __init__(self, of_key, of_val): super(IndexableContainerType, self).__init__(of_key=of_key, of_val=of_val) def iscombined(self): return any((of.iscombined() for of in (self.of_key, self.of_val))) def generate(self, ctx): return ('indexable_container<' '{0}, typename std::remove_reference<{1}>::type' '>' .format(ctx(self.of_key), ctx(self.of_val))) class ExpressionType(Type): """ Result type of an operator call. 
""" def __init__(self, op, exprs): super(ExpressionType, self).__init__(op=op, exprs=exprs) def iscombined(self): return any(expr.iscombined() for expr in self.exprs) def generate(self, ctx): texprs = (ctx(expr) for expr in self.exprs) return 'decltype({0})'.format(self.op( *["std::declval<{0}>()".format(t) for t in texprs])) builder.UnknownType = Type() for objname, obj in locals().items(): if isclass(obj): setattr(builder, objname, obj) pythran-0.10.0+ds2/pythran/dist.py000066400000000000000000000135061416264035500167560ustar00rootroot00000000000000''' This modules contains a distutils extension mechanism for Pythran * PythranExtension: is used as distutils's Extension ''' import pythran.config as cfg from collections import defaultdict try: from collections.abc import Iterable except ImportError: from collections import Iterable import os.path import os from distutils.command.build_ext import build_ext as LegacyBuildExt from numpy.distutils.extension import Extension class PythranBuildExtMixIn(object): """Subclass of `distutils.command.build_ext.build_ext` which is required to build `PythranExtension` with the configured C++ compiler. It may also be subclassed if you want to combine with another build_ext class (NumPy, Cython implementations). 
""" def build_extension(self, ext): StringTypes = str, def get_value(obj, key): var = getattr(obj, key) if isinstance(var, Iterable) and not isinstance(var, StringTypes): return var[0] else: return var def set_value(obj, key, value): var = getattr(obj, key) if isinstance(var, Iterable) and not isinstance(var, StringTypes): var[0] = value else: setattr(obj, key, value) prev = { # linux-like 'preprocessor': None, 'compiler_cxx': None, 'compiler_so': None, 'compiler': None, 'linker_exe': None, 'linker_so': None, # Windows-like 'cc': None, } # Backup compiler settings for key in list(prev.keys()): if hasattr(self.compiler, key): prev[key] = get_value(self.compiler, key) else: del prev[key] # try hard to modify the compiler if getattr(ext, 'cxx', None) is not None: for comp in prev: if hasattr(self.compiler, comp): set_value(self.compiler, comp, ext.cxx) find_exe = None if getattr(ext, 'cc', None) is not None: try: import distutils._msvccompiler as msvc # install hook find_exe = msvc._find_exe def _find_exe(exe, *args, **kwargs): if exe == 'cl.exe': exe = ext.cc return find_exe(exe, *args, **kwargs) msvc._find_exe = _find_exe except ImportError: pass # In general, distutils uses -Wstrict-prototypes, but this option # is not valid for C++ code, only for C. Remove it if it's there # to avoid a spurious warning on every compilation. 
for flag in cfg.cfg.get('compiler', "ignoreflags").split(): for target in ('compiler_so', 'linker_so'): try: while True: getattr(self.compiler, target).remove(flag) except (AttributeError, ValueError): pass # Remove -arch i386 if 'x86_64' is specified, otherwise incorrect # code is generated, at least on OSX if hasattr(self.compiler, 'compiler_so'): archs = defaultdict(list) for i, flag in enumerate(self.compiler.compiler_so[1:]): if self.compiler.compiler_so[i] == '-arch': archs[flag].append(i + 1) if 'x86_64' in archs and 'i386' in archs: for i in archs['i386']: self.compiler.compiler_so[i] = 'x86_64' try: return super(PythranBuildExtMixIn, self).build_extension(ext) finally: # Revert compiler settings for key in prev.keys(): set_value(self.compiler, key, prev[key]) # uninstall hook if find_exe is not None: import distutils._msvccompiler as msvc msvc._find_exe = find_exe class PythranBuildExtMeta(type): def __getitem__(self, base): class PythranBuildExt(PythranBuildExtMixIn, base): pass return PythranBuildExt class PythranBuildExt(PythranBuildExtMixIn, LegacyBuildExt, metaclass=PythranBuildExtMeta): pass class PythranExtension(Extension): ''' Description of a Pythran extension Similar to distutils.core.Extension except that the sources are .py files They must be processable by pythran, of course. The compilation process ends up in a native Python module. 
''' def __init__(self, name, sources, *args, **kwargs): cfg_ext = cfg.make_extension(python=True, **kwargs) self.cxx = cfg_ext.pop('cxx', None) self.cc = cfg_ext.pop('cc', None) self._sources = sources Extension.__init__(self, name, sources, *args, **cfg_ext) self.__dict__.pop("sources", None) @property def sources(self): import pythran.toolchain as tc cxx_sources = [] for source in self._sources: base, ext = os.path.splitext(source) if ext != '.py': cxx_sources.append(source) continue output_file = base + '.cpp' # target name if os.path.exists(source) and (not os.path.exists(output_file) or os.path.getmtime(output_file) < os.path.getmtime(source)): # get the last name in the path if '.' in self.name: module_name = os.path.splitext(self.name)[-1][1:] else: module_name = self.name tc.compile_pythranfile(source, output_file, module_name, cpponly=True) cxx_sources.append(output_file) return cxx_sources @sources.setter def sources(self, sources): self._sources = sources pythran-0.10.0+ds2/pythran/errors.py000066400000000000000000000002351416264035500173220ustar00rootroot00000000000000""" Module to handle errors in Pythran. """ class PythranInternalError(Exception): """ Exception raise on Incorrect internal behavior in Pythran. 
""" pythran-0.10.0+ds2/pythran/frontend.py000066400000000000000000000013621416264035500176270ustar00rootroot00000000000000""" This module contains pythran frontend """ from pythran.openmp import GatherOMPData from pythran.syntax import check_syntax from pythran.transformations import ExtractDocStrings, HandleImport import gast as ast import re def raw_parse(code): # hacky way to turn OpenMP comments into strings code = re.sub(r'(\s*)#\s*(omp\s[^\n]+)', r'\1"\2"', code) return ast.parse(code) def parse(pm, code): # front end ir = raw_parse(code) # Handle user-defined import pm.apply(HandleImport, ir) # parse openmp directive pm.apply(GatherOMPData, ir) # extract docstrings _, docstrings = pm.apply(ExtractDocStrings, ir) # avoid conflicts with cxx keywords check_syntax(ir) return ir, docstrings pythran-0.10.0+ds2/pythran/graph.py000066400000000000000000000106271416264035500171150ustar00rootroot00000000000000''' Minimal directed graph replacement for networkx.DiGraph This has the sole advantage of being a standalone file that doesn't bring any dependency with it. 
''' class DiGraph(object): def __init__(self): # adjacency[i][j] = True means j is a successor of i self._adjacency = {} self._edges = {} def successors(self, node): return (n for n in self._adjacency[node]) def predecessors(self, node): return (k for k, v in self._adjacency.items() if node in v) def add_node(self, node): self._adjacency.setdefault(node, set()) def add_edge(self, src, dest, **props): self.add_node(dest) self._adjacency.setdefault(src, set()).add(dest) self._edges[(src, dest)] = props @property def edges(self): return self._edges def remove_edge(self, src, dest): self._adjacency[src].remove(dest) del self._edges[(src, dest)] def __len__(self): return len(self._adjacency) def __iter__(self): return iter(self._adjacency.keys()) def __contains__(self, value): return value in self._adjacency def __getitem__(self, node): return self._adjacency[node] class Unfeasible(RuntimeError): pass def has_path(graph, src, dest): visited = set() worklist = [src] while worklist: current = worklist.pop() if current in visited: continue visited.add(current) if dest in graph.successors(current): return True worklist.extend(graph.successors(current)) return False # Copied verbatim from NetworkX 2.6.1 # # NetworkX is distributed with the 3-clause BSD license. # # :: # # Copyright (C) 2004-2021, NetworkX Developers # Aric Hagberg # Dan Schult # Pieter Swart # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. 
# # * Neither the name of the NetworkX Developers nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. def _all_simple_paths_graph(G, source, targets, cutoff): visited = dict.fromkeys([source]) stack = [iter(G[source])] while stack: children = stack[-1] child = next(children, None) if child is None: stack.pop() visited.popitem() elif len(visited) < cutoff: if child in visited: continue if child in targets: yield list(visited) + [child] visited[child] = None if targets - set(visited.keys()): # expand stack until find all targets stack.append(iter(G[child])) else: visited.popitem() # maybe other ways to child else: # len(visited) == cutoff: for target in (targets & (set(children) | {child})) - set(visited.keys()): yield list(visited) + [target] stack.pop() visited.popitem() def all_simple_paths(graph, src, target): return _all_simple_paths_graph(graph, src, {target}, len(graph) - 1) pythran-0.10.0+ds2/pythran/interval.py000066400000000000000000000333701416264035500176400ustar00rootroot00000000000000""" Module with facilities to represent range values. 
""" from math import isinf, isnan import itertools import numpy class Interval(object): """ Representation for a range of values. """ def __init__(self, low, high): """ Set initial bound of the range object. """ if isnan(low): low = -float('inf') if isnan(high): high = +float('inf') self._low = low self._high = high @property def low(self): return self._low @property def high(self): return self._high def __repr__(self): """ Return a nicely formatted representation string. """ return "Interval(low={low}, high={high})".format(low=self.low, high=self.high) def bounds(self): return self.low, self.high def __contains__(self, value): return self.low <= value <= self.high def union(self, other): """ Intersect current range with other.""" return Interval(min(self.low, other.low), max(self.high, other.high)) def intersect(self, other): return Interval(max(self.low, other.low), min(self.high, other.high)) def copy(self): return Interval(self.low, self.high) def widen(self, other): """ Widen current range. """ if self.low < other.low: low = -float("inf") else: low = self.low if self.high > other.high: high = float("inf") else: high = self.high return Interval(low, high) def __mul__(self, other): """ Combiner for Multiplication operation. >>> Interval(1, 5) * Interval(-5, -4) Interval(low=-25, high=-4) >>> Interval(-1, 5) * Interval(-5, 3) Interval(low=-25, high=15) >>> Interval(1, 5) * Interval(3, 8) Interval(low=3, high=40) """ def all_bounds(): return itertools.chain(self.bounds(), other.bounds()) if any(map(isinf, all_bounds())) and any(x == 0 for x in all_bounds()): return UNKNOWN_RANGE res = [v1 * v2 for v1, v2 in itertools.product(self.bounds(), other.bounds())] return Interval(min(res), max(res)) __mult__ = __mul__ def __div__(self, other): """ Combiner for Divide operation. 
>>> Interval(-1, 5) / Interval(3, 8) Interval(low=-1, high=1) >>> Interval(-1, 5) / Interval(-5, -4) Interval(low=-2, high=0) >>> Interval(-1, 5) / Interval(-5, 3) Interval(low=-inf, high=inf) """ if other.low <= 0 and other.high >= 0: return UNKNOWN_RANGE if other.low == 0: return UNKNOWN_RANGE def all_bounds(): return itertools.chain(self.bounds(), other.bounds()) if any(isinf(x) for x in all_bounds()): return UNKNOWN_RANGE res = [v1 // v2 for v1, v2 in itertools.product(self.bounds(), other.bounds())] return Interval(min(res), max(res)) __truediv__ = __div__ def __add__(self, other): """ Combiner for Addition operation. >>> Interval(-12, 5) + Interval(-5, -3) Interval(low=-17, high=2) """ if isinstance(other, IntervalTuple): return UNKNOWN_RANGE sl, sh, ol, oh = self.low, self.high, other.low, other.high if isinf(sl) and isinf(ol) and sl * ol < 0: return UNKNOWN_RANGE if isinf(sh) and isinf(oh) and sh * oh < 0: return UNKNOWN_RANGE return Interval(sl + ol, sh + oh) def __sub__(self, other): """ Combiner for Subtraction operation. >>> Interval(1, 5) - Interval(-5, -4) Interval(low=5, high=10) """ sl, sh, ol, oh = self.low, self.high, other.low, other.high if isinf(sl) and isinf(oh): return UNKNOWN_RANGE if isinf(sh) and isinf(ol): return UNKNOWN_RANGE return Interval(sl - oh, sh - ol) def __rshift__(range1, range2): """ Combiner for Right shift operation. 
>>> Interval(10, 100) >> Interval(3, 8) Interval(low=0, high=12) >>> Interval(10, float("inf")) >> Interval(3, 8) Interval(low=0, high=inf) >>> Interval(-float("inf"), 0) >> Interval(3, 8) Interval(low=-inf, high=0) >>> Interval(-30, 10) >> Interval(3, float('inf')) Interval(low=-4, high=1) """ if range1.low <= 0: if isinf(range1.low): min_ = range1.low else: min_ = range1.low >> range2.low elif isinf(range2.high): min_ = 0 else: min_ = range1.low >> range2.high if isinf(range1.high): max_ = range1.high elif isinf(range2.low): max_ = 0 else: max_ = range1.high >> range2.low return Interval(min_, max_) def __mod__(range1, range2): """ Combiner for Modulo operation. >>> Interval(-1, 5) % Interval(1, 13) Interval(low=0, high=5) >>> Interval(-21, 5) % Interval(1, 13) Interval(low=0, high=13) """ return Interval(0, min(range2.high, max(abs(range1.high), abs(range1.low)))) def __pow__(range1, range2): """ Combiner for Power operation. >>> Interval(1, 5) ** Interval(-5, -4) Interval(low=1.0, high=1.0) >>> Interval(-1, 5) ** Interval(-5, 3) Interval(low=-1.0, high=125.0) >>> Interval(1, 5) ** Interval(3, 8) Interval(low=1.0, high=390625.0) """ res = [v1 ** v2 for v1, v2 in itertools.product(range1.bounds(), range2.bounds())] return Interval(numpy.ceil(min(res)), numpy.floor(max(res))) def __lshift__(range1, range2): """ Combiner for Left shift operation. >>> Interval(1, 5) << Interval(3, 8) Interval(low=8, high=1280) >>> Interval(1, float("inf")) << Interval(3, 8) Interval(low=8, high=inf) >>> Interval(-float("inf"), 0) << Interval(3, 8) Interval(low=-inf, high=0) >>> Interval(-3, 1) << Interval(3, float('inf')) Interval(low=-24, high=inf) """ min_inf = isinf(range1.low) or isinf(range2.low) max_inf = isinf(range1.high) or isinf(range2.high) min_ = -float("inf") if min_inf else (range1.low << range2.low) max_ = float("inf") if max_inf else (range1.high << range2.high) return Interval(min_, max_) def __floordiv__(range1, range2): """ Combiner for Floor divide operation. 
>>> Interval(-1, 5) // Interval(3, 8) Interval(low=-1, high=1) >>> Interval(-1, 5) // Interval(-5, -4) Interval(low=-2, high=0) >>> Interval(-1, 5) // Interval(-5, 3) Interval(low=-inf, high=inf) """ if range2.low <= 0 and range2.high >= 0: return UNKNOWN_RANGE if range2.low == 0: return UNKNOWN_RANGE res = [v1 if isinf(v1) else (v1 // v2) for v1, v2 in itertools.product(range1.bounds(), range2.bounds())] return Interval(min(res), max(res)) def __lt__(self, other): """ Combiner for lower than operation. >>> Interval(-1, 5) < Interval(6, 7) Interval(low=1, high=1) >>> Interval(-1, 5) < Interval(5, 7) Interval(low=0, high=1) >>> Interval(-1, 5) < Interval(-16, -7) Interval(low=0, high=0) >>> Interval(1, 5) < Interval(3, 7) Interval(low=0, high=1) """ if self.high < other.low: return Interval(1, 1) if self.low >= other.high: return Interval(0, 0) return Interval(0, 1) def __le__(self, other): """ Combiner for lower than or equal operation. >>> Interval(-1, 5) <= Interval(6, 7) Interval(low=1, high=1) >>> Interval(-1, 5) <= Interval(5, 7) Interval(low=1, high=1) >>> Interval(-1, 5) <= Interval(-16, -7) Interval(low=0, high=0) >>> Interval(1, 5) <= Interval(3, 7) Interval(low=0, high=1) """ if self.high <= other.low: return Interval(1, 1) if self.low > other.high: return Interval(0, 0) return Interval(0, 1) def __gt__(self, other): """ Combiner for greater than operation. >>> Interval(-5, 1) > Interval(-7, -6) Interval(low=1, high=1) >>> Interval(-5, 1) > Interval(-7, -5) Interval(low=0, high=1) >>> Interval(-1, 5) > Interval(6, 7) Interval(low=0, high=0) >>> Interval(1, 5) > Interval(3, 7) Interval(low=0, high=1) """ if self.low > other.high: return Interval(1, 1) if self.high <= other.low: return Interval(0, 0) return Interval(0, 1) def __ge__(self, other): """ Combiner for greater than or equal operation. 
>>> Interval(-5, 1) >= Interval(-7, -6) Interval(low=1, high=1) >>> Interval(-5, 1) >= Interval(-7, -5) Interval(low=1, high=1) >>> Interval(-1, 5) >= Interval(6, 7) Interval(low=0, high=0) >>> Interval(1, 5) >= Interval(3, 7) Interval(low=0, high=1) """ if self.low >= other.high: return Interval(1, 1) if self.high < other.low: return Interval(0, 0) return Interval(0, 1) def __eq__(self, other): """ Combiner for equal operation. >>> Interval(-5, 1) == Interval(-7, -6) Interval(low=0, high=0) >>> Interval(-5, 1) == Interval(-7, -5) Interval(low=0, high=1) >>> Interval(-1, 5) == Interval(6, 7) Interval(low=0, high=0) """ if isinf(self.low): return Interval(0, 1) elif self.low == self.high == other.low == other.high: return Interval(1, 1) elif (self < other) or (self > other): return Interval(0, 0) else: return Interval(0, 1) def __ne__(self, other): """ Combiner for not equal operation. >>> Interval(-5, 1) != Interval(-7, -6) Interval(low=1, high=1) >>> Interval(-5, 1) != Interval(-7, -5) Interval(low=0, high=1) >>> Interval(-1, 5) != Interval(6, 7) Interval(low=1, high=1) """ if isinf(self.low): return Interval(0, 1) elif self.low == self.high == other.low == other.high: return Interval(1, 1) elif (self < other) or (self > other): return Interval(1, 1) else: return Interval(0, 1) def __nonzero__(self): return not isinf(self.high) and self.low == self.high and self .low > 0 def __getitem__(self, index): return UNKNOWN_RANGE __bool__ = __nonzero__ class IntervalTuple(object): def __init__(self, values): self.values = tuple(values) def union(self, other): if isinstance(other, Interval): return UNKNOWN_TUPLE_RANGE return IntervalTuple(x.union(y) for x, y in zip(self.values, other.values)) def intersect(self, other): if isinstance(other, Interval): return UNKNOWN_TUPLE_RANGE return IntervalTuple(x.intersect(y) for x, y in zip(self.values, other.values)) @property def high(self): return UNKNOWN_RANGE.high @property def low(self): return UNKNOWN_RANGE.low def 
__getitem__(self, index): out = None low = max(0, index.low) high = min(len(self.values) - 1, index.high) for i in range(low, high + 1): if out is None: out = self.values[i] else: out = out.union(self.values[i]) return out or UNKNOWN_RANGE def widen(self, other): if isinstance(other, Interval): return UNKNOWN_TUPLE_RANGE return IntervalTuple(s.widen(o) for s, o in zip(self.values, other.values)) def __add__(self, other): if isinstance(other, Interval): return UNKNOWN_TUPLE_RANGE return IntervalTuple(self.values + other.values) UNKNOWN_RANGE = Interval(-float("inf"), float("inf")) UNKNOWN_TUPLE_RANGE = IntervalTuple([UNKNOWN_RANGE]) def range_values(args): """ Function used to compute returned range value of [x]range function. """ if len(args) == 1: return Interval(0, args[0].high) elif len(args) == 2: return Interval(args[0].low, args[1].high) elif len(args) == 3: is_neg = args[2].low < 0 is_pos = args[2].high > 0 if is_neg and is_pos: return UNKNOWN_RANGE elif is_neg: return Interval(args[1].low, args[0].high - 1) else: return Interval(args[0].low, args[1].high - 1) def bool_values(_): """ Return the range of a boolean value, i.e. [0, 1]. """ return Interval(0, 1) def cmp_values(_): """ Return the range of a comparison value, i.e. [-1, 1]. """ return Interval(-1, 1) def positive_values(_): """ Return a positive range without upper bound. """ return Interval(0, float("inf")) def max_values(args): """ Return possible range for max function. """ return Interval(max(x.low for x in args), max(x.high for x in args)) def min_values(args): """ Return possible range for min function. """ return Interval(min(x.low for x in args), min(x.high for x in args)) def ord_values(_): """ Return possible range for ord function. """ return Interval(0, 255) pythran-0.10.0+ds2/pythran/intrinsic.py000066400000000000000000000175301416264035500200160ustar00rootroot00000000000000""" This module contains all classes used to model intrinsics behavior. 
""" from pythran.conversion import to_ast from pythran.interval import UNKNOWN_RANGE, bool_values from pythran.types.signature import extract_combiner from pythran.typing import Any, Union, Fun, Generator import gast as ast class UnboundValueType(object): ''' Represents a new location, bound to no identifier ''' UnboundValue = UnboundValueType() # FIXME: we should find a better way to implement default behavior DefaultArgNum = 20 class UpdateEffect(object): pass class ReadEffect(object): pass class ReadOnceEffect(ReadEffect): pass class Intrinsic(object): """ Model any Method/Function. Its member variables are: - argument_effects that describes the effect of the function on its argument (either UpdateEffect, ReadEffect or ReadOnceEffect) - global_effects that describes whether the function has side effects - return_alias that describes the aliasing between the return value and the parameters. The lambda returns an ast expression, generally depending on the node arguments (see dict.setdefault) - args that describes the name and default value of each arg, using the same representation as ast.FunctionDef, i.e. 
ast.arguments """ def __init__(self, **kwargs): self.argument_effects = kwargs.get('argument_effects', (UpdateEffect(),) * DefaultArgNum) self.global_effects = kwargs.get('global_effects', False) self.return_alias = kwargs.get('return_alias', lambda x: {UnboundValue}) self.args = ast.arguments( [ast.Name(n, ast.Param(), None, None) for n in kwargs.get('args', [])], [], None, [ast.Name(n, ast.Param(), None, None) for n in kwargs.get('kwonlyargs', [])], [], None, [to_ast(d) for d in kwargs.get('defaults', [])]) self.return_range = kwargs.get("return_range", lambda call: UNKNOWN_RANGE) self.return_range_content = kwargs.get("return_range_content", lambda c: UNKNOWN_RANGE) def isliteral(self): return False def isfunction(self): return False def isstaticfunction(self): return False def ismethod(self): return False def isattribute(self): return False def isconst(self): return not any( isinstance(x, UpdateEffect) for x in self.argument_effects ) and not self.global_effects def isreadonce(self, n): return isinstance(self.argument_effects[n], ReadOnceEffect) def combiner(self, s, node): pass class FunctionIntr(Intrinsic): def __init__(self, **kwargs): kwargs.setdefault('combiners', ()) super(FunctionIntr, self).__init__(**kwargs) self.combiners = kwargs['combiners'] if 'signature' in kwargs: self.signature = kwargs['signature'] deduced_combiner = extract_combiner(self.signature) if deduced_combiner is not None: self.combiners += deduced_combiner, if 'return_range' not in kwargs: if isinstance(self.signature, Union): if all(r.__args__[-1] is bool for r in self.signature.__args__): self.return_range = bool_values elif isinstance(self.signature, Generator): if self.signature.__args__[0] is bool: self.return_range = bool_values elif isinstance(self.signature, Fun): if self.signature.__args__[-1] is bool: self.return_range = bool_values else: self.signature = Any if 'immediate_arguments' in kwargs: self.immediate_arguments = kwargs['immediate_arguments'] else: 
self.immediate_arguments = [] def isfunction(self): return True def isstaticfunction(self): return True def add_combiner(self, _combiner): self.combiners += (_combiner,) def combiner(self, s, node): for comb in self.combiners: comb(s, node) class UserFunction(FunctionIntr): def __init__(self, *combiners, **kwargs): kwargs['combiners'] = combiners super(UserFunction, self).__init__(**kwargs) class ConstFunctionIntr(FunctionIntr): def __init__(self, **kwargs): kwargs.setdefault('argument_effects', (ReadEffect(),) * DefaultArgNum) super(ConstFunctionIntr, self).__init__(**kwargs) class ConstExceptionIntr(ConstFunctionIntr): def __init__(self, **kwargs): kwargs.setdefault('argument_effects', (ReadEffect(),) * DefaultArgNum) super(ConstExceptionIntr, self).__init__(**kwargs) class ReadOnceFunctionIntr(ConstFunctionIntr): def __init__(self, **kwargs): super(ReadOnceFunctionIntr, self).__init__( argument_effects=(ReadOnceEffect(),) * DefaultArgNum, **kwargs) class MethodIntr(FunctionIntr): def __init__(self, *combiners, **kwargs): kwargs.setdefault('argument_effects', (UpdateEffect(),) + (ReadEffect(),) * DefaultArgNum) kwargs['combiners'] = combiners super(MethodIntr, self).__init__(**kwargs) def ismethod(self): return True def isstaticfunction(self): return False class ConstMethodIntr(MethodIntr): def __init__(self, *combiners, **kwargs): kwargs.setdefault('argument_effects', (ReadEffect(),) * DefaultArgNum) super(ConstMethodIntr, self).__init__(*combiners, **kwargs) class ReadOnceMethodIntr(ConstMethodIntr): def __init__(self, **kwargs): super(ReadOnceMethodIntr, self).__init__( argument_effects=(ReadOnceEffect(),) * DefaultArgNum, **kwargs) class AttributeIntr(Intrinsic): """ Internal representation for any attributes. Examples -------- >> a.real """ def __init__(self, **kwargs): """ Forward arguments. 
""" super(AttributeIntr, self).__init__(**kwargs) if 'signature' in kwargs: self.signature = kwargs['signature'] else: self.signature = Any def isattribute(self): """ Mark this intrinsic as an attribute. """ return True class ConstantIntr(Intrinsic): """ Internal representation for any constant. Examples -------- >> math.pi """ def __init__(self, **kwargs): """ Forward arguments and remove arguments effects. """ kwargs["argument_effects"] = () super(ConstantIntr, self).__init__(**kwargs) def isliteral(self): """ Mark this intrinsic as a literal. """ return True class Class(Intrinsic): def __init__(self, d, *args, **kwargs): super(Class, self).__init__(*args, **kwargs) self.fields = d def __getitem__(self, key): return self.fields[key] def __iter__(self): return self.fields.__iter__() def __contains__(self, key): """ Forward key content to aliased module. """ return key in self.fields class ClassWithReadOnceConstructor(Class, ReadOnceFunctionIntr): def __init__(self, d, *args, **kwargs): super(ClassWithReadOnceConstructor, self).__init__(d, *args, **kwargs) class ClassWithConstConstructor(Class, ConstFunctionIntr): def __init__(self, d, *args, **kwargs): super(ClassWithConstConstructor, self).__init__(d, *args, **kwargs) class ExceptionClass(Class, ConstExceptionIntr): def __init__(self, d, *args, **kwargs): super(ExceptionClass, self).__init__(d, *args, **kwargs) class UFunc(Class, ConstFunctionIntr): """ Representation of ufunc from numpy. 
""" pythran-0.10.0+ds2/pythran/log.py000066400000000000000000000014751416264035500165760ustar00rootroot00000000000000import logging logger = logging.getLogger("pythran") stream = logging.StreamHandler() # Initialize logging try: # Set a nice colored output from colorlog import ColoredFormatter formatter = ColoredFormatter( "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s", log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', } ) except ImportError: # No color available, use default config formatter = logging.Formatter("%(levelname)s: %(message)s") color_disabled = True else: color_disabled = False stream.setFormatter(formatter) logger.addHandler(stream) if color_disabled: logger.info("Disabling color, you really want to install colorlog.") pythran-0.10.0+ds2/pythran/magic.py000066400000000000000000000053401416264035500170700ustar00rootroot00000000000000""" Pythran integration into IPython. * provides the %%pythran magic function to ipython """ # ----------------------------------------------------------------------------- # Copyright (C) 2010-2011, IPython Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ----------------------------------------------------------------------------- import hashlib import importlib from IPython.core.magic import Magics, magics_class, cell_magic from IPython.core import magic_arguments import pythran @magics_class class PythranMagics(Magics): """ Class to make it possible to use pythran as a magic IPython command.""" def __init__(self, shell): """ Init the pythran magic stuff. """ super(PythranMagics, self).__init__(shell) self._reloads = {} def _import_all(self, module): """ Import only globals modules. 
""" self.shell.push({k: v for k, v in module.__dict__.items() if not k.startswith('__')}) @magic_arguments.magic_arguments() @magic_arguments.argument('-D', action='append', default=[]) @magic_arguments.argument('-O', action='append', default=[]) @magic_arguments.argument('-m', action='append', default=[]) @magic_arguments.argument('-W', action='append', default=[]) @magic_arguments.argument('-f', action='append', default=[]) @cell_magic def pythran(self, line, cell): """ Compile and import everything from a Pythran code cell. %%pythran #pythran export foo(int) def foo(x): return x + x """ args = magic_arguments.parse_argstring(self.pythran, line) kwargs = {} if args.D: kwargs['define_macros'] = args.D for v in "OmWf": args_v = getattr(args, v) for target in ('extra_compile_args', 'extra_link_args'): kwargs.setdefault(target, []).extend('-{}{}'.format(v, x) for x in args_v) m = hashlib.md5() m.update(line.encode('utf-8')) m.update(cell.encode('utf-8')) module_name = "pythranized_" + m.hexdigest() module_path = pythran.compile_pythrancode(module_name, cell, **kwargs) loader = importlib.machinery.ExtensionFileLoader(module_name, module_path) spec = importlib.machinery.ModuleSpec(name=module_name, loader=loader, origin=module_path) module = importlib._bootstrap._load(spec) self._import_all(module) def load_ipython_extension(ipython): """Load the extension in IPython.""" ipython.register_magics(PythranMagics) pythran-0.10.0+ds2/pythran/metadata.py000066400000000000000000000037051416264035500175730ustar00rootroot00000000000000""" This module provides a way to pass information between passes as metadata. * add attaches a metadata to a node * get retrieves all metadata from a particular class attached to a node """ from gast import AST # so that metadata are walkable as regular ast nodes class Metadata(AST): """ Base class to add information on a node to improve code generation. """ def __init__(self): """ Initialize content of these metadata. 
""" self.data = list() self._fields = ('data',) super(Metadata, self).__init__() def __iter__(self): """ Enable iteration over every metadata informations. """ return iter(self.data) def append(self, data): """ Add a metadata information. """ self.data.append(data) class Lazy(AST): """ Metadata to mark variable which doesn't need to be evaluated now. """ class Comprehension(AST): def __init__(self, *args): # no positional argument to be deep copyable super(Comprehension, self).__init__() if args: self.target = args[0] class StaticReturn(AST): """ Metadata to mark return with a constant value. """ class Local(AST): """ Metadata to mark function as non exported. """ def add(node, data): if not hasattr(node, 'metadata'): node.metadata = Metadata() node._fields += ('metadata',) node.metadata.append(data) def get(node, class_): if hasattr(node, 'metadata'): return [s for s in node.metadata if isinstance(s, class_)] else: return [] def clear(node, class_): if hasattr(node, 'metadata'): node.metadata.data = [s for s in node.metadata if not isinstance(s, class_)] if not node.metadata.data: del node.metadata assert node._fields[-1] == 'metadata' node._fields = node._fields[:-1] def visit(self, node): if hasattr(node, 'metadata'): self.visit(node.metadata) pythran-0.10.0+ds2/pythran/middlend.py000066400000000000000000000052601416264035500175710ustar00rootroot00000000000000"""This module turns a python AST into an optimized, pythran compatible ast.""" from pythran.analyses import ExtendedSyntaxCheck from pythran.optimizations import (ComprehensionPatterns, ListCompToGenexp, RemoveDeadFunctions) from pythran.transformations import (ExpandBuiltins, ExpandImports, ExpandImportAll, FalsePolymorphism, NormalizeCompare, NormalizeException, NormalizeMethodCalls, NormalizeReturn, NormalizeTuples, RemoveComprehension, RemoveNestedFunctions, RemoveLambdas, UnshadowParameters, RemoveNamedArguments, ExpandGlobals, NormalizeIsNone, NormalizeIfElse, NormalizeStaticIf, 
SplitStaticExpression, RemoveFStrings) def refine(pm, node, optimizations): """ Refine node in place until it matches pythran's expectations. """ # Sanitize input pm.apply(RemoveDeadFunctions, node) pm.apply(ExpandGlobals, node) pm.apply(ExpandImportAll, node) pm.apply(NormalizeTuples, node) pm.apply(RemoveFStrings, node) pm.apply(ExpandBuiltins, node) pm.apply(ExpandImports, node) pm.apply(NormalizeMethodCalls, node) pm.apply(NormalizeIfElse, node) pm.apply(NormalizeIsNone, node) pm.apply(SplitStaticExpression, node) pm.apply(NormalizeStaticIf, node) pm.apply(NormalizeTuples, node) pm.apply(NormalizeException, node) pm.apply(NormalizeMethodCalls, node) # Some early optimizations pm.apply(ComprehensionPatterns, node) pm.apply(RemoveLambdas, node) pm.apply(RemoveNestedFunctions, node) pm.apply(NormalizeCompare, node) pm.gather(ExtendedSyntaxCheck, node) pm.apply(ListCompToGenexp, node) pm.apply(RemoveComprehension, node) pm.apply(RemoveNamedArguments, node) # sanitize input pm.apply(NormalizeReturn, node) pm.apply(UnshadowParameters, node) pm.apply(FalsePolymorphism, node) # some extra optimizations apply_optimisation = True while apply_optimisation: apply_optimisation = False for optimization in optimizations: apply_optimisation |= pm.apply(optimization, node)[0] def mark_unexported_functions(ir, exported_functions): from pythran.metadata import add as MDadd, Local as MDLocal for node in ir.body: if hasattr(node, 'name'): if node.name not in exported_functions: MDadd(node, MDLocal()) return ir pythran-0.10.0+ds2/pythran/openmp.py000066400000000000000000000160171416264035500173110ustar00rootroot00000000000000''' This modules contains OpenMP-related stuff. 
* OMPDirective is used to represent OpenMP annotations in the AST * GatherOMPData turns OpenMP-like string annotations into metadata ''' from pythran.passmanager import Transformation import pythran.metadata as metadata from pythran.types.conversion import PYTYPE_TO_CTYPE_TABLE from pythran.utils import isstr from gast import AST import gast as ast import re typenames = {t.__name__: t for t in PYTYPE_TO_CTYPE_TABLE} keywords = { 'atomic', 'barrier', 'capture', 'cancel', 'collapse', 'copyin', 'copyprivate', 'critical', 'declare', 'default', 'final', 'firstprivate', 'flush', 'for', 'if', 'initializer', 'lastprivate', 'master', 'mergeable', 'none', 'nowait', 'num_threads', 'omp', 'ordered', 'parallel', 'private', 'read', 'reduction', 'schedule', 'section', 'sections', 'shared', 'simd', 'single', 'task', 'taskwait', 'taskyield', 'threadprivate', 'untied', 'update', 'write' } declare_keywords = { 'omp_in', 'omp_init', 'omp_orig', 'omp_out', 'omp_priv', } reserved_contex = { 'critical', 'declare', 'default', 'schedule', 'reduction', } class OMPDirective(AST): '''Turn a string into a context-dependent metadata. 
>>> o = OMPDirective("omp for private(a,b) shared(c)") >>> o.s 'omp for private({},{}) shared({})' >>> [ type(dep) for dep in o.deps ] [, , \ ] >>> [ dep.id for dep in o.deps ] ['a', 'b', 'c'] ''' def __init__(self, *args): # no positional argument to be deep copyable super(OMPDirective, self).__init__() if not args: return self.deps = [] self.private_deps = [] self.shared_deps = [] def tokenize(s): '''A simple contextual "parser" for an OpenMP string''' # not completely satisfying if there are strings in if expressions out = '' par_count = 0 curr_index = 0 in_reserved_context = False in_declare = False in_shared = in_private = False while curr_index < len(s): m = re.match(r'^([a-zA-Z_]\w*)', s[curr_index:]) if m: word = m.group(0) curr_index += len(word) if word in typenames: out += PYTYPE_TO_CTYPE_TABLE[typenames[word]] elif(in_reserved_context or (in_declare and word in declare_keywords) or (par_count == 0 and word in keywords)): out += word in_reserved_context = word in reserved_contex in_declare |= word == 'declare' in_private |= word == 'private' in_shared |= word == 'shared' else: v = '{}' self.deps.append(ast.Name(word, ast.Load(), None, None)) if in_private: self.private_deps.append(self.deps[-1]) if in_shared: self.shared_deps.append(self.deps[-1]) out += v elif s[curr_index] == '(': par_count += 1 curr_index += 1 out += '(' elif s[curr_index] == ')': par_count -= 1 curr_index += 1 out += ')' if par_count == 0: in_reserved_context = False in_shared = in_private = False else: if s[curr_index] in ',:': in_reserved_context = False out += s[curr_index] curr_index += 1 return out self.s = tokenize(args[0]) self._fields = ('deps', 'shared_deps', 'private_deps') ## class GatherOMPData(Transformation): '''Walks node and collect string comments looking for OpenMP directives.''' # there is a special handling for If and Expr, so not listed here statements = ("FunctionDef", "Return", "Delete", "Assign", "AugAssign", "Print", "For", "While", "Raise", "TryExcept", 
"TryFinally", "Assert", "Import", "ImportFrom", "Pass", "Break",) # these fields hold statement lists statement_lists = ("body", "orelse", "finalbody",) def __init__(self): Transformation.__init__(self) # Remap self.visit_XXXX() to self.attach_data() generic method for s in GatherOMPData.statements: setattr(self, "visit_" + s, self.attach_data) self.current = list() def isompdirective(self, node): return isstr(node) and node.value.startswith("omp ") def visit_Expr(self, node): if self.isompdirective(node.value): self.current.append(node.value.value) return None else: self.attach_data(node) return node def visit_If(self, node): if self.isompdirective(node.test): self.visit(ast.Expr(node.test)) return self.visit(ast.If(ast.Constant(1, None), node.body, node.orelse)) else: return self.attach_data(node) def attach_data(self, node): '''Generic method called for visit_XXXX() with XXXX in GatherOMPData.statements list ''' if self.current: for curr in self.current: md = OMPDirective(curr) metadata.add(node, md) self.current = list() # add a Pass to hold some directives for field_name, field in ast.iter_fields(node): if field_name in GatherOMPData.statement_lists: if(field and isinstance(field[-1], ast.Expr) and self.isompdirective(field[-1].value)): field.append(ast.Pass()) self.generic_visit(node) # add an If to hold scoping OpenMP directives directives = metadata.get(node, OMPDirective) field_names = {n for n, _ in ast.iter_fields(node)} has_no_scope = field_names.isdisjoint(GatherOMPData.statement_lists) if directives and has_no_scope: # some directives create a scope, but the holding stmt may not # artificially create one here if needed sdirective = ''.join(d.s for d in directives) scoping = ('parallel', 'task', 'section') if any(s in sdirective for s in scoping): metadata.clear(node, OMPDirective) node = ast.If(ast.Constant(1, None), [node], []) for directive in directives: metadata.add(node, directive) return node 
pythran-0.10.0+ds2/pythran/optimizations/000077500000000000000000000000001416264035500203455ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/optimizations/__init__.py000066400000000000000000000020611416264035500224550ustar00rootroot00000000000000""" The optimisations submodule contains all the optimisations offered in Pythran. This file is just for convenience and turns the import from import optimisations.xxxxx.xxxxx into import optimisations.xxxxx """ from .constant_folding import ConstantFolding, PartialConstantFolding from .dead_code_elimination import DeadCodeElimination from .forward_substitution import ForwardSubstitution from .iter_transformation import IterTransformation from .comprehension_patterns import ComprehensionPatterns from .list_comp_to_genexp import ListCompToGenexp from .loop_full_unrolling import LoopFullUnrolling from .modindex import ModIndex from .pattern_transform import PatternTransform from .range_loop_unfolding import RangeLoopUnfolding from .range_based_simplify import RangeBasedSimplify from .square import Square from .inlining import Inlining from .inline_builtins import InlineBuiltins from .list_to_tuple import ListToTuple from .tuple_to_shape import TupleToShape from .remove_dead_functions import RemoveDeadFunctions from .simplify_except import SimplifyExcept pythran-0.10.0+ds2/pythran/optimizations/comprehension_patterns.py000066400000000000000000000130601416264035500255100ustar00rootroot00000000000000 """ Comprehension patterns transforms list comprehension into intrinsics. """ from pythran.analyses import OptimizableComprehension from pythran.passmanager import Transformation from pythran.transformations.normalize_tuples import ConvertToTuple from pythran.conversion import mangle from pythran.utils import attr_to_path, path_to_attr import gast as ast class ComprehensionPatterns(Transformation): ''' Transforms list comprehension into intrinsics. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(y) : return (x for x in y)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ComprehensionPatterns, node) >>> 'map' in pm.dump(backend.Python, node) True >>> node = ast.parse("def foo(y) : return [0 for _ in builtins.range(y)]") >>> _, node = pm.apply(ComprehensionPatterns, node) >>> print(pm.dump(backend.Python, node)) def foo(y): return ([0] * builtins.len(builtins.range(y))) ''' def __init__(self): Transformation.__init__(self, OptimizableComprehension) def visit_Module(self, node): self.use_itertools = False self.generic_visit(node) if self.use_itertools: import_alias = ast.alias(name='itertools', asname=mangle('itertools')) importIt = ast.Import(names=[import_alias]) node.body.insert(0, importIt) return node def make_Iterator(self, gen): if gen.ifs: ldFilter = ast.Lambda( ast.arguments([ast.Name(gen.target.id, ast.Param(), None, None)], [], None, [], [], None, []), ast.BoolOp(ast.And(), gen.ifs) if len(gen.ifs) > 1 else gen.ifs[0]) ifilterName = ast.Attribute( value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr='filter', ctx=ast.Load()) return ast.Call(ifilterName, [ldFilter, gen.iter], []) else: return gen.iter def visitComp(self, node, make_attr): if node in self.optimizable_comprehension: self.update = True self.generic_visit(node) iters = [self.make_Iterator(gen) for gen in node.generators] variables = [ast.Name(gen.target.id, ast.Param(), None, None) for gen in node.generators] # If dim = 1, product is useless if len(iters) == 1: iterAST = iters[0] varAST = ast.arguments([variables[0]], [], None, [], [], None, []) else: self.use_itertools = True prodName = ast.Attribute( value=ast.Name(id=mangle('itertools'), ctx=ast.Load(), annotation=None, type_comment=None), attr='product', ctx=ast.Load()) varid = variables[0].id # retarget this id, it's free renamings = {v.id: (i,) for i, v in enumerate(variables)} 
node.elt = ConvertToTuple(varid, renamings).visit(node.elt) iterAST = ast.Call(prodName, iters, []) varAST = ast.arguments([ast.Name(varid, ast.Param(), None, None)], [], None, [], [], None, []) ldBodymap = node.elt ldmap = ast.Lambda(varAST, ldBodymap) return make_attr(ldmap, iterAST) else: return self.generic_visit(node) def visit_ListComp(self, node): def makeattr(*args): r = ast.Attribute( value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr='map', ctx=ast.Load()) r = ast.Call(r, list(args), []) r = ast.Call(ast.Attribute(ast.Name('builtins', ast.Load(), None, None), 'list', ast.Load()), [r], []) return r if isinstance(node.elt, ast.Constant) and len(node.generators) == 1: gen = node.generators[0] if not gen.ifs and isinstance(gen.iter, ast.Call): try: path = attr_to_path(gen.iter.func)[1] range_path = 'pythonic', 'builtins', 'functor', 'range' if path == range_path and len(gen.iter.args) == 1: self.update = True return ast.BinOp( ast.List([node.elt], ast.Load()), ast.Mult(), ast.Call(path_to_attr(('builtins', 'len')), [gen.iter], [])) except TypeError: pass return self.visitComp(node, makeattr) def visit_GeneratorExp(self, node): def makeattr(*args): return ast.Call(ast.Attribute( value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr='map', ctx=ast.Load()), list(args), []) return self.visitComp(node, makeattr) pythran-0.10.0+ds2/pythran/optimizations/constant_folding.py000066400000000000000000000211721416264035500242550ustar00rootroot00000000000000""" ConstantFolding performs some kind of partial evaluation. 
""" from pythran.analyses import ConstantExpressions, ASTMatcher from pythran.passmanager import Transformation from pythran.tables import MODULES from pythran.conversion import to_ast, ConversionError, ToNotEval, mangle from pythran.analyses.ast_matcher import DamnTooLongPattern from pythran.syntax import PythranSyntaxError from pythran.utils import isintegral, isnum import gast as ast from copy import deepcopy import logging logger = logging.getLogger('pythran') class ConstantFolding(Transformation): """ Replace constant expression by their evaluation. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): return 1+3") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(): return 4 """ def __init__(self): Transformation.__init__(self, ConstantExpressions) def prepare(self, node): assert isinstance(node, ast.Module) self.env = { 'builtins': __import__('builtins'), } for module_name in MODULES: # __dispatch__ is the only fake top-level module if module_name != '__dispatch__': alias_module_name = mangle(module_name) try: self.env[alias_module_name] = __import__(module_name) except ImportError: pass # we need to parse the whole code to be able to apply user-defined pure # function but import are resolved before so we remove them to avoid # ImportError (for operator_ for example) dummy_module = ast.Module([s for s in node.body if not isinstance(s, ast.Import)], []) eval(compile(ast.gast_to_ast(dummy_module), '', 'exec'), self.env) super(ConstantFolding, self).prepare(node) def skip(self, node): return node visit_Constant = visit_Name = skip visit_List = visit_Set = Transformation.generic_visit visit_Dict = visit_Tuple = Transformation.generic_visit def generic_visit(self, node): if isinstance(node, ast.expr) and node in self.constant_expressions: fake_node = ast.Expression(node) code = compile(ast.gast_to_ast(fake_node), '', 'eval') try: 
value = eval(code, self.env) new_node = to_ast(value) try: if not ASTMatcher(node).search(new_node): self.update = True return new_node except DamnTooLongPattern as e: print("W: ", e, " Assume no update happened.") return Transformation.generic_visit(self, node) except ConversionError as e: print('error in constant folding: ', e) raise except ToNotEval: return Transformation.generic_visit(self, node) except AttributeError as e: # this may miss a few optimization logger.info('During constant folding, bailing out due to: ' + e.args[0]) return Transformation.generic_visit(self, node) except NameError as e: # FIXME dispatched function are not processed by constant # folding if "__dispatch__" in e.args[0]: return Transformation.generic_visit(self, node) # this may miss a few optimization logger.info('During constant folding, bailing out due to: ' + e.args[0]) return Transformation.generic_visit(self, node) except Exception as e: raise PythranSyntaxError(str(e), node) else: return Transformation.generic_visit(self, node) class PartialConstantFolding(Transformation): """ Replace partially constant expression by their evaluation. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(n): return [n] * 2") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(PartialConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(n): return [n, n] >>> node = ast.parse("def foo(n): return 2 * (n,)") >>> _, node = pm.apply(PartialConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(n): return (n, n) >>> node = ast.parse("def foo(n): return [n] + [n]") >>> _, node = pm.apply(PartialConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(n): return [n, n] >>> node = ast.parse("def foo(n, m): return (n,) + (m, n)") >>> _, node = pm.apply(PartialConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(n, m): return (n, m, n) """ def __init__(self): Transformation.__init__(self, ConstantExpressions) def fold_mult_left(self, node): if not isinstance(node.left, (ast.List, ast.Tuple)): return False if not isnum(node.right): return False # FIXME: remove that check once we have a proper type inference engine if not isintegral(node.right): raise PythranSyntaxError("Multiplying a sequence by a float", node) return isinstance(node.op, ast.Mult) def fold_mult_right(self, node): if not isinstance(node.right, (ast.List, ast.Tuple)): return False if not isnum(node.left): return False # FIXME: remove that check once we have a proper type inference engine if not isintegral(node.left): raise PythranSyntaxError("Multiplying a sequence by a float", node) return isinstance(node.op, ast.Mult) def fold_add(self, node, ty): if not isinstance(node.left, ty): return False if not isinstance(node.right, ty): return False return isinstance(node.op, ast.Add) def visit_BinOp(self, node): if node in self.constant_expressions: return node node = self.generic_visit(node) if self.fold_mult_left(node): self.update = True node.left.elts = [deepcopy(elt) for _ in range(node.right.value) for elt in node.left.elts] return 
node.left if self.fold_mult_right(node): self.update = True node.left, node.right = node.right, node.left return self.visit(node) for ty in (ast.List, ast.Tuple): if self.fold_add(node, ty): self.update = True node.left.elts += node.right.elts return node.left return node def visit_Subscript(self, node): """ >>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> node = ast.parse("def foo(a): a[1:][3]") >>> _, node = pm.apply(PartialConstantFolding, node) >>> _, node = pm.apply(ConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(a): a[4] >>> node = ast.parse("def foo(a): a[::2][3]") >>> _, node = pm.apply(PartialConstantFolding, node) >>> _, node = pm.apply(ConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(a): a[6] >>> node = ast.parse("def foo(a): a[-4:][5]") >>> _, node = pm.apply(PartialConstantFolding, node) >>> _, node = pm.apply(ConstantFolding, node) >>> print(pm.dump(backend.Python, node)) def foo(a): a[1] """ self.generic_visit(node) if not isinstance(node.value, ast.Subscript): return node if not isinstance(node.value.slice, ast.Slice): return node if not isintegral(node.slice): return node slice_ = node.value.slice index = node.slice node = node.value lower = slice_.lower or ast.Constant(0, None) step = slice_.step or ast.Constant(1, None) node.slice = ast.BinOp(lower, ast.Add(), ast.BinOp(index, ast.Mult(), step)) self.update = True return node pythran-0.10.0+ds2/pythran/optimizations/dead_code_elimination.py000066400000000000000000000115651416264035500252060ustar00rootroot00000000000000""" DeadCodeElimination remove useless code. 
""" from pythran.analyses import PureExpressions, DefUseChains, Ancestors from pythran.openmp import OMPDirective from pythran.passmanager import Transformation import pythran.metadata as metadata import gast as ast class ClumsyOpenMPDependencyHandler(ast.NodeVisitor): def __init__(self): self.blacklist = set() def visit_OMPDirective(self, node): for dep in node.deps: if isinstance(dep, ast.Name): self.blacklist.add(dep.id) return node class DeadCodeElimination(Transformation): """ Remove useless statement like: - assignment to unused variables - remove alone pure statement - remove empty if >>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> node = ast.parse("def foo(): a = [2, 3]; return 1") >>> _, node = pm.apply(DeadCodeElimination, node) >>> print(pm.dump(backend.Python, node)) def foo(): pass return 1 >>> node = ast.parse("def foo(): 'a simple string'; return 1") >>> _, node = pm.apply(DeadCodeElimination, node) >>> print(pm.dump(backend.Python, node)) def foo(): pass return 1 >>> node = ast.parse(''' ... def bar(a): ... return a ... def foo(a): ... bar(a) ... 
return 1''') >>> _, node = pm.apply(DeadCodeElimination, node) >>> print(pm.dump(backend.Python, node)) def bar(a): return a def foo(a): pass return 1 """ def __init__(self): super(DeadCodeElimination, self).__init__(PureExpressions, DefUseChains, Ancestors) self.blacklist = set() def used_target(self, node): if isinstance(node, ast.Name): if node.id in self.blacklist: return True chain = self.def_use_chains.chains[node] return bool(chain.users()) return True def visit_FunctionDef(self, node): codh = ClumsyOpenMPDependencyHandler() codh.visit(node) self.blacklist = codh.blacklist return self.generic_visit(node) def visit_Pass(self, node): ancestor = self.ancestors[node][-1] if getattr(ancestor, 'body', ()) == [node]: return node if getattr(ancestor, 'orelse', ()) == [node]: return node if metadata.get(node, OMPDirective): return node return None def visit_Assign(self, node): targets = [target for target in node.targets if self.used_target(target)] if len(targets) == len(node.targets): return node node.targets = targets self.update = True if targets: return node if node.value in self.pure_expressions: return ast.Pass() else: return ast.Expr(value=node.value) def visit_Expr(self, node): if (node in self.pure_expressions and not isinstance(node.value, ast.Yield)): self.update = True return ast.Pass() self.generic_visit(node) return node def visit_If(self, node): self.generic_visit(node) try: if ast.literal_eval(node.test): if not metadata.get(node, OMPDirective): self.update = True return node.body else: if not metadata.get(node, OMPDirective): self.update = True return node.orelse except ValueError: # not a constant expression pass have_body = any(not isinstance(x, ast.Pass) for x in node.body) have_else = any(not isinstance(x, ast.Pass) for x in node.orelse) # If the "body" is empty but "else content" is useful, switch branches # and remove else content if not have_body and have_else: test = ast.UnaryOp(op=ast.Not(), operand=node.test) self.update = True return 
ast.If(test=test, body=node.orelse, orelse=list()) # if neither "if" and "else" are useful, keep test if it is not pure elif not have_body: self.update = True if node.test in self.pure_expressions: return ast.Pass() else: node = ast.Expr(value=node.test) self.generic_visit(node) return node def visit(self, node): """ Add OMPDirective from the old node to the new one. """ old_omp = metadata.get(node, OMPDirective) node = super(DeadCodeElimination, self).visit(node) if not metadata.get(node, OMPDirective): for omp_directive in old_omp: metadata.add(node, omp_directive) return node pythran-0.10.0+ds2/pythran/optimizations/forward_substitution.py000066400000000000000000000121351416264035500252210ustar00rootroot00000000000000""" Replace variable that can be lazy evaluated and used only once by their full computation code. """ from pythran.analyses import LazynessAnalysis, UseDefChains, DefUseChains from pythran.analyses import Literals, Ancestors, Identifiers, CFG, IsAssigned from pythran.passmanager import Transformation import pythran.graph as graph from collections import defaultdict import gast as ast try: from math import isfinite except ImportError: from math import isinf, isnan def isfinite(x): return not isinf(x) and not isnan(x) class Remover(ast.NodeTransformer): def __init__(self, nodes): self.nodes = nodes def visit_Assign(self, node): if node in self.nodes: to_prune = self.nodes[node] node.targets = [tgt for tgt in node.targets if tgt not in to_prune] if node.targets: return node else: return ast.Pass() return node class ForwardSubstitution(Transformation): """ Replace variable that can be computed later. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> node = ast.parse("def foo(): a = [2, 3]; builtins.print(a)") >>> _, node = pm.apply(ForwardSubstitution, node) >>> print(pm.dump(backend.Python, node)) def foo(): pass builtins.print([2, 3]) >>> node = ast.parse("def foo(): a = 2; builtins.print(a + a)") >>> _, node = pm.apply(ForwardSubstitution, node) >>> print(pm.dump(backend.Python, node)) def foo(): a = 2 builtins.print((2 + 2)) >>> node = ast.parse("def foo():\\n a=b=2\\n while a: a -= 1\\n return b") >>> _, node = pm.apply(ForwardSubstitution, node) >>> print(pm.dump(backend.Python, node)) def foo(): a = 2 while a: a -= 1 return 2 """ def __init__(self): """ Satisfy dependencies on others analyses. """ super(ForwardSubstitution, self).__init__(LazynessAnalysis, UseDefChains, DefUseChains, Ancestors, CFG, Literals) self.to_remove = None def visit_FunctionDef(self, node): self.to_remove = defaultdict(list) self.locals = self.def_use_chains.locals[node] # prune some assignment as a second phase, as an assignment could be # forward-substituted several times (in the case of constants) self.generic_visit(node) Remover(self.to_remove).visit(node) return node def visit_Name(self, node): if not isinstance(node.ctx, ast.Load): return node # OpenMP metdata are not handled by beniget, which is fine in our case if node not in self.use_def_chains: if __debug__: from pythran.openmp import OMPDirective assert any(isinstance(p, OMPDirective) for p in self.ancestors[node]) return node defuses = self.use_def_chains[node] if len(defuses) != 1: return node defuse = defuses[0] dnode = defuse.node if not isinstance(dnode, ast.Name): return node # multiple definition, which one should we forward? 
if sum(1 for d in self.locals if d.name() == dnode.id) > 1: return node # either a constant or a value fwd = (dnode in self.literals and isfinite(self.lazyness_analysis[dnode.id])) fwd |= self.lazyness_analysis[dnode.id] == 1 if not fwd: return node parent = self.ancestors[dnode][-1] if isinstance(parent, ast.Assign): value = parent.value if dnode in self.literals: self.update = True if len(defuse.users()) == 1: self.to_remove[parent].append(dnode) return value else: # FIXME: deepcopy here creates an unknown node # for alias computations return value elif len(parent.targets) == 1: ids = self.gather(Identifiers, value) node_stmt = next(reversed([s for s in self.ancestors[node] if isinstance(s, ast.stmt)])) all_paths = graph.all_simple_paths(self.cfg, parent, node_stmt) for path in all_paths: for stmt in path[1:-1]: assigned_ids = {n.id for n in self.gather(IsAssigned, stmt)} if not ids.isdisjoint(assigned_ids): break else: continue break else: self.update = True self.to_remove[parent].append(dnode) return value return node pythran-0.10.0+ds2/pythran/optimizations/inline_builtins.py000066400000000000000000000147351416264035500241200ustar00rootroot00000000000000""" Expand some builtins implementation when it is profitable.""" from pythran.analyses import Aliases from pythran.analyses.pure_expressions import PureExpressions from pythran.passmanager import Transformation from pythran.tables import MODULES from pythran.intrinsic import FunctionIntr from pythran.utils import path_to_attr, path_to_node from pythran.syntax import PythranSyntaxError from copy import deepcopy import gast as ast class InlineBuiltins(Transformation): """ Replace some builtins by their bodies. This may trigger some extra optimizations later on! >>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> node = ast.parse(''' ... def foo(a): ... return a + 1 ... def bar(b): ... 
return builtins.map(bar, (1, 2))''') >>> _, node = pm.apply(InlineBuiltins, node) >>> print(pm.dump(backend.Python, node)) def foo(a): return (a + 1) def bar(b): return [bar(1), bar(2)] """ def __init__(self): Transformation.__init__(self, Aliases, PureExpressions) def inlineBuiltinsXMap(self, node): self.update = True elts = [] nelts = min(len(n.elts) for n in node.args[1:]) for i in range(nelts): elts.append([n.elts[i] for n in node.args[1:]]) return ast.List([ast.Call(node.args[0], elt, []) for elt in elts], ast.Load()) def inlineBuiltinsMap(self, node): if not isinstance(node, ast.Call): return node func_aliases = self.aliases[node.func] if len(func_aliases) != 1: return node obj = next(iter(func_aliases)) if obj is not MODULES['builtins']['map']: return node if not all(isinstance(arg, (ast.List, ast.Tuple)) for arg in node.args[1:]): return node mapped_func_aliases = self.aliases[node.args[0]] if len(mapped_func_aliases) != 1: return node obj = next(iter(mapped_func_aliases)) if not isinstance(obj, (ast.FunctionDef, FunctionIntr)): return node # all preconditions are met, do it! 
return self.inlineBuiltinsXMap(node) def visit_Call(self, node): node = self.generic_visit(node) node = self.inlineBuiltinsMap(node) return node def make_array_index(self, base, size, index): if isinstance(base, ast.Constant): return ast.Constant(base.value, None) if size == 1: return deepcopy(base.elts[0]) return base.elts[index] def fixedSizeArray(self, node): if isinstance(node, ast.Constant): return node, 1 if isinstance(node, (ast.List, ast.Tuple)): return node, len(node.elts) if not isinstance(node, ast.Call): return None, 0 func_aliases = self.aliases[node.func] if len(func_aliases) != 1: return None, 0 obj = next(iter(func_aliases)) if obj not in (MODULES['numpy']['array'], MODULES['numpy']['asarray']): return None, 0 if len(node.args) != 1: return None, 0 if isinstance(node.args[0], (ast.List, ast.Tuple)): return node.args[0], len(node.args[0].elts) return None, 0 def inlineFixedSizeArrayBinOp(self, node): alike = ast.List, ast.Tuple, ast.Constant if isinstance(node.left, alike) and isinstance(node.right, alike): return node lbase, lsize = self.fixedSizeArray(node.left) rbase, rsize = self.fixedSizeArray(node.right) if not lbase or not rbase: return node if rsize != 1 and lsize != 1 and rsize != lsize: raise PythranSyntaxError("Invalid numpy broadcasting", node) self.update = True operands = [ast.BinOp(self.make_array_index(lbase, lsize, i), type(node.op)(), self.make_array_index(rbase, rsize, i)) for i in range(max(lsize, rsize))] res = ast.Call(path_to_attr(('numpy', 'array')), [ast.Tuple(operands, ast.Load())], []) self.aliases[res.func] = {path_to_node(('numpy', 'array'))} return res def visit_BinOp(self, node): node = self.generic_visit(node) node = self.inlineFixedSizeArrayBinOp(node) return node def inlineFixedSizeArrayUnaryOp(self, node): if isinstance(node.operand, (ast.Constant, ast.List, ast.Tuple)): return node base, size = self.fixedSizeArray(node.operand) if not base: return node self.update = True operands = [ast.UnaryOp(type(node.op)(), 
self.make_array_index(base, size, i)) for i in range(size)] res = ast.Call(path_to_attr(('numpy', 'array')), [ast.Tuple(operands, ast.Load())], []) self.aliases[res.func] = {path_to_node(('numpy', 'array'))} return res def visit_UnaryOp(self, node): node = self.generic_visit(node) node = self.inlineFixedSizeArrayUnaryOp(node) return node def inlineFixedSizeArrayCompare(self, node): if len(node.comparators) != 1: return node node_right = node.comparators[0] alike = ast.Constant, ast.List, ast.Tuple if isinstance(node.left, alike) and isinstance(node_right, alike): return node lbase, lsize = self.fixedSizeArray(node.left) rbase, rsize = self.fixedSizeArray(node_right) if not lbase or not rbase: return node if rsize != 1 and lsize != 1 and rsize != lsize: raise PythranSyntaxError("Invalid numpy broadcasting", node) self.update = True operands = [ast.Compare(self.make_array_index(lbase, lsize, i), [type(node.ops[0])()], [self.make_array_index(rbase, rsize, i)]) for i in range(max(lsize, rsize))] res = ast.Call(path_to_attr(('numpy', 'array')), [ast.Tuple(operands, ast.Load())], []) self.aliases[res.func] = {path_to_node(('numpy', 'array'))} return res def visit_Compare(self, node): node = self.generic_visit(node) node = self.inlineFixedSizeArrayCompare(node) return node pythran-0.10.0+ds2/pythran/optimizations/inlining.py000066400000000000000000000101441416264035500225260ustar00rootroot00000000000000""" Inlining inline functions body. """ from pythran.analyses import Inlinable, Aliases from pythran.passmanager import Transformation import gast as ast import copy class Inlining(Transformation): """ Inline one line functions. >>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> node = ast.parse(''' ... def foo(a, b): ... return b + b * a ... def bar(b): ... 
return foo(2 * b, b) * foo(b, b)''') >>> _, node = pm.apply(Inlining, node) >>> print(pm.dump(backend.Python, node)) def foo(a, b): return (b + (b * a)) def bar(b): __pythran_inlinefooa0 = (2 * b) __pythran_inlinefoob0 = b __pythran_inlinefooa1 = b __pythran_inlinefoob1 = b return ((__pythran_inlinefoob0 + (__pythran_inlinefoob0 * \ __pythran_inlinefooa0)) * (__pythran_inlinefoob1 + \ (__pythran_inlinefoob1 * __pythran_inlinefooa1))) """ def __init__(self): """ fun : Function {name :body} for inlinable functions. """ self.update = False self.defs = list() self.call_count = 0 super(Inlining, self).__init__(Inlinable, Aliases) def visit_Stmt(self, node): """ Add new variable definition before the Statement. """ save_defs, self.defs = self.defs or list(), list() self.generic_visit(node) new_defs, self.defs = self.defs, save_defs return new_defs + [node] visit_Return = visit_Stmt visit_Assign = visit_Stmt visit_AugAssign = visit_Stmt visit_Print = visit_Stmt visit_For = visit_Stmt visit_While = visit_Stmt visit_If = visit_Stmt visit_With = visit_Stmt visit_Assert = visit_Stmt visit_Expr = visit_Stmt def visit_Call(self, node): """ Replace function call by inlined function's body. We can inline if it aliases on only one function. 
""" func_aliases = self.aliases[node.func] if len(func_aliases) == 1: function_def = next(iter(func_aliases)) if (isinstance(function_def, ast.FunctionDef) and function_def.name in self.inlinable): self.update = True to_inline = copy.deepcopy(self.inlinable[function_def.name]) arg_to_value = dict() values = node.args values += to_inline.args.defaults[len(node.args) - len(to_inline.args.args):] for arg_fun, arg_call in zip(to_inline.args.args, values): v_name = "__pythran_inline{}{}{}".format(function_def.name, arg_fun.id, self.call_count) new_var = ast.Name(id=v_name, ctx=ast.Store(), annotation=None, type_comment=None) self.defs.append(ast.Assign(targets=[new_var], value=arg_call, type_comment=None)) arg_to_value[arg_fun.id] = ast.Name(id=v_name, ctx=ast.Load(), annotation=None, type_comment=None) self.call_count += 1 return Inliner(arg_to_value).visit(to_inline.body[0]) return node class Inliner(ast.NodeTransformer): """ Helper transform that performed inlined body transformation. """ def __init__(self, match): """ match : {original_variable_name : Arguments use on call}. """ self.match = match super(Inliner, self).__init__() def visit_Name(self, node): """ Transform name from match values if available. """ return self.match.get(node.id, node) def visit_Return(self, node): """ Remove return keyword after inline. 
""" return self.visit(node.value) pythran-0.10.0+ds2/pythran/optimizations/iter_transformation.py000066400000000000000000000042251416264035500250130ustar00rootroot00000000000000"""IterTransformation replaces expressions by iterators when possible.""" from pythran.analyses import PotentialIterator, Aliases from pythran.passmanager import Transformation from pythran.utils import path_to_attr, path_to_node EQUIVALENT_ITERATORS = { ('builtins', "list"): None, ('builtins', "tuple"): None, ('numpy', "array"): None, ('numpy', "asarray"): None, ('numpy', "copy"): None, } class IterTransformation(Transformation): """ Replaces expressions by iterators when possible. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(l): ... return builtins.sum(l) ... def bar(n): ... return foo(builtins.list(n)) ... ''') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(IterTransformation, node) >>> print(pm.dump(backend.Python, node)) def foo(l): return builtins.sum(l) def bar(n): return foo(n) """ def __init__(self): """Gather required information.""" Transformation.__init__(self, PotentialIterator, Aliases) def find_matching_builtin(self, node): """ Return matched keyword. If the node alias on a correct keyword (and only it), it matches. 
""" for path in EQUIVALENT_ITERATORS.keys(): correct_alias = {path_to_node(path)} if self.aliases[node.func] == correct_alias: return path def visit_Call(self, node): """Replace function call by its correct iterator if it is possible.""" if node in self.potential_iterator: matched_path = self.find_matching_builtin(node) if matched_path is None: return self.generic_visit(node) # if any kind of specific (~ with more arg) behavior is required if len(node.args) != 1: return self.generic_visit(node) path = EQUIVALENT_ITERATORS[matched_path] if path: node.func = path_to_attr(path) else: node = node.args[0] self.update = True return self.generic_visit(node) pythran-0.10.0+ds2/pythran/optimizations/list_comp_to_genexp.py000066400000000000000000000022671416264035500247670ustar00rootroot00000000000000""" ListCompToGenexp transforms list comprehension into genexp. """ from pythran.analyses import PotentialIterator from pythran.passmanager import Transformation import gast as ast class ListCompToGenexp(Transformation): ''' Transforms list comprehension into genexp >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(""" \\n\ def foo(l): \\n\ return builtins.sum(l) \\n\ def bar(n): \\n\ return foo([x for x in builtins.range(n)]) \ """) >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ListCompToGenexp, node) >>> print(pm.dump(backend.Python, node)) def foo(l): return builtins.sum(l) def bar(n): return foo((x for x in builtins.range(n))) ''' def __init__(self): Transformation.__init__(self, PotentialIterator) def visit_ListComp(self, node): self.generic_visit(node) if node in self.potential_iterator: self.update = True return ast.GeneratorExp(node.elt, node.generators) else: return node pythran-0.10.0+ds2/pythran/optimizations/list_to_tuple.py000066400000000000000000000071401416264035500236070ustar00rootroot00000000000000""" ListToTuple transforms some List node into more Efficient Tuple nodes. 
""" from pythran.analyses import Aliases, FixedSizeList from pythran.tables import MODULES from pythran.passmanager import Transformation from pythran.utils import path_to_attr import gast as ast patterns = (MODULES['numpy']['full'], MODULES['numpy']['ones'], MODULES['numpy']['zeros'], MODULES['numpy']['empty'], MODULES['numpy']['concatenate'], MODULES['builtins']['tuple'], ) def islist(node): return isinstance(node, ast.List) def totuple(node): return ast.Tuple(node.elts, node.ctx) class ListToTuple(Transformation): """ Replace list nodes by tuple nodes when possible >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(n): import numpy; return numpy.ones([n,n])") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ListToTuple, node) >>> print(pm.dump(backend.Python, node)) def foo(n): import numpy return numpy.ones((n, n)) """ def __init__(self): self.update = False super(ListToTuple, self).__init__(Aliases, FixedSizeList) def visit_AugAssign(self, node): if not islist(node.value): return self.generic_visit(node) node.value = totuple(node.value) self.update = True return self.generic_visit(node) def visit_Call(self, node): func_aliases = self.aliases.get(node.func, set()) if func_aliases.issubset(patterns): if islist(node.args[0]): self.update = True node.args[0] = totuple(node.args[0]) return self.generic_visit(node) def visit_List(self, node): self.generic_visit(node) if node in self.fixed_size_list: return self.convert(node) else: return node def visit_Assign(self, node): """ Replace list calls by static_list calls when possible >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(n):\\n" ... " x = builtins.list(n)\\n" ... " x[0] = 0\\n" ... 
" return builtins.tuple(x)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ListToTuple, node) >>> print(pm.dump(backend.Python, node)) def foo(n): x = builtins.pythran.static_list(n) x[0] = 0 return builtins.tuple(x) >>> node = ast.parse("def foo(n):\\n" ... " x = builtins.list(n)\\n" ... " x[0] = 0\\n" ... " return x") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ListToTuple, node) >>> print(pm.dump(backend.Python, node)) def foo(n): x = builtins.list(n) x[0] = 0 return x """ self.generic_visit(node) if node.value not in self.fixed_size_list: return node node.value = self.convert(node.value) return node def convert(self, node): self.update = True if isinstance(node, ast.Call): if not node.args: node = ast.Tuple([]) else: node = node.args[0] elif isinstance(node, ast.List): node = ast.Tuple(node.elts, ast.Load()) return ast.Call(path_to_attr(('builtins', 'pythran', 'static_list')), [node], []) pythran-0.10.0+ds2/pythran/optimizations/loop_full_unrolling.py000066400000000000000000000072401416264035500250060ustar00rootroot00000000000000""" LoopFullUnrolling fully unrolls loops with static bounds. 
""" from pythran import metadata from pythran.analyses import HasBreak, HasContinue, NodeCount from pythran.openmp import OMPDirective from pythran.conversion import to_ast from pythran.passmanager import Transformation from copy import deepcopy import gast as ast class LoopFullUnrolling(Transformation): ''' Fully unroll loops with static bounds >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse('for j in [1,2,3]: i += j') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(LoopFullUnrolling, node) >>> print(pm.dump(backend.Python, node)) j = 1 i += j j = 2 i += j j = 3 i += j >>> node = ast.parse('for j in (a,b): i += j') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(LoopFullUnrolling, node) >>> print(pm.dump(backend.Python, node)) j = a i += j j = b i += j >>> node = ast.parse('for j in {1}: i += j') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(LoopFullUnrolling, node) >>> print(pm.dump(backend.Python, node)) j = 1 i += j >>> node = ast.parse('for j in builtins.enumerate("1"): j') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(LoopFullUnrolling, node) >>> print(pm.dump(backend.Python, node)) j = (0, '1') j ''' MAX_NODE_COUNT = 4096 def visit_For(self, node): # if the user added some OpenMP directive, trust him and no unroll if metadata.get(node, OMPDirective): return node # don't visit children because of collapse # first unroll children if needed or possible self.generic_visit(node) # a break or continue in the loop prevents unrolling too has_break = any(self.gather(HasBreak, n) for n in node.body) has_cont = any(self.gather(HasContinue, n) for n in node.body) if has_break or has_cont: return node # do not unroll too much to prevent code growth node_count = self.gather(NodeCount, node) def unroll(elt, body): return [ast.Assign([deepcopy(node.target)], elt, None)] + body def dc(body, i, n): if i == n - 1: return body else: return deepcopy(body) def getrange(n): 
return getattr(getattr(n, 'func', None), 'attr', None) if isinstance(node.iter, (ast.Tuple, ast.List)): elts_count = len(node.iter.elts) total_count = node_count * elts_count issmall = total_count < LoopFullUnrolling.MAX_NODE_COUNT if issmall: self.update = True return sum([unroll(elt, dc(node.body, i, elts_count)) for i, elt in enumerate(node.iter.elts)], []) code = compile(ast.gast_to_ast(ast.Expression(node.iter)), '', 'eval') try: values = list(eval(code, {'builtins': __import__('builtins')})) except Exception: return node values_count = len(values) total_count = node_count * values_count issmall = total_count < LoopFullUnrolling.MAX_NODE_COUNT if issmall: try: new_node = sum([unroll(to_ast(elt), dc(node.body, i, values_count)) for i, elt in enumerate(values)], []) self.update = True return new_node except Exception: return node return node pythran-0.10.0+ds2/pythran/optimizations/modindex.py000066400000000000000000000105721416264035500225330ustar00rootroot00000000000000''' Simplify modulo computation based on index''' from pythran.analyses import UseDefChains, Ancestors, Aliases, RangeValues from pythran.analyses import Identifiers from pythran.passmanager import Transformation from pythran.tables import MODULES import gast as ast from copy import deepcopy class ModIndex(Transformation): ''' Simplify modulo on loop index >>> import gast as ast >>> from pythran import passmanager, backend >>> pm = passmanager.PassManager("test") >>> code = """ ... def foo(x): ... y = builtins.len(x) ... for i in builtins.range(8): ... 
z = i % y""" >>> node = ast.parse(code) >>> _, node = pm.apply(ModIndex, node) >>> print(pm.dump(backend.Python, node)) def foo(x): y = builtins.len(x) i_m = ((0 - 1) % y) for i in builtins.range(8): i_m = (0 if ((i_m + 1) == y) else (i_m + 1)) z = i_m ''' def __init__(self): Transformation.__init__(self, UseDefChains, Ancestors, Aliases, RangeValues, Identifiers) self.loops_mod = dict() def single_def(self, node): chain = self.use_def_chains[node] return len(chain) == 1 and chain[0].node def visit_BinOp(self, node): if not isinstance(node.op, ast.Mod): return self.generic_visit(node) # check that right is a name defined once outside of loop # TODO: handle expression instead of names if not isinstance(node.right, ast.Name): return self.generic_visit(node) right_def = self.single_def(node.right) if not right_def: return self.generic_visit(node) if self.range_values[node.right.id].low < 0: return self.generic_visit(node) # same for lhs if not isinstance(node.left, ast.Name): return self.generic_visit(node) head = self.single_def(node.left) if not head: return self.generic_visit(node) # check lhs is the actual index of a loop loop = self.ancestors[head][-1] if not isinstance(loop, ast.For): return self.generic_visit(node) if not isinstance(loop.iter, ast.Call): return self.generic_visit(node) # make sure rhs is defined out of the loop if loop in self.ancestors[right_def]: return self.generic_visit(node) # gather range informations range_ = None for alias in self.aliases[loop.iter.func]: if alias is MODULES['builtins']['range']: range_ = alias else: break if range_ is None: return self.generic_visit(node) # everything is setup for the transformation! 
new_id = node.left.id + '_m' i = 0 while new_id in self.identifiers: new_id = '{}_m{}'.format(node.left.id, i) i += 1 rargs = range_.args.args lower = rargs[0] if len(rargs) > 1 else ast.Constant(0, None) header = ast.Assign([ast.Name(new_id, ast.Store(), None, None)], ast.BinOp( ast.BinOp(deepcopy(lower), ast.Sub(), ast.Constant(1, None)), ast.Mod(), deepcopy(node.right)), None) incr = ast.BinOp(ast.Name(new_id, ast.Load(), None, None), ast.Add(), ast.Constant(1, None)) step = ast.Assign([ast.Name(new_id, ast.Store(), None, None)], ast.IfExp( ast.Compare(incr, [ast.Eq()], [deepcopy(node.right)]), ast.Constant(0, None), deepcopy(incr)), None) self.loops_mod.setdefault(loop, []).append((header, step)) self.update = True return ast.Name(new_id, ast.Load(), None, None) def visit_For(self, node): self.generic_visit(node) if node not in self.loops_mod: return node headers = [h for h, _ in self.loops_mod[node]] steps = [s for _, s in self.loops_mod[node]] node.body = steps + node.body return headers + [node] pythran-0.10.0+ds2/pythran/optimizations/pattern_transform.py000066400000000000000000000315471416264035500245010ustar00rootroot00000000000000""" Optimization for Python costly pattern. 
""" from pythran.conversion import mangle from pythran.analyses import Check, Placeholder from pythran.passmanager import Transformation from copy import deepcopy import gast as ast class Pattern(object): def match(self, node): self.check = Check(node, dict()) return self.check.visit(self.pattern) def replace(self): return PlaceholderReplace(self.check.placeholders).visit(self.sub()) def imports(self): return deepcopy(getattr(self, 'extra_imports', [])) class LenSetPattern(Pattern): # builtins.len(builtins.set(X)) => builtins.pythran.len_set(X) pattern = ast.Call(func=ast.Attribute(value=ast.Name('builtins', ast.Load(), None, None), attr="len", ctx=ast.Load()), args=[ast.Call( func=ast.Attribute( value=ast.Name('builtins', ast.Load(), None, None), attr="set", ctx=ast.Load()), args=[Placeholder(0)], keywords=[])], keywords=[]) @staticmethod def sub(): return ast.Call( func=ast.Attribute( value=ast.Attribute(value=ast.Name('builtins', ast.Load(), None, None), attr="pythran", ctx=ast.Load()), attr="len_set", ctx=ast.Load()), args=[Placeholder(0)], keywords=[]) class LenRangePattern(Pattern): # builtins.len(builtins.range(X)) => max(0, X) pattern = ast.Call(func=ast.Attribute(value=ast.Name('builtins', ast.Load(), None, None), attr="len", ctx=ast.Load()), args=[ast.Call( func=ast.Attribute( value=ast.Name('builtins', ast.Load(), None, None), attr="range", ctx=ast.Load()), args=[Placeholder(0)], keywords=[])], keywords=[]) @staticmethod def sub(): return ast.Call( func=ast.Attribute(value=ast.Name('builtins', ast.Load(), None, None), attr="max", ctx=ast.Load()), args=[ast.Constant(0, None), Placeholder(0)], keywords=[]) class TupleListPattern(Pattern): # builtins.tuple(builtins.list(X)) => builtins.tuple(X) pattern = ast.Call(func=ast.Attribute(value=ast.Name('builtins', ast.Load(), None, None), attr="tuple", ctx=ast.Load()), args=[ast.Call( func=ast.Attribute( value=ast.Name('builtins', ast.Load(), None, None), attr="list", ctx=ast.Load()), args=[Placeholder(0)], 
keywords=[])], keywords=[]) @staticmethod def sub(): return ast.Call( func=ast.Attribute(value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr="tuple", ctx=ast.Load()), args=[Placeholder(0)], keywords=[]) class AbsSqrPattern(Pattern): # builtins.abs(X) ** 2 => builtins.pythran.abssqr(X) pattern = ast.Call(func=ast.Attribute(value=ast.Name(id=mangle('numpy'), ctx=ast.Load(), annotation=None, type_comment=None), attr="square", ctx=ast.Load()), args=[ast.Call(func=ast.Attribute( value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr="abs", ctx=ast.Load()), args=[Placeholder(0)], keywords=[])], keywords=[]) @staticmethod def sub(): return ast.Call( func=ast.Attribute( value=ast.Attribute(value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr="pythran", ctx=ast.Load()), attr="abssqr", ctx=ast.Load()), args=[Placeholder(0)], keywords=[]) class AbsSqrPatternNumpy(AbsSqrPattern): # numpy.abs(X) ** 2 => builtins.pythran.abssqr(X) pattern = ast.Call(func=ast.Attribute(value=ast.Name(id=mangle('numpy'), ctx=ast.Load(), annotation=None, type_comment=None), attr="square", ctx=ast.Load()), args=[ast.Call(func=ast.Attribute( value=ast.Name(id=mangle('numpy'), ctx=ast.Load(), annotation=None, type_comment=None), attr="abs", ctx=ast.Load()), args=[Placeholder(0)], keywords=[])], keywords=[]) class PowFuncPattern(Pattern): # builtins.pow(X, Y) => X ** Y pattern = ast.Call(func=ast.Attribute( value=ast.Name(id=mangle('builtins'), ctx=ast.Load(), annotation=None, type_comment=None), attr='pow', ctx=ast.Load()), args=[Placeholder(0), Placeholder(1)], keywords=[]) @staticmethod def sub(): return ast.BinOp(Placeholder(0), ast.Pow(), Placeholder(1)) class SqrtPattern(Pattern): # X ** .5 => numpy.sqrt(X) pattern = ast.BinOp(Placeholder(0), ast.Pow(), ast.Constant(0.5, None)) @staticmethod def sub(): return ast.Call( func=ast.Attribute(value=ast.Name(id=mangle('numpy'), ctx=ast.Load(), 
annotation=None, type_comment=None), attr="sqrt", ctx=ast.Load()), args=[Placeholder(0)], keywords=[]) extra_imports = [ast.Import([ast.alias('numpy', mangle('numpy'))])] class CbrtPattern(Pattern): # X ** .33333 => numpy.cbrt(X) pattern = ast.BinOp(Placeholder(0), ast.Pow(), ast.Constant(1./3., None)) @staticmethod def sub(): return ast.Call( func=ast.Attribute(value=ast.Name(id=mangle('numpy'), ctx=ast.Load(), annotation=None, type_comment=None), attr="cbrt", ctx=ast.Load()), args=[Placeholder(0)], keywords=[]) extra_imports = [ast.Import([ast.alias('numpy', mangle('numpy'))])] class TuplePattern(Pattern): # builtins.tuple([X, ..., Z]) => (X, ..., Z) pattern = ast.Call(func=ast.Attribute(value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr="tuple", ctx=ast.Load()), args=[ast.List(Placeholder(0), ast.Load())], keywords=[]) @staticmethod def sub(): return ast.Tuple(Placeholder(0), ast.Load()) class ReversedRangePattern(Pattern): # builtins.reversed(builtins.range(X)) => # builtins.range(X-1, -1, -1) # FIXME : We should do it even when begin/end/step are given pattern = ast.Call(func=ast.Attribute(value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr="reversed", ctx=ast.Load()), args=[ast.Call( func=ast.Attribute( value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr='range', ctx=ast.Load()), args=[Placeholder(0)], keywords=[])], keywords=[]) @staticmethod def sub(): return ast.Call( func=ast.Attribute(value=ast.Name(id='builtins', ctx=ast.Load(), annotation=None, type_comment=None), attr='range', ctx=ast.Load()), args=[ast.BinOp(left=Placeholder(0), op=ast.Sub(), right=ast.Constant(1, None)), ast.Constant(-1, None), ast.Constant(-1, None)], keywords=[]) class SqrPattern(Pattern): # X * X => X ** 2 pattern = ast.BinOp(left=Placeholder(0), op=ast.Mult(), right=Placeholder(0)) @staticmethod def sub(): return ast.BinOp(left=Placeholder(0), op=ast.Pow(), 
right=ast.Constant(2, None)) class StrJoinPattern(Pattern): # a + "..." + b => "...".join((a, b)) pattern = ast.BinOp(left=ast.BinOp(left=Placeholder(0), op=ast.Add(), right=ast.Constant(Placeholder(1, str), None)), op=ast.Add(), right=Placeholder(2)) @staticmethod def sub(): return ast.Call(func=ast.Attribute( ast.Attribute( ast.Name('builtins', ast.Load(), None, None), 'str', ast.Load()), 'join', ast.Load()), args=[ast.Constant(Placeholder(1), None), ast.Tuple([Placeholder(0), Placeholder(2)], ast.Load())], keywords=[]) know_pattern = [x for x in globals().values() if hasattr(x, "pattern")] class PlaceholderReplace(Transformation): """ Helper class to replace the placeholder once value is collected. """ def __init__(self, placeholders): """ Store placeholders value collected. """ self.placeholders = placeholders super(PlaceholderReplace, self).__init__() def visit(self, node): """ Replace the placeholder if it is one or continue. """ if isinstance(node, Placeholder): return self.placeholders[node.id] else: return super(PlaceholderReplace, self).visit(node) class PatternTransform(Transformation): """ Replace all known pattern by pythran function call. Based on BaseMatcher to search correct pattern. """ def __init__(self): """ Initialize the Basematcher to search for placeholders. """ super(PatternTransform, self).__init__() def visit_Module(self, node): self.extra_imports = [] self.generic_visit(node) node.body = self.extra_imports + node.body return node def visit(self, node): """ Try to replace if node match the given pattern or keep going. 
""" for pattern in know_pattern: matcher = pattern() if matcher.match(node): self.extra_imports.extend(matcher.imports()) node = matcher.replace() self.update = True return super(PatternTransform, self).visit(node) pythran-0.10.0+ds2/pythran/optimizations/range_based_simplify.py000066400000000000000000000053201416264035500250650ustar00rootroot00000000000000''' Simplify expressions based on range information when possible''' from pythran.analyses import RangeValues from pythran.passmanager import Transformation import gast as ast from math import isinf from copy import deepcopy class RangeBasedSimplify(Transformation): ''' Simplify expressions based on range analysis >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def any():\\n for x in builtins.range(10): y=x%8") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RangeBasedSimplify, node) >>> print(pm.dump(backend.Python, node)) def any(): for x in builtins.range(10): y = (x if (x < 8) else (x - 8)) >>> node = ast.parse("def any(): x = 1 or 2; return 3 == x") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RangeBasedSimplify, node) >>> print(pm.dump(backend.Python, node)) def any(): x = (1 or 2) return 0 >>> node = ast.parse("def a(i): x = 1,1,2; return x[2], x[0 if i else 1]") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RangeBasedSimplify, node) >>> print(pm.dump(backend.Python, node)) def a(i): x = (1, 1, 2) return (2, 1) ''' def __init__(self): Transformation.__init__(self, RangeValues) def visit_OMPDirective(self, node): return node def visit_BinOp(self, node): node = self.generic_visit(node) if not isinstance(node.op, ast.Mod): return node right_range = self.range_values[node.right] left_range = self.range_values[node.left] if right_range.low < 0 or isinf(right_range.high): return node if left_range.low < -right_range.low: return node if left_range.high > right_range.high * 2: return node cleft0, cleft1 = 
deepcopy(node.left), deepcopy(node.left) cright = deepcopy(node.right) self.update = True return ast.IfExp(ast.Compare(node.left, [ast.Lt()], [node.right]), cleft0, ast.BinOp(cleft1, ast.Sub(), cright)) def visit_range(self, node): range_value = self.range_values[node] if isinf(range_value.high): return self.generic_visit(node) elif range_value.low == range_value.high: self.update = True return ast.Constant(range_value.low, None) else: return self.generic_visit(node) visit_Compare = visit_range def visit_Name(self, node): if isinstance(node.ctx, ast.Load): return self.visit_range(node) return self.generic_visit(node) visit_Subscript = visit_Name pythran-0.10.0+ds2/pythran/optimizations/range_loop_unfolding.py000066400000000000000000000033531416264035500251150ustar00rootroot00000000000000""" RangeLoopUnfolding turns unfolded range of non unrolled loops back to range. """ from pythran.passmanager import Transformation from pythran.utils import isnum import gast as ast class RangeLoopUnfolding(Transformation): """ Turns iteration over an incrementing list of literals into a range >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("for i in [1,2,3]: print(i)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RangeLoopUnfolding, node) >>> print(pm.dump(backend.Python, node)) for i in builtins.range(1, 4, 1): print(i) """ def isrange(self, elts): if not elts: return None if not all(isnum(x) and isinstance(x.value, int) for x in elts): return None unboxed_ints = [x.value for x in elts] start = unboxed_ints[0] if len(unboxed_ints) == 1: return start, start + 1, 1 else: step = unboxed_ints[1] - start stop = unboxed_ints[-1] + step if unboxed_ints == list(range(start, stop, step)): return start, stop, step else: return None def visit_For(self, node): if isinstance(node.iter, (ast.List, ast.Tuple)): range_params = self.isrange(node.iter.elts) if range_params: node.iter = ast.Call(ast.Attribute( ast.Name('builtins', 
ast.Load(), None, None), 'range', node.iter.ctx), [ast.Constant(param, None) for param in range_params], []) self.update = True return self.generic_visit(node) pythran-0.10.0+ds2/pythran/optimizations/remove_dead_functions.py000066400000000000000000000021371416264035500252640ustar00rootroot00000000000000""" DeadCodeElimination remove useless code. """ from pythran.analyses import DefUseChains from pythran.passmanager import Transformation import pythran.metadata as metadata class RemoveDeadFunctions(Transformation): """ Remove useless local functions >>> import gast as ast >>> from pythran import passmanager, backend, metadata >>> pm = passmanager.PassManager("test") >>> node = ast.parse("def foo(): return 1") >>> _, node = pm.apply(RemoveDeadFunctions, node) >>> print(pm.dump(backend.Python, node)) def foo(): return 1 >>> node = ast.parse("def foo(): return 1") >>> metadata.add(node.body[0], metadata.Local()) >>> _, node = pm.apply(RemoveDeadFunctions, node) >>> print(pm.dump(backend.Python, node)) """ def __init__(self): super(RemoveDeadFunctions, self).__init__(DefUseChains) def visit_FunctionDef(self, node): if metadata.get(node, metadata.Local): if not self.def_use_chains.chains[node].users(): self.update = True return None return node pythran-0.10.0+ds2/pythran/optimizations/simplify_except.py000066400000000000000000000025641416264035500241320ustar00rootroot00000000000000""" Replaces **2 by a call to numpy.square. 
""" from pythran.passmanager import Transformation import gast as ast def getid(node): if isinstance(node, ast.Attribute): return getid(node.value), node.attr if isinstance(node, ast.Name): return node.id return node class SimplifyExcept(Transformation): """ Remove redundant except clauses >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse('try: pass\\nexcept (OSError, OSError): pass') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(SimplifyExcept, node) >>> print(pm.dump(backend.Python, node)) try: pass except OSError: pass """ def visit_ExceptHandler(self, node): if isinstance(node.type, ast.Tuple): all_ids = {getid(elt) for elt in node.type.elts} to_remove = [] for i, elt in enumerate(node.type.elts): eltid = getid(elt) if eltid in all_ids: all_ids.remove(eltid) else: to_remove.append(i) for i in reversed(to_remove): node.type.elts.pop(i) if len(node.type.elts) == 1: node.type = node.type.elts[0] self.update = True self.update |= bool(to_remove) return node pythran-0.10.0+ds2/pythran/optimizations/square.py000066400000000000000000000056021416264035500222220ustar00rootroot00000000000000""" Replaces **2 by a call to numpy.square. """ from pythran.passmanager import Transformation from pythran.analyses.ast_matcher import ASTMatcher, AST_any from pythran.conversion import mangle from pythran.utils import isnum import gast as ast import copy class Square(Transformation): """ Replaces **2 by a call to numpy.square. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse('a**2') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(Square, node) >>> print(pm.dump(backend.Python, node)) import numpy as __pythran_import_numpy __pythran_import_numpy.square(a) >>> node = ast.parse('__pythran_import_numpy.power(a,2)') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(Square, node) >>> print(pm.dump(backend.Python, node)) import numpy as __pythran_import_numpy __pythran_import_numpy.square(a) """ POW_PATTERN = ast.BinOp(AST_any(), ast.Pow(), ast.Constant(2, None)) POWER_PATTERN = ast.Call( ast.Attribute(ast.Name(mangle('numpy'), ast.Load(), None, None), 'power', ast.Load()), [AST_any(), ast.Constant(2, None)], []) def __init__(self): Transformation.__init__(self) def replace(self, value): self.update = self.need_import = True module_name = ast.Name(mangle('numpy'), ast.Load(), None, None) return ast.Call(ast.Attribute(module_name, 'square', ast.Load()), [value], []) def visit_Module(self, node): self.need_import = False self.generic_visit(node) if self.need_import: import_alias = ast.alias(name='numpy', asname=mangle('numpy')) importIt = ast.Import(names=[import_alias]) node.body.insert(0, importIt) return node def expand_pow(self, node, n): if n == 0: return ast.Constant(1, None) elif n == 1: return node else: node_square = self.replace(node) node_pow = self.expand_pow(node_square, n >> 1) if n & 1: return ast.BinOp(node_pow, ast.Mult(), copy.deepcopy(node)) else: return node_pow def visit_BinOp(self, node): self.generic_visit(node) if ASTMatcher(Square.POW_PATTERN).search(node): return self.replace(node.left) elif isinstance(node.op, ast.Pow) and isnum(node.right): n = node.right.value if int(n) == n and n > 0: return self.expand_pow(node.left, n) else: return node else: return node def visit_Call(self, node): self.generic_visit(node) if ASTMatcher(Square.POWER_PATTERN).search(node): return self.replace(node.args[0]) else: 
return node pythran-0.10.0+ds2/pythran/optimizations/tuple_to_shape.py000066400000000000000000000035271416264035500237410ustar00rootroot00000000000000""" TupleToShap transforms some Tuple node into shape nodes when relevant. """ from pythran.analyses import Aliases from pythran.tables import MODULES from pythran.passmanager import Transformation from pythran.utils import pythran_builtin_attr import gast as ast patterns = (MODULES['numpy']['full'], MODULES['numpy']['ones'], MODULES['numpy']['zeros'], MODULES['numpy']['empty'], ) reshape_patterns = MODULES['numpy']['ndarray']['reshape'], def istuple(node): return isinstance(node, ast.Tuple) def toshape(node): b = pythran_builtin_attr("make_shape") return ast.Call(b, node.elts, []) class TupleToShape(Transformation): """ Replace tuple nodes by shape when relevant >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(n): import numpy; return numpy.ones((n,4))") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(TupleToShape, node) >>> print(pm.dump(backend.Python, node)) def foo(n): import numpy return numpy.ones(builtins.pythran.make_shape(n, 4)) """ def __init__(self): self.update = False super(TupleToShape, self).__init__(Aliases) def visit_Call(self, node): func_aliases = self.aliases.get(node.func, None) if func_aliases is not None: if func_aliases.issubset(patterns): if istuple(node.args[0]): self.update = True node.args[0] = toshape(node.args[0]) elif func_aliases.issubset(reshape_patterns): if len(node.args) > 2: self.update = True node.args[1:] = [toshape(ast.List(node.args[1:], ast.Load()))] return self.generic_visit(node) pythran-0.10.0+ds2/pythran/passmanager.py000066400000000000000000000172441416264035500203170ustar00rootroot00000000000000""" This module provides classes and functions for pass management. There are two kinds of passes: transformations and analysis. 
* ModuleAnalysis, FunctionAnalysis and NodeAnalysis are to be subclassed by any pass that collects information about the AST. * gather is used to gather (!) the result of an analyses on an AST node. * Backend is to be sub-classed by any pass that dumps the AST content. * dump is used to dump (!) the AST using the given backend. * Transformation is to be sub-classed by any pass that updates the AST. * apply is used to apply (sic) a transformation on an AST node. """ import gast as ast import os import re def uncamel(name): """Transform CamelCase naming convention into C-ish convention.""" s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() class AnalysisContext(object): """ Class that stores the hierarchy of node visited. Contains: * parent module * parent function """ def __init__(self): self.module = None self.function = None class ContextManager(object): """ Class to be inherited from to add automatic update of context. The optional analysis dependencies are listed in `dependencies'. """ def __init__(self, *dependencies): """ Create default context and save all dependencies. """ self.deps = dependencies self.verify_dependencies() super(ContextManager, self).__init__() def attach(self, pm, ctx=None): self.passmanager = pm self.ctx = ctx or AnalysisContext() def verify_dependencies(self): """ Checks no analysis are called before a transformation, as the transformation could invalidate the analysis. 
""" for i in range(1, len(self.deps)): assert(not (isinstance(self.deps[i], Transformation) and isinstance(self.deps[i - 1], Analysis)) ), "invalid dep order for %s" % self def visit(self, node): if isinstance(node, ast.FunctionDef): self.ctx.function = node for D in self.deps: if issubclass(D, FunctionAnalysis): # this should have already been computed as part of the run # method of function analysis triggered by prepare result = self.passmanager._cache[node, D] setattr(self, uncamel(D.__name__), result) return super(ContextManager, self).visit(node) def prepare(self, node): '''Gather analysis result required by this analysis''' if isinstance(node, ast.Module): self.ctx.module = node elif isinstance(node, ast.FunctionDef): self.ctx.function = node for D in self.deps: d = D() d.attach(self.passmanager, self.ctx) result = d.run(node) setattr(self, uncamel(D.__name__), result) def run(self, node): """Override this to add special pre or post processing handlers.""" self.prepare(node) return self.visit(node) def gather(self, analysis, node): a = analysis() a.attach(self.passmanager, self.ctx) return a.run(node) class Analysis(ContextManager, ast.NodeVisitor): """ A pass that does not change its content but gathers informations about it. """ def __init__(self, *dependencies): '''`dependencies' holds the type of all analysis required by this analysis. 
`self.result' must be set prior to calling this constructor.''' assert hasattr(self, "result"), ( "An analysis must have a result attribute when initialized") self.update = False ContextManager.__init__(self, *dependencies) def run(self, node): key = node, type(self) if key in self.passmanager._cache: self.result = self.passmanager._cache[key] else: super(Analysis, self).run(node) self.passmanager._cache[key] = self.result return self.result def display(self, data): print(data) def apply(self, node): self.display(self.run(node)) return False, node class ModuleAnalysis(Analysis): """An analysis that operates on a whole module.""" def run(self, node): if not isinstance(node, ast.Module): if self.ctx.module is None: raise ValueError("{} called in an uninitialized context" .format(type(self).__name__)) node = self.ctx.module return super(ModuleAnalysis, self).run(node) class FunctionAnalysis(Analysis): """An analysis that operates on a function.""" def run(self, node): if isinstance(node, ast.Module): self.ctx.module = node last = None for stmt in node.body: if isinstance(stmt, ast.FunctionDef): last = self.gather(type(self), stmt) # last is None if there's no function to process return self.result if last is None else last elif not isinstance(node, ast.FunctionDef): if self.ctx.function is None: raise ValueError("{} called in an uninitialized context" .format(type(self).__name__)) node = self.ctx.function return super(FunctionAnalysis, self).run(node) class NodeAnalysis(Analysis): """An analysis that operates on any node.""" class Backend(ModuleAnalysis): """A pass that produces code from an AST.""" class Transformation(ContextManager, ast.NodeTransformer): """A pass that updates its content.""" def __init__(self, *args, **kwargs): """ Initialize the update used to know if update happened. 
""" super(Transformation, self).__init__(*args, **kwargs) self.update = False def run(self, node): """ Apply transformation and dependencies and fix new node location.""" n = super(Transformation, self).run(node) # the transformation updated the AST, so analyse may need to be rerun # we could use a finer-grain caching system, and provide a way to flag # some analyses as `unmodified' by the transformation, as done in LLVM # (and PIPS ;-) if self.update: ast.fix_missing_locations(n) self.passmanager._cache.clear() return n def apply(self, node): """ Apply transformation and return if an update happened. """ new_node = self.run(node) return self.update, new_node class PassManager(object): ''' Front end to the pythran pass system. ''' def __init__(self, module_name, module_dir=None): self.module_name = module_name self.module_dir = module_dir or os.getcwd() self._cache = {} def gather(self, analysis, node): "High-level function to call an `analysis' on a `node'" assert issubclass(analysis, Analysis) a = analysis() a.attach(self) return a.run(node) def dump(self, backend, node): '''High-level function to call a `backend' on a `node' to generate code for module `module_name'.''' assert issubclass(backend, Backend) b = backend() b.attach(self) return b.run(node) def apply(self, transformation, node): ''' High-level function to call a `transformation' on a `node'. If the transformation is an analysis, the result of the analysis is displayed. 
''' assert issubclass(transformation, (Transformation, Analysis)) a = transformation() a.attach(self) return a.apply(node) pythran-0.10.0+ds2/pythran/pythonic/000077500000000000000000000000001416264035500172715ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/000077500000000000000000000000001416264035500216645ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/clear.hpp000066400000000000000000000004641416264035500234670ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_CLEAR_HPP #define PYTHONIC_DISPATCH_CLEAR_HPP #include "pythonic/include/__dispatch__/clear.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto clear(Any &&any) -> decltype(any.clear()); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/conjugate.hpp000066400000000000000000000006711416264035500243600ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_CONJUGATE_HPP #define PYTHONIC_DISPATCH_CONJUGATE_HPP #include "pythonic/include/__dispatch__/conjugate.hpp" #include "pythonic/numpy/conjugate.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto conjugate(Any const &any) -> decltype(numpy::functor::conjugate{}(any)) { return numpy::functor::conjugate{}(any); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/copy.hpp000066400000000000000000000005211416264035500233450ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_COPY_HPP #define PYTHONIC_DISPATCH_COPY_HPP #include "pythonic/include/__dispatch__/copy.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto copy(Any const &any) -> decltype(any.copy()) { return any.copy(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/count.hpp000066400000000000000000000006511416264035500235270ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_COUNT_HPP #define 
PYTHONIC_DISPATCH_COUNT_HPP #include "pythonic/include/__dispatch__/count.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto count(Any &&any, Value &&value) -> decltype(any.count(std::forward(value))) { return any.count(std::forward(value)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/index.hpp000066400000000000000000000002561416264035500235070ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_INDEX_HPP #define PYTHONIC_DISPATCH_INDEX_HPP #include "pythonic/include/__dispatch__/index.hpp" #include "pythonic/operator_/indexOf.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/pop.hpp000066400000000000000000000006421416264035500231750ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_POP_HPP #define PYTHONIC_DISPATCH_POP_HPP #include "pythonic/include/__dispatch__/pop.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto pop(Any &&any, Arg0 &&... 
arg0) -> decltype(any.pop(std::forward(arg0)...)) { return any.pop(std::forward(arg0)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/remove.hpp000066400000000000000000000005751416264035500237010ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_REMOVE_HPP #define PYTHONIC_DISPATCH_REMOVE_HPP #include "pythonic/include/__dispatch__/remove.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto remove(Any &any, Arg0 const &arg0) -> decltype(any.remove(arg0)) { return any.remove(arg0); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/sort.hpp000066400000000000000000000022211416264035500233610ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_SORT_HPP #define PYTHONIC_DISPATCH_SORT_HPP #include "pythonic/include/__dispatch__/sort.hpp" #include "pythonic/builtins/list/sort.hpp" #include "pythonic/numpy/sort.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto sort(types::list &l, Args &&... args) -> decltype(pythonic::builtins::list::sort(l, std::forward(args)...)) { return pythonic::builtins::list::sort(l, std::forward(args)...); } template auto sort(types::list &&l, Args &&... args) -> decltype(pythonic::builtins::list::sort(std::move(l), std::forward(args)...)) { return pythonic::builtins::list::sort(std::move(l), std::forward(args)...); } template types::none_type sort(Any &&any, Args &&... args) { return pythonic::numpy::ndarray::sort(std::forward(any), std::forward(args)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/__dispatch__/update.hpp000066400000000000000000000006651416264035500236660ustar00rootroot00000000000000#ifndef PYTHONIC_DISPATCH_UPDATE_HPP #define PYTHONIC_DISPATCH_UPDATE_HPP #include "pythonic/include/__dispatch__/update.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto update(Any &&any, Arg0 &&... 
arg0) -> decltype(any.update(std::forward(arg0)...)) { return any.update(std::forward(arg0)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/bisect/000077500000000000000000000000001416264035500205425ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/bisect/bisect.hpp000066400000000000000000000015411416264035500225250ustar00rootroot00000000000000#ifndef PYTHONIC_BISECT_BISECT_HPP #define PYTHONIC_BISECT_BISECT_HPP #include "pythonic/include/bisect/bisect.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace bisect { template long bisect(X const &x, A const &a, long lo, details::bisect_fun const &fun) { if (lo < 0) throw types::ValueError("lo must be non-negative"); return std::distance(x.begin(), fun(x.begin() + lo, x.end(), a)); } template long bisect(X const &x, A const &a, long lo, long hi, details::bisect_fun const &fun) { if (lo < 0) throw types::ValueError("lo must be non-negative"); return std::distance(x.begin(), fun(x.begin() + lo, x.begin() + hi, a)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/bisect/bisect_left.hpp000066400000000000000000000012261416264035500235370ustar00rootroot00000000000000#ifndef PYTHONIC_BISECT_BISECTLEFT_HPP #define PYTHONIC_BISECT_BISECTLEFT_HPP #include "pythonic/include/bisect/bisect_left.hpp" #include "pythonic/bisect/bisect.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace bisect { template long bisect_left(X const &x, A const &a, long lo) { return bisect(x, a, lo, std::lower_bound); } template long bisect_left(X const &x, A const &a, long lo, long hi) { return bisect(x, a, lo, hi, std::lower_bound); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/bisect/bisect_right.hpp000066400000000000000000000010221416264035500237140ustar00rootroot00000000000000#ifndef PYTHONIC_BISECT_BISECTRIGHT_HPP #define PYTHONIC_BISECT_BISECTRIGHT_HPP #include 
"pythonic/include/bisect/bisect_right.hpp" #include "pythonic/bisect/bisect.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace bisect { template long bisect_right(X const &x, A const &a, long lo) { return bisect(x, a, lo); } template long bisect_right(X const &x, A const &a, long lo, long hi) { return bisect(x, a, lo, hi); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/000077500000000000000000000000001416264035500211225ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/ArithmeticError.hpp000066400000000000000000000004551416264035500247420ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ARITHMETICERROR_HPP #define PYTHONIC_BUILTIN_ARITHMETICERROR_HPP #include "pythonic/include/builtins/ArithmeticError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(ArithmeticError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/AssertionError.hpp000066400000000000000000000004511416264035500246140ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ASSERTIONERROR_HPP #define PYTHONIC_BUILTIN_ASSERTIONERROR_HPP #include "pythonic/include/builtins/AssertionError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(AssertionError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/AttributeError.hpp000066400000000000000000000004511416264035500246100ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ATTRIBUTEERROR_HPP #define PYTHONIC_BUILTIN_ATTRIBUTEERROR_HPP #include "pythonic/include/builtins/AttributeError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(AttributeError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/BaseException.hpp000066400000000000000000000004451416264035500243670ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_BASEEXCEPTION_HPP #define 
PYTHONIC_BUILTIN_BASEEXCEPTION_HPP #include "pythonic/include/builtins/BaseException.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(BaseException) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/BufferError.hpp000066400000000000000000000004351416264035500240600ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_BUFFERERROR_HPP #define PYTHONIC_BUILTIN_BUFFERERROR_HPP #include "pythonic/include/builtins/BufferError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(BufferError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/BytesWarning.hpp000066400000000000000000000004411416264035500242460ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_BYTESWARNING_HPP #define PYTHONIC_BUILTIN_BYTESWARNING_HPP #include "pythonic/include/builtins/BytesWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(BytesWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/DeprecationWarning.hpp000066400000000000000000000004711416264035500254200ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DEPRECATIONWARNING_HPP #define PYTHONIC_BUILTIN_DEPRECATIONWARNING_HPP #include "pythonic/include/builtins/DeprecationWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(DeprecationWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/EOFError.hpp000066400000000000000000000004211416264035500232530ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_EOFERROR_HPP #define PYTHONIC_BUILTIN_EOFERROR_HPP #include "pythonic/include/builtins/EOFError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(EOFError) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/EnvironmentError.hpp000066400000000000000000000004611416264035500251520ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ENVIRONMENTERROR_HPP #define PYTHONIC_BUILTIN_ENVIRONMENTERROR_HPP #include "pythonic/include/builtins/EnvironmentError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(EnvironmentError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/Exception.hpp000066400000000000000000000004251416264035500235720ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_EXCEPTION_HPP #define PYTHONIC_BUILTIN_EXCEPTION_HPP #include "pythonic/include/builtins/Exception.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(Exception) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/False.hpp000066400000000000000000000001761416264035500226710ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FALSE_HPP #define PYTHONIC_BUILTIN_FALSE_HPP #include "pythonic/include/builtins/False.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/FileNotFoundError.hpp000066400000000000000000000004651416264035500252060ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILENOTFOUNDERROR_HPP #define PYTHONIC_BUILTIN_FILENOTFOUNDERROR_HPP #include "pythonic/include/builtins/FileNotFoundError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(FileNotFoundError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/FloatingPointError.hpp000066400000000000000000000004711416264035500254240ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FLOATINGPOINTERROR_HPP #define PYTHONIC_BUILTIN_FLOATINGPOINTERROR_HPP #include "pythonic/include/builtins/FloatingPointError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(FloatingPointError) } PYTHONIC_NS_END 
#endif pythran-0.10.0+ds2/pythran/pythonic/builtins/FutureWarning.hpp000066400000000000000000000004451416264035500244360ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FUTUREWARNING_HPP #define PYTHONIC_BUILTIN_FUTUREWARNING_HPP #include "pythonic/include/builtins/FutureWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(FutureWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/GeneratorExit.hpp000066400000000000000000000004451416264035500244160ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_GENERATOREXIT_HPP #define PYTHONIC_BUILTIN_GENERATOREXIT_HPP #include "pythonic/include/builtins/GeneratorExit.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(GeneratorExit) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/IOError.hpp000066400000000000000000000004151416264035500231540ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_IOERROR_HPP #define PYTHONIC_BUILTIN_IOERROR_HPP #include "pythonic/include/builtins/IOError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(IOError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/ImportError.hpp000066400000000000000000000004351416264035500241210ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_IMPORTERROR_HPP #define PYTHONIC_BUILTIN_IMPORTERROR_HPP #include "pythonic/include/builtins/ImportError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(ImportError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/ImportWarning.hpp000066400000000000000000000004451416264035500244360ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_IMPORTWARNING_HPP #define PYTHONIC_BUILTIN_IMPORTWARNING_HPP #include "pythonic/include/builtins/ImportWarning.hpp" #include "pythonic/types/exceptions.hpp" 
PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(ImportWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/IndentationError.hpp000066400000000000000000000004611416264035500251220ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_INDENTATIONERROR_HPP #define PYTHONIC_BUILTIN_INDENTATIONERROR_HPP #include "pythonic/include/builtins/IndentationError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(IndentationError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/IndexError.hpp000066400000000000000000000004311416264035500237120ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_INDEXERROR_HPP #define PYTHONIC_BUILTIN_INDEXERROR_HPP #include "pythonic/include/builtins/IndexError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(IndexError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/KeyError.hpp000066400000000000000000000004211416264035500233720ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_KEYERROR_HPP #define PYTHONIC_BUILTIN_KEYERROR_HPP #include "pythonic/include/builtins/KeyError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(KeyError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/KeyboardInterrupt.hpp000066400000000000000000000004651416264035500253150ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_KEYBOARDINTERRUPT_HPP #define PYTHONIC_BUILTIN_KEYBOARDINTERRUPT_HPP #include "pythonic/include/builtins/KeyboardInterrupt.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(KeyboardInterrupt) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/LookupError.hpp000066400000000000000000000004351416264035500241200ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LOOKUPERROR_HPP #define 
PYTHONIC_BUILTIN_LOOKUPERROR_HPP #include "pythonic/include/builtins/LookupError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(LookupError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/MemoryError.hpp000066400000000000000000000004351416264035500241170ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_MEMORYERROR_HPP #define PYTHONIC_BUILTIN_MEMORYERROR_HPP #include "pythonic/include/builtins/MemoryError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(MemoryError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/NameError.hpp000066400000000000000000000004251416264035500235260ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_NAMEERROR_HPP #define PYTHONIC_BUILTIN_NAMEERROR_HPP #include "pythonic/include/builtins/NameError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(NameError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/None.hpp000066400000000000000000000001731416264035500225330ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_NONE_HPP #define PYTHONIC_BUILTIN_NONE_HPP #include "pythonic/include/builtins/None.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/NotImplementedError.hpp000066400000000000000000000004751416264035500255770ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_NOTIMPLEMENTEDERROR_HPP #define PYTHONIC_BUILTIN_NOTIMPLEMENTEDERROR_HPP #include "pythonic/include/builtins/NotImplementedError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(NotImplementedError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/OSError.hpp000066400000000000000000000004151416264035500231660ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_OSERROR_HPP #define PYTHONIC_BUILTIN_OSERROR_HPP #include 
"pythonic/include/builtins/OSError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(OSError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/OverflowError.hpp000066400000000000000000000004451416264035500244530ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_OVERFLOWERROR_HPP #define PYTHONIC_BUILTIN_OVERFLOWERROR_HPP #include "pythonic/include/builtins/OverflowError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(OverflowError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/PendingDeprecationWarning.hpp000066400000000000000000000005251416264035500267250ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PENDINGDEPRECATIONWARNING_HPP #define PYTHONIC_BUILTIN_PENDINGDEPRECATIONWARNING_HPP #include "pythonic/include/builtins/PendingDeprecationWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(PendingDeprecationWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/ReferenceError.hpp000066400000000000000000000004511416264035500245430ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_REFERENCEERROR_HPP #define PYTHONIC_BUILTIN_REFERENCEERROR_HPP #include "pythonic/include/builtins/ReferenceError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(ReferenceError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/RuntimeError.hpp000066400000000000000000000004411416264035500242670ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_RUNTIMEERROR_HPP #define PYTHONIC_BUILTIN_RUNTIMEERROR_HPP #include "pythonic/include/builtins/RuntimeError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(RuntimeError) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/RuntimeWarning.hpp000066400000000000000000000004511416264035500246040ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_RUNTIMEWARNING_HPP #define PYTHONIC_BUILTIN_RUNTIMEWARNING_HPP #include "pythonic/include/builtins/RuntimeWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(RuntimeWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/StopIteration.hpp000066400000000000000000000004451416264035500244420ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STOPITERATION_HPP #define PYTHONIC_BUILTIN_STOPITERATION_HPP #include "pythonic/include/builtins/StopIteration.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(StopIteration) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/SyntaxError.hpp000066400000000000000000000004351416264035500241350ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SYNTAXERROR_HPP #define PYTHONIC_BUILTIN_SYNTAXERROR_HPP #include "pythonic/include/builtins/SyntaxError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(SyntaxError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/SyntaxWarning.hpp000066400000000000000000000004451416264035500244520ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SYNTAXWARNING_HPP #define PYTHONIC_BUILTIN_SYNTAXWARNING_HPP #include "pythonic/include/builtins/SyntaxWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(SyntaxWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/SystemError.hpp000066400000000000000000000004351416264035500241330ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SYSTEMERROR_HPP #define PYTHONIC_BUILTIN_SYSTEMERROR_HPP #include "pythonic/include/builtins/SystemError.hpp" #include 
"pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(SystemError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/SystemExit.hpp000066400000000000000000000004311416264035500237470ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SYSTEMEXIT_HPP #define PYTHONIC_BUILTIN_SYSTEMEXIT_HPP #include "pythonic/include/builtins/SystemExit.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(SystemExit) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/TabError.hpp000066400000000000000000000004211416264035500233500ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_TABERROR_HPP #define PYTHONIC_BUILTIN_TABERROR_HPP #include "pythonic/include/builtins/TabError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(TabError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/True.hpp000066400000000000000000000001731416264035500225530ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_TRUE_HPP #define PYTHONIC_BUILTIN_TRUE_HPP #include "pythonic/include/builtins/True.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/TypeError.hpp000066400000000000000000000004251416264035500235670ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_TYPEERROR_HPP #define PYTHONIC_BUILTIN_TYPEERROR_HPP #include "pythonic/include/builtins/TypeError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(TypeError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/UnboundLocalError.hpp000066400000000000000000000004651416264035500252370ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_UNBOUNDLOCALERROR_HPP #define PYTHONIC_BUILTIN_UNBOUNDLOCALERROR_HPP #include "pythonic/include/builtins/UnboundLocalError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { 
PYTHONIC_EXCEPTION_IMPL(UnboundLocalError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/UnicodeError.hpp000066400000000000000000000004411416264035500242320ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_UNICODEERROR_HPP #define PYTHONIC_BUILTIN_UNICODEERROR_HPP #include "pythonic/include/builtins/UnicodeError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(UnicodeError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/UnicodeWarning.hpp000066400000000000000000000004511416264035500245470ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_UNICODEWARNING_HPP #define PYTHONIC_BUILTIN_UNICODEWARNING_HPP #include "pythonic/include/builtins/UnicodeWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(UnicodeWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/UserWarning.hpp000066400000000000000000000004351416264035500241010ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_USERWARNING_HPP #define PYTHONIC_BUILTIN_USERWARNING_HPP #include "pythonic/include/builtins/UserWarning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(UserWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/ValueError.hpp000066400000000000000000000004311416264035500237170ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_VALUEERROR_HPP #define PYTHONIC_BUILTIN_VALUEERROR_HPP #include "pythonic/include/builtins/ValueError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(ValueError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/Warning.hpp000066400000000000000000000004151416264035500232400ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_WARNING_HPP #define PYTHONIC_BUILTIN_WARNING_HPP #include 
"pythonic/include/builtins/Warning.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(Warning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/ZeroDivisionError.hpp000066400000000000000000000004651416264035500252760ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ZERODIVISIONERROR_HPP #define PYTHONIC_BUILTIN_ZERODIVISIONERROR_HPP #include "pythonic/include/builtins/ZeroDivisionError.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_IMPL(ZeroDivisionError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/abs.hpp000066400000000000000000000002321416264035500223750ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ABS_HPP #define PYTHONIC_BUILTIN_ABS_HPP #include "pythonic/include/builtins/abs.hpp" #include "pythonic/numpy/abs.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/all.hpp000066400000000000000000000006371416264035500224110ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ALL_HPP #define PYTHONIC_BUILTIN_ALL_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/include/builtins/all.hpp" PYTHONIC_NS_BEGIN namespace builtins { template bool all(Iterable &&s) { auto iend = s.end(); for (auto iter = s.begin(); iter != iend; ++iter) if (!*iter) return false; return true; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/any.hpp000066400000000000000000000006361416264035500224270ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ANY_HPP #define PYTHONIC_BUILTIN_ANY_HPP #include "pythonic/include/builtins/any.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template bool any(Iterable &&s) { auto iend = s.end(); for (auto iter = s.begin(); iter != iend; ++iter) if (*iter) return true; return false; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/assert.hpp000066400000000000000000000007471416264035500231440ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ASSERT_HPP #define PYTHONIC_BUILTIN_ASSERT_HPP #include "pythonic/include/builtins/assert.hpp" #include "pythonic/builtins/AssertionError.hpp" #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN void pythran_assert(bool cond) { #ifndef NDEBUG if (!cond) throw types::AssertionError(); #endif } void pythran_assert(bool cond, types::str const &what) { #ifndef NDEBUG if (!cond) throw types::AssertionError(what); #endif } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/bin.hpp000066400000000000000000000027471416264035500224150ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_BIN_HPP #define PYTHONIC_BUILTIN_BIN_HPP #include "pythonic/include/builtins/bin.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace builtins { template typename std::enable_if::value, types::str>::type bin(T const &v) { using UT = typename std::make_unsigned::type; if (v < T{0}) if (v == std::numeric_limits::min()) { // In this special case, calling -v would overflow so // a special case is needed. types::str res; auto &backend = res.chars(); backend.resize(8 * sizeof(T) + 3); auto it = backend.begin(); *it++ = '-'; *it++ = '0'; *it++ = 'b'; *it++ = '1'; std::fill(it, backend.end(), '0'); return res; } else return "-" + bin(-v); else if (v == T{0}) return "0b0"; else { // Due to rounding errors, we cannot use std::log2(v) // to accuratly find length. 
size_t len = (8 * sizeof(UT)) - 1; UT i{UT{1} << len}; while (!(i & v)) { i >>= 1; len--; } types::str res; res.reserve(2 + len); auto &backend = res.chars(); backend.append("0b"); for (; i; i >>= 1) if (v & i) backend.append("1"); else backend.append("0"); return res; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/bool_.hpp000066400000000000000000000013331416264035500227250ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_BOOL_HPP #define PYTHONIC_BUILTIN_BOOL_HPP #include "pythonic/include/builtins/bool_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace functor { template bool bool_::operator()(T const &val) const { return static_cast(val); } template bool bool_::operator()(std::tuple const &val) const { return sizeof...(Ts); } template bool bool_::operator()(types::array const &val) const { return N; } bool bool_::operator()() const { return false; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/chr.hpp000066400000000000000000000005261416264035500224120ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_CHR_HPP #define PYTHONIC_BUILTIN_CHR_HPP #include "pythonic/include/builtins/chr.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template types::str chr(T const &v) { return types::str((char)v); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/complex.hpp000066400000000000000000000006161416264035500233050ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_COMPLEX_HPP #define PYTHONIC_BUILTIN_COMPLEX_HPP #include "pythonic/include/builtins/complex.hpp" #include "pythonic/types/complex.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace functor { complex::type complex::operator()(double v0, double v1) const { return {v0, v1}; } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/complex/000077500000000000000000000000001416264035500225715ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/complex/conjugate.hpp000066400000000000000000000003121416264035500252550ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_COMPLEX_CONJUGATE_HPP #define PYTHONIC_BUILTIN_COMPLEX_CONJUGATE_HPP #include "pythonic/include/builtins/complex/conjugate.hpp" #include "pythonic/numpy/conjugate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict.hpp000066400000000000000000000021051416264035500225540ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_HPP #define PYTHONIC_BUILTIN_DICT_HPP #include "pythonic/include/builtins/dict.hpp" #include "pythonic/types/dict.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { types::empty_dict dict() { return types::empty_dict(); } template types::dict dict(types::dict const &other) { return other.copy(); } template auto dict(Iterable &&iterable) -> types::dict< typename std::decay(*iterable.begin()))>::type, typename std::decay(*iterable.begin()))>::type> { types::dict< typename std::decay(*iterable.begin()))>::type, typename std::decay(*iterable.begin()))>::type> out = types::empty_dict(); for (auto const &i : iterable) out[std::get<0>(i)] = std::get<1>(i); return out; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/000077500000000000000000000000001416264035500220455ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/clear.hpp000066400000000000000000000002701416264035500236430ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_CLEAR_HPP #define PYTHONIC_BUILTIN_DICT_CLEAR_HPP #include "pythonic/include/builtins/dict/clear.hpp" #include "pythonic/__dispatch__/clear.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/copy.hpp000066400000000000000000000002641416264035500235320ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_COPY_HPP #define PYTHONIC_BUILTIN_DICT_COPY_HPP #include "pythonic/include/builtins/dict/copy.hpp" #include "pythonic/__dispatch__/copy.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/fromkeys.hpp000066400000000000000000000013501416264035500244140ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_FROMKEYS_HPP #define PYTHONIC_BUILTIN_DICT_FROMKEYS_HPP #include "pythonic/include/builtins/dict/fromkeys.hpp" #include "pythonic/types/dict.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template types::dict::type::value_type, V> fromkeys(Iterable &&iter, V const &v) { types::dict::type::value_type, V> D = types::empty_dict(); // Allocate default capacity to dict for (auto const &i : iter) D[i] = v; return D; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/get.hpp000066400000000000000000000015121416264035500233340ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_GET_HPP #define PYTHONIC_BUILTIN_DICT_GET_HPP #include "pythonic/include/builtins/dict/get.hpp" #include "pythonic/types/dict.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template typename __combined::type get(types::dict const &d, W const &k, X const &default_) { return d.get(k, default_); } template types::none get(types::dict const &d, W const &k) { return d.get(k); } template X get(types::empty_dict const &, W const &, X const &default_) { return default_; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/items.hpp000066400000000000000000000010041416264035500236720ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_ITEMS_HPP #define PYTHONIC_BUILTIN_DICT_ITEMS_HPP #include 
"pythonic/include/builtins/dict/items.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/dict.hpp" #include "pythonic/include/types/list.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template auto items(D &&d) -> decltype(std::forward(d).items()) { return std::forward(d).items(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/keys.hpp000066400000000000000000000010761416264035500235350ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_KEYS_HPP #define PYTHONIC_BUILTIN_DICT_KEYS_HPP #include "pythonic/include/builtins/dict/keys.hpp" #include "pythonic/types/dict.hpp" #include "pythonic/types/list.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { // We need a copy here for lvalue like : // for i in {"a": "b", "c": "d"}.keys(): // pass template auto keys(D &&d) -> decltype(std::forward(d).keys()) { return std::forward(d).keys(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/pop.hpp000066400000000000000000000002601416264035500233520ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_POP_HPP #define PYTHONIC_BUILTIN_DICT_POP_HPP #include "pythonic/include/builtins/dict/pop.hpp" #include "pythonic/__dispatch__/pop.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/popitem.hpp000066400000000000000000000007251416264035500242370ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_POPITEM_HPP #define PYTHONIC_BUILTIN_DICT_POPITEM_HPP #include "pythonic/include/builtins/dict/popitem.hpp" #include "pythonic/types/dict.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template auto popitem(D &&d) -> decltype(std::forward(d).popitem()) { return std::forward(d).popitem(); } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/setdefault.hpp000066400000000000000000000016621416264035500247230ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_SETDEFAULT_HPP #define PYTHONIC_BUILTIN_DICT_SETDEFAULT_HPP #include "pythonic/include/builtins/dict/setdefault.hpp" #include "pythonic/types/dict.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template V &setdefault(types::dict &d, W const &k, X const &default_) { return d.setdefault(k, default_); } template types::none setdefault(types::dict &d, W const &k) { return d.get(k); } template V setdefault(types::dict &&d, W const &k, X const &default_) { return d.setdefault(k, default_); } template types::none setdefault(types::dict &&d, W const &k) { return d.get(k); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/update.hpp000066400000000000000000000002741416264035500240430ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_UPDATE_HPP #define PYTHONIC_BUILTIN_DICT_UPDATE_HPP #include "pythonic/include/builtins/dict/update.hpp" #include "pythonic/__dispatch__/update.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/dict/values.hpp000066400000000000000000000006751416264035500240650ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DICT_VALUES_HPP #define PYTHONIC_BUILTIN_DICT_VALUES_HPP #include "pythonic/include/builtins/dict/values.hpp" #include "pythonic/types/dict.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template auto values(D &&d) -> decltype(std::forward(d).values()) { return std::forward(d).values(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/divmod.hpp000066400000000000000000000007361416264035500231230ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_DIVMOD_HPP #define PYTHONIC_BUILTIN_DIVMOD_HPP #include "pythonic/include/builtins/divmod.hpp" #include "pythonic/types/tuple.hpp" #include 
"pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto divmod(T0 const &t0, T1 const &t1) // other types are left over -> decltype(types::make_tuple(t0 / t1, t0 % t1)) { return types::make_tuple(t0 / t1, t0 % t1); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/enumerate.hpp000066400000000000000000000053571416264035500236320ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ENUMERATE_HPP #define PYTHONIC_BUILTIN_ENUMERATE_HPP #include "pythonic/include/builtins/enumerate.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { /// enumerate_iterator implementation template enumerate_iterator::enumerate_iterator() { } template enumerate_iterator::enumerate_iterator(Iterator const &iter, long first) : value(first), iter(iter) { } template enumerate_iterator &enumerate_iterator:: operator+=(long n) { value += n, iter += n; return *this; } // Comparison operators can't use value as end() doesn't have a valid // value content // du to the lake of size information for generator // TODO : We could handle case with && without size if there is a // performances benefits template bool enumerate_iterator:: operator!=(enumerate_iterator const &other) const { return !(*this == other); } template bool enumerate_iterator:: operator<(enumerate_iterator const &other) const { return iter < other.iter; } template bool enumerate_iterator:: operator==(enumerate_iterator const &other) const { return iter == other.iter; } template long enumerate_iterator:: operator-(enumerate_iterator const &other) const { return iter - other.iter; } /// details::enumerate implementation template enumerate::enumerate() { } template enumerate::enumerate(Iterable seq, long first) : Iterable(seq), iterator(Iterable::begin(), first), end_iter(Iterable::end(), -1) { } template typename enumerate::iterator &enumerate::begin() { return *this; } template typename enumerate::iterator const & 
enumerate::begin() const { return *this; } template typename enumerate::iterator enumerate::end() const { return end_iter; } } /// enumerate implementation template details::enumerate::type>::type> enumerate(Iterable &&seq, long first) { return {std::forward(seq), first}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file.hpp000066400000000000000000000006731416264035500225600ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_HPP #define PYTHONIC_BUILTIN_FILE_HPP #include "pythonic/include/builtins/file.hpp" #include "pythonic/types/file.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { types::file file(types::str const &filename, types::str const &strmode) { return {filename, strmode}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/000077500000000000000000000000001416264035500220415ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/file/close.hpp000066400000000000000000000006461416264035500236650ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_CLOSE_HPP #define PYTHONIC_BUILTIN_FILE_CLOSE_HPP #include "pythonic/include/builtins/file/close.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void close(types::file &f) { f.close(); } void close(types::file &&f) { f.close(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/fileno.hpp000066400000000000000000000005721416264035500240320ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_FILENO_HPP #define PYTHONIC_BUILTIN_FILE_FILENO_HPP #include "pythonic/include/builtins/file/fileno.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { long fileno(types::file const &f) { return f.fileno(); } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/file/flush.hpp000066400000000000000000000006461416264035500237010ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_FLUSH_HPP #define PYTHONIC_BUILTIN_FILE_FLUSH_HPP #include "pythonic/include/builtins/file/flush.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void flush(types::file &f) { f.flush(); } void flush(types::file &&f) { f.flush(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/isatty.hpp000066400000000000000000000005721416264035500240730ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_ISATTY_HPP #define PYTHONIC_BUILTIN_FILE_ISATTY_HPP #include "pythonic/include/builtins/file/isatty.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { bool isatty(types::file const &f) { return f.isatty(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/next.hpp000066400000000000000000000002641416264035500235320ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_NEXT_HPP #define PYTHONIC_BUILTIN_FILE_NEXT_HPP #include "pythonic/include/builtins/file/next.hpp" #include "pythonic/__dispatch__/next.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/read.hpp000066400000000000000000000007701416264035500234710ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_READ_HPP #define PYTHONIC_BUILTIN_FILE_READ_HPP #include "pythonic/include/builtins/file/read.hpp" #include "pythonic/types/file.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { types::str read(types::file &f, long size) { return f.read(size); } types::str read(types::file &&f, long size) { return f.read(size); } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/file/readline.hpp000066400000000000000000000011111416264035500243270ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_READLINE_HPP #define PYTHONIC_BUILTIN_FILE_READLINE_HPP #include "pythonic/include/builtins/file/readline.hpp" #include "pythonic/types/file.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { types::str readline(types::file &f, long size) { return size < 0 ? f.readline() : f.readline(size); } types::str readline(types::file &&f, long size) { return size < 0 ? f.readline() : f.readline(size); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/readlines.hpp000066400000000000000000000011551416264035500245220ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_READLINES_HPP #define PYTHONIC_BUILTIN_FILE_READLINES_HPP #include "pythonic/include/builtins/file/readlines.hpp" #include "pythonic/types/file.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { template types::list readlines(F &&f) { return f.readlines(); } template types::list readlines(F &&f, long sizehint) { return f.readlines(sizehint); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/seek.hpp000066400000000000000000000012761416264035500235070ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_SEEK_HPP #define PYTHONIC_BUILTIN_FILE_SEEK_HPP #include "pythonic/include/builtins/file/seek.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void seek(types::file &f, long offset) { f.seek(offset); } void seek(types::file &&f, long offset) { // Nothing have to be done as it is a lvalue } void seek(types::file &f, long offset, long whence) { f.seek(offset, whence); } void seek(types::file &&f, long 
offset, long whence) { // Nothing have to be done as it is a lvalue } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/tell.hpp000066400000000000000000000005601416264035500235130ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_TELL_HPP #define PYTHONIC_BUILTIN_FILE_TELL_HPP #include "pythonic/include/builtins/file/tell.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { long tell(types::file const &f) { return f.tell(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/truncate.hpp000066400000000000000000000011401416264035500243730ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_TRUNCATE_HPP #define PYTHONIC_BUILTIN_FILE_TRUNCATE_HPP #include "pythonic/include/builtins/file/truncate.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void truncate(types::file &f) { f.truncate(); } void truncate(types::file &&f) { f.truncate(); } void truncate(types::file &f, long size) { f.truncate(size); } void truncate(types::file &&f, long size) { f.truncate(size); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/write.hpp000066400000000000000000000010121416264035500236760ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILE_WRITE_HPP #define PYTHONIC_BUILTIN_FILE_WRITE_HPP #include "pythonic/include/builtins/file/write.hpp" #include "pythonic/types/file.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { long write(types::file &f, types::str const &str) { return f.write(str); } long write(types::file &&f, types::str const &str) { return f.write(str); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/file/writelines.hpp000066400000000000000000000006631416264035500247440ustar00rootroot00000000000000#ifndef 
PYTHONIC_BUILTIN_FILE_WRITELINES_HPP #define PYTHONIC_BUILTIN_FILE_WRITELINES_HPP #include "pythonic/include/builtins/file/writelines.hpp" #include "pythonic/types/file.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { template void writelines(F &&f, T const &sequence) { f.writelines(sequence); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/filter.hpp000066400000000000000000000070201416264035500231170ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FILTER_HPP #define PYTHONIC_BUILTIN_FILTER_HPP #include "pythonic/include/builtins/filter.hpp" #include "pythonic/utils/iterator.hpp" #include "pythonic/itertools/common.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace details { template bool filter_iterator::test_filter(std::false_type) { return op(*iter); } template bool filter_iterator::test_filter(std::true_type) { return *iter; } template filter_iterator::filter_iterator(Operator _op, List0 &_seq) : op(_op), iter(_seq.begin()), iter_end(_seq.end()) { if (!test_filter(std::is_same())) next_value(); } template filter_iterator::filter_iterator(itertools::npos, Operator _op, List0 &_seq) : op(_op), iter(_seq.end()), iter_end(_seq.end()) { } template typename List0::value_type filter_iterator:: operator*() const { return *iter; } template filter_iterator &filter_iterator:: operator++() { next_value(); return *this; } template void filter_iterator::next_value() { while (++iter != iter_end) { if (test_filter(std::is_same())) return; } } template bool filter_iterator:: operator==(filter_iterator const &other) const { return !(iter != other.iter); } template bool filter_iterator:: operator!=(filter_iterator const &other) const { return iter != other.iter; } template bool filter_iterator:: operator<(filter_iterator const &other) const { return iter != other.iter; } template filter::filter(Operator _op, List0 const &_seq) : utils::iterator_reminder(_seq), 
iterator(_op, this->values), end_iter(itertools::npos(), _op, this->values) { } template typename filter::iterator &filter::begin() { return *this; } template typename filter::iterator const & filter::begin() const { return *this; } template typename filter::iterator const & filter::end() const { return end_iter; } } template details::filter::type>::type, typename std::remove_cv< typename std::remove_reference::type>::type> filter(Operator &&_op, List0 &&_seq) { return {std::forward(_op), std::forward(_seq)}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/float_.hpp000066400000000000000000000007071416264035500231030ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FLOAT_HPP #define PYTHONIC_BUILTIN_FLOAT_HPP #include "pythonic/include/builtins/float_.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace functor { template float_::type float_::operator()(T &&t) const { return static_cast(t); } float_::type float_::operator()() const { return 0.; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/float_/000077500000000000000000000000001416264035500223665ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/float_/is_integer.hpp000066400000000000000000000005721416264035500252330ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_FLOAT_ISINTEGER_HPP #define PYTHONIC_BUILTIN_FLOAT_ISINTEGER_HPP #include "pythonic/include/builtins/float_/is_integer.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace float_ { bool is_integer(double d) { return std::trunc(d) == d; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/getattr.hpp000066400000000000000000000002041416264035500233010ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_GETATTR_HPP #define PYTHONIC_BUILTIN_GETATTR_HPP #include "pythonic/include/builtins/getattr.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/hex.hpp000066400000000000000000000006371416264035500224250ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_HEX_HPP #define PYTHONIC_BUILTIN_HEX_HPP #include "pythonic/include/builtins/hex.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/str.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { template types::str hex(T const &v) { std::ostringstream oss; oss << "0x" << std::hex << v; return oss.str(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/id.hpp000066400000000000000000000013621416264035500222310ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ID_HPP #define PYTHONIC_BUILTIN_ID_HPP #include "pythonic/include/builtins/id.hpp" #include "pythonic/utils/functor.hpp" /* * We use uintptr_t conversion because on windows 64 bits, sizeof(void*) == 8 * && sizeof(long) == 4. Because of this, void* to long is forbidden but * void* -> uintptr_t -> long is allowed * Accuracy is lost this way... */ PYTHONIC_NS_BEGIN namespace builtins { template long id(T const &t) { return t.id(); } long id(long const &t) { return reinterpret_cast(&t); } long id(double const &t) { return reinterpret_cast(&t); } long id(bool const &t) { return reinterpret_cast(&t); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/in.hpp000066400000000000000000000016601416264035500222440ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_IN_HPP #define PYTHONIC_BUILTIN_IN_HPP #include "pythonic/include/builtins/in.hpp" #include "pythonic/types/traits.hpp" #include PYTHONIC_NS_BEGIN namespace details { template struct in { template bool operator()(T &&t, V const &v) const; }; template <> template bool in::operator()(T &&t, V const &v) const { return std::find(t.begin(), t.end(), v) != t.end(); } template <> template bool in::operator()(T &&t, V const &v) const { return t.contains(v); } } template bool in(T &&t, V const &v) { using RT = typename std::remove_cv::type>::type; static bool 
constexpr has_contains = types::has_contains::value; return details::in()(std::forward(t), v); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/int_.hpp000066400000000000000000000015171416264035500225700ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_INT_HPP #define PYTHONIC_BUILTIN_INT_HPP #include "pythonic/include/builtins/int_.hpp" #include "pythonic/types/str.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace functor { int_::type int_::operator()(char const t[], long base) const { return std::strtol(t, nullptr, base); } int_::type int_::operator()(types::str const &t, long base) const { return (*this)(t.c_str(), base); } int_::type int_::operator()(types::chr const &t, long base) const { char tmp[2] = {t.c, 0}; return (*this)(&tmp[0], base); } template int_::type int_::operator()(T &&t) const { return static_cast(t); } int_::type int_::operator()() const { return 0L; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/isinstance.hpp000066400000000000000000000003061416264035500237720ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ISINSTANCE_HPP #define PYTHONIC_BUILTIN_ISINSTANCE_HPP #include "pythonic/include/builtins/isinstance.hpp" #include "pythonic/include/builtins/pythran/is_none.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/iter.hpp000066400000000000000000000021421416264035500225750ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ITER_HPP #define PYTHONIC_BUILTIN_ITER_HPP #include "pythonic/include/builtins/iter.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace details { /// details iter implementation template iter::iter() { } // FIXME : There is a dangling reference as data.begin() is ! 
the one // from data "saved" in the "iter" struct template iter::iter(T data) : iterator(data.begin()), _end(data.end()), data(data) { } template typename iter::iterator &iter::begin() { return *this; } template typename iter::iterator const &iter::begin() const { return *this; } template typename iter::iterator const &iter::end() const { return _end; } } /// iter implementation template details::iter< typename std::remove_cv::type>::type> iter(T &&t) { return {std::forward(t)}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/len.hpp000066400000000000000000000010221416264035500224040ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LEN_HPP #define PYTHONIC_BUILTIN_LEN_HPP #include "pythonic/include/builtins/len.hpp" #include "pythonic/types/traits.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { template long len(std::tuple const &) { return sizeof...(Types); } template typename std::enable_if::value, long>::type len(T const &t) { return t.size(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list.hpp000066400000000000000000000017671416264035500226210ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_HPP #define PYTHONIC_BUILTIN_LIST_HPP #include "pythonic/include/builtins/list.hpp" #include "pythonic/types/list.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { inline types::empty_list list() { return types::empty_list(); } inline types::empty_list list(types::empty_list) { return types::empty_list(); } template types::list::type::iterator>::value_type>:: type> list(Iterable &&t) { return types::list::type::iterator>::value_type>::type>(t.begin(), t.end()); } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/list/000077500000000000000000000000001416264035500220755ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/list/append.hpp000066400000000000000000000013411416264035500240540ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_APPEND_HPP #define PYTHONIC_BUILTIN_LIST_APPEND_HPP #include "pythonic/include/builtins/list/append.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template types::none_type append(types::list &seq, F &&value) { seq.push_back(std::forward(value)); return builtins::None; } template types::none_type append(types::list &&seq, F &&value) { seq.push_back(std::forward(value)); return builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list/count.hpp000066400000000000000000000002701416264035500237350ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_COUNT_HPP #define PYTHONIC_BUILTIN_LIST_COUNT_HPP #include "pythonic/include/builtins/list/count.hpp" #include "pythonic/__dispatch__/count.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list/extend.hpp000066400000000000000000000015771416264035500241070ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_EXTEND_HPP #define PYTHONIC_BUILTIN_LIST_EXTEND_HPP #include "pythonic/include/builtins/list/extend.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template typename std::enable_if< !std::is_same::type, types::empty_list>::value, types::none_type>::type extend(T0 &&seq, T1 const &add) { std::forward(seq) += add; return {}; } template typename std::enable_if< std::is_same::type, types::empty_list>::value, types::none_type>::type extend(T0 &&seq, T1 
const &add) { return {}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list/insert.hpp000066400000000000000000000014521416264035500241140ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_INSERT_HPP #define PYTHONIC_BUILTIN_LIST_INSERT_HPP #include "pythonic/include/builtins/list/insert.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { // TODO : range_analysis may be use to have a "fast insert" function. template types::none_type insert(types::list &seq, long n, F &&value) { n = n % (1 + seq.size()); // +1 because we want to be able to insert at // the end of seq if (n < 0) n += seq.size(); seq.insert(n, std::forward(value)); return builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list/pop.hpp000066400000000000000000000002601416264035500234020ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_POP_HPP #define PYTHONIC_BUILTIN_LIST_POP_HPP #include "pythonic/include/builtins/list/pop.hpp" #include "pythonic/__dispatch__/pop.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list/remove.hpp000066400000000000000000000002741416264035500241060ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_REMOVE_HPP #define PYTHONIC_BUILTIN_LIST_REMOVE_HPP #include "pythonic/include/builtins/list/remove.hpp" #include "pythonic/__dispatch__/remove.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list/reverse.hpp000066400000000000000000000010351416264035500242600ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_REVERSE_HPP #define PYTHONIC_BUILTIN_LIST_REVERSE_HPP #include "pythonic/include/builtins/list/reverse.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace 
list { template types::none_type reverse(types::list &seq) { std::reverse(seq.begin(), seq.end()); return builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/list/sort.hpp000066400000000000000000000014611416264035500235770ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_LIST_SORT_HPP #define PYTHONIC_BUILTIN_LIST_SORT_HPP #include "pythonic/include/builtins/list/sort.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/pdqsort.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template types::none_type sort(types::list &seq) { pdqsort(seq.begin(), seq.end()); return builtins::None; } template types::none_type sort(types::list &seq, K key) { pdqsort(seq.begin(), seq.end(), [&key](T const &self, T const &other) { return key(self) < key(other); }); return builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/map.hpp000066400000000000000000000160721416264035500224160ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_MAP_HPP #define PYTHONIC_BUILTIN_MAP_HPP #include "pythonic/include/builtins/map.hpp" #include "pythonic/itertools/common.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/fwd.hpp" #include "pythonic/utils/int_.hpp" #include "pythonic/utils/iterator.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/seq.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { template template map_iterator::map_iterator(Operator const &op, std::tuple &_iters, utils::index_sequence) : it(std::get(_iters).begin()...), _op(op) { } template template map_iterator::map_iterator(itertools::npos, Operator const &op, std::tuple &_iters, utils::index_sequence) : it(std::get(_iters).end()...), _op(op) { } template template typename map_res::type map_iterator::get_value(utils::index_sequence, 
std::false_type) const { return _op(*std::get(it)...); } template template typename map_res::type map_iterator::get_value(utils::index_sequence, std::true_type) const { return types::make_tuple(*std::get(it)...); } template typename map_res::type map_iterator:: operator*() const { return get_value(utils::make_index_sequence{}, std::is_same()); } template template void map_iterator::next(utils::index_sequence) { utils::fwd(++std::get(it)...); } template map_iterator &map_iterator:: operator++() { next(utils::make_index_sequence{}); return *this; } template template void map_iterator::advance(long i, utils::int_) { std::get(it) += i; advance(i, utils::int_()); } template void map_iterator::advance(long i, utils::int_<0>) { std::get<0>(it) += i; } template map_iterator &map_iterator:: operator+=(long i) { advance(i, utils::int_()); return *this; } template map_iterator map_iterator:: operator+(long i) const { map_iterator other(*this); other += i; return other; } template template bool map_iterator::equal( map_iterator const &other, utils::int_) const { return std::get(other.it) == std::get(it) || equal(other, utils::int_()); } template bool map_iterator::equal( map_iterator const &other, utils::int_<0>) const { return std::get<0>(other.it) == std::get<0>(it); } template bool map_iterator:: operator==(map_iterator const &other) const { return equal(other, utils::int_()); } template bool map_iterator:: operator!=(map_iterator const &other) const { return !(*this == other); } template bool map_iterator:: operator<(map_iterator const &other) const { return !(*this == other); } template template long map_iterator::min_len( map_iterator const &other, utils::int_) const { return std::min((long)(std::get(it) - std::get(other.it)), min_len(other, utils::int_())); } template long map_iterator::min_len( map_iterator const &other, utils::int_<0>) const { return std::get<0>(it) - std::get<0>(other.it); } template long map_iterator:: operator-(map_iterator const &other) const { 
return min_len(other, utils::int_()); } template template map::map(Operator const &_op, Types &&... _iters) : utils::iterator_reminder( std::forward(_iters)...), map_iterator( _op, this->values, utils::make_index_sequence{}), end_iter(itertools::npos(), _op, this->values, utils::make_index_sequence{}) { } template typename map::iterator &map::begin() { return *this; } template typename map::iterator const & map::begin() const { return *this; } template typename map::iterator const & map::end() const { return end_iter; } } template auto map(Operator &&_op, Iter &&... iters) -> details::map< typename std::remove_cv< typename std::remove_reference::type>::type, typename types::iterator::type>::type>::type...> { return {std::forward(_op), std::forward(iters)...}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/max.hpp000066400000000000000000000011331416264035500224160ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_MAX_HPP #define PYTHONIC_BUILTIN_MAX_HPP #include "pythonic/include/builtins/max.hpp" #include "pythonic/builtins/minmax.hpp" #include "pythonic/operator_/lt.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto max(Types &&... values) -> decltype(details::minmax(operator_::functor::lt{}, std::forward(values)...)) { return details::minmax(operator_::functor::lt{}, std::forward(values)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/min.hpp000066400000000000000000000011331416264035500224140ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_MIN_HPP #define PYTHONIC_BUILTIN_MIN_HPP #include "pythonic/include/builtins/min.hpp" #include "pythonic/builtins/minmax.hpp" #include "pythonic/operator_/gt.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto min(Types &&... 
values) -> decltype(details::minmax(operator_::functor::gt{}, std::forward(values)...)) { return details::minmax(operator_::functor::gt{}, std::forward(values)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/minmax.hpp000066400000000000000000000026741416264035500231350ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_MINMAX_HPP #define PYTHONIC_BUILTIN_MINMAX_HPP #include "pythonic/include/builtins/minmax.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { template typename std::decay::type::value_type minmax(Op const &op, T &&t) { return *std::max_element(t.begin(), t.end(), op); } template typename std::decay::type::value_type minmax(Op const &op, T &&t, types::kwonly, F key) { using value_type = decltype(*t.begin()); return *std::max_element( t.begin(), t.end(), [op, key](value_type const &self, value_type const &other) { return op(key(self), key(other)); }); } template typename std::enable_if::value, typename __combined::type>::type minmax(Op const &op, T0 const &t0, T1 const &t1, Types const &... 
ts) { using value_type = typename __combined::type; std::initializer_list values = { static_cast(t0), static_cast(t1), static_cast(ts)...}; return minmax(op, values); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/next.hpp000066400000000000000000000007631416264035500226170ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_NEXT_HPP #define PYTHONIC_BUILTIN_NEXT_HPP #include "pythonic/include/builtins/next.hpp" #include "pythonic/builtins/StopIteration.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { template auto next(T &&y) -> decltype(*y) { if ((decltype(y.begin()) &)y != y.end()) { auto &&tmp = *y; ++y; return tmp; } else throw types::StopIteration(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/oct.hpp000066400000000000000000000007651416264035500224300ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_OCT_HPP #define PYTHONIC_BUILTIN_OCT_HPP #include "pythonic/include/builtins/oct.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { template types::str oct(T const &v) { std::ostringstream oss; oss << #if defined(__PYTHRAN__) && __PYTHRAN__ == 3 "0o" #else '0' #endif << std::oct << v; return oss.str(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/open.hpp000066400000000000000000000006251416264035500225770ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_OPEN_HPP #define PYTHONIC_BUILTIN_OPEN_HPP #include "pythonic/include/builtins/open.hpp" #include "pythonic/types/file.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { types::file open(types::str const &filename, types::str const &strmode) { return {filename, strmode}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/ord.hpp000066400000000000000000000010221416264035500224120ustar00rootroot00000000000000#ifndef 
PYTHONIC_BUILTIN_ORD_HPP #define PYTHONIC_BUILTIN_ORD_HPP #include "pythonic/include/builtins/ord.hpp" #include "pythonic/types/exceptions.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { long ord(types::str const &v) { if (v.size() != 1) throw types::TypeError( "ord() expected a character, but string of length " + std::to_string(v.size()) + " found"); return (long)v.chars()[0]; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pow.hpp000066400000000000000000000014731416264035500224450ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_POW_HPP #define PYTHONIC_BUILTIN_POW_HPP #include "pythonic/include/builtins/pow.hpp" #include "pythonic/numpy/power.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { double pow(long x, long y) { return std::pow((double)x, (double)y); } template long pow(long x, std::integral_constant) { if (N == 0) return 1; if (N == 1) return x; long tmp = pow(x, std::integral_constant{}); if (N % 2 == 0) return tmp * tmp; else return tmp * tmp * x; } template auto pow(Types &&... args) -> decltype(numpy::functor::power{}(std::forward(args)...)) { return numpy::functor::power{}(std::forward(args)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/print.hpp000066400000000000000000000020601416264035500227650ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PRINT_HPP #define PYTHONIC_BUILTIN_PRINT_HPP #include "pythonic/include/builtins/print.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { template std::ostream &print(std::ostream &os, T const &t) { return os << t; } std::ostream &print(std::ostream &os, bool const &t) { static char const repr[2][6] = {"False", "True\0"}; return os << repr[t]; } } void print_nonl() { } template void print_nonl(T const &value, Types const &... 
values) { details::print(std::cout, value); if (sizeof...(Types) > 0) std::cout << ' '; print_nonl(values...); } void print() { std::cout << std::endl; } template void print(T const &value, Types const &... values) { details::print(std::cout, value); if (sizeof...(values) > 0) std::cout << ' '; print(values...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/000077500000000000000000000000001416264035500226075ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/StaticIfBreak.hpp000066400000000000000000000007031416264035500257730ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_STATICIFBREAK_HPP #define PYTHONIC_BUILTIN_PYTHRAN_STATICIFBREAK_HPP #include "pythonic/include/builtins/pythran/StaticIfBreak.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/static_if.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfBreak StaticIfBreak(T const &arg) { return {arg}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/StaticIfCont.hpp000066400000000000000000000006761416264035500256630ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_STATICIFCONT_HPP #define PYTHONIC_BUILTIN_PYTHRAN_STATICIFCONT_HPP #include "pythonic/include/builtins/pythran/StaticIfCont.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/static_if.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfCont StaticIfCont(T const &arg) { return {arg}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/StaticIfNoReturn.hpp000066400000000000000000000007221416264035500265240ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_STATICIFNORETURN_HPP #define PYTHONIC_BUILTIN_PYTHRAN_STATICIFNORETURN_HPP #include "pythonic/include/builtins/pythran/StaticIfNoReturn.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/static_if.hpp" 
PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfNoReturn StaticIfNoReturn(T const &arg) { return {arg}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/StaticIfReturn.hpp000066400000000000000000000007101416264035500262240ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_STATICIFRETURN_HPP #define PYTHONIC_BUILTIN_PYTHRAN_STATICIFRETURN_HPP #include "pythonic/include/builtins/pythran/StaticIfReturn.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/static_if.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfReturn StaticIfReturn(T const &arg) { return {arg}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/abssqr.hpp000066400000000000000000000014351416264035500246160ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_ABSSQR_HPP #define PYTHONIC_BUILTIN_PYTHRAN_ABSSQR_HPP #include "pythonic/include/builtins/pythran/abssqr.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { namespace details { template T abssqr(T const &v) { return v * v; } template T abssqr(std::complex const &v) { return v.real() * v.real() + v.imag() * v.imag(); } } #define NUMPY_NARY_FUNC_NAME abssqr #define NUMPY_NARY_FUNC_SYM details::abssqr #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/and_.hpp000066400000000000000000000011431416264035500242200ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_AND_HPP #define PYTHONIC_BUILTIN_PYTHRAN_AND_HPP #include "pythonic/include/builtins/pythran/and_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/combined.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::lazy_combined_t 
and_(T0 &&v0, T1 &&v1) { auto &&val0 = std::forward(v0)(); if (val0) return (types::lazy_combined_t)std::forward(v1)(); else return (types::lazy_combined_t)val0; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/is_none.hpp000066400000000000000000000005021416264035500247470ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_IS_NONE_HPP #define PYTHONIC_BUILTIN_PYTHRAN_IS_NONE_HPP #include "pythonic/include/builtins/pythran/is_none.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/kwonly.hpp000066400000000000000000000002271416264035500246440ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_KWONLY_HPP #define PYTHONIC_BUILTIN_PYTHRAN_KWONLY_HPP #include "pythonic/include/builtins/pythran/kwonly.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/len_set.hpp000066400000000000000000000007771416264035500247640ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_LEN_SET_HPP #define PYTHONIC_BUILTIN_PYTHRAN_LEN_SET_HPP #include "pythonic/include/builtins/pythran/len_set.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template long len_set(Iterable const &s) { return std::set::value_type>(s.begin(), s.end()).size(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/make_shape.hpp000066400000000000000000000005051416264035500254150ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_MAKE_SHAPE_HPP #define PYTHONIC_BUILTIN_MAKE_SHAPE_HPP PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template pythonic::types::pshape make_shape(Args... 
args) { return {args...}; } } // pythran } // builtins PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/or_.hpp000066400000000000000000000011371416264035500241010ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_OR_HPP #define PYTHONIC_BUILTIN_PYTHRAN_OR_HPP #include "pythonic/include/builtins/pythran/or_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/combined.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::lazy_combined_t or_(T0 &&v0, T1 &&v1) { auto &&val0 = std::forward(v0)(); if (val0) return (types::lazy_combined_t)val0; else return (types::lazy_combined_t)std::forward(v1)(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/static_if.hpp000066400000000000000000000010441416264035500252640ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_STATIC_IF_HPP #define PYTHONIC_BUILTIN_PYTHRAN_STATIC_IF_HPP #include "pythonic/include/builtins/pythran/static_if.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/pythran/is_none.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template auto static_if(T const &cond, F0 f0, F1 f1) -> decltype(details::static_if{cond}(f0, f1)) { return details::static_if{cond}(f0, f1); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/pythran/static_list.hpp000066400000000000000000000021401416264035500256370ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_PYTHRAN_STATIC_LIST_HPP #define PYTHONIC_BUILTIN_PYTHRAN_STATIC_LIST_HPP #include "pythonic/include/builtins/pythran/static_list.hpp" #include "pythonic/builtins/list.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::static_list static_list(types::array const &other) { return other.template to_array(); } template types::static_list static_list(types::array &other) { return other.template 
to_array(); } template types::static_list static_list(types::array &&other) { return other.template to_array(); } template auto static_list(T &&other) -> decltype(pythonic::builtins::functor::list{}(std::forward(other))) { return pythonic::builtins::functor::list{}(std::forward(other)); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/range.hpp000066400000000000000000000053301416264035500227300ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_RANGE_HPP #define PYTHONIC_BUILTIN_RANGE_HPP #include "pythonic/include/builtins/range.hpp" #include #include "pythonic/builtins/range.hpp" #include "pythonic/types/list.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace { long _init_last(long _begin, long _end, long _step) { if (_step > 0) return _begin + std::max(0L, _step * ((_end - _begin + _step - 1) / _step)); else return _begin + std::min(0L, _step * ((_end - _begin + _step + 1) / _step)); } } range_iterator::range_iterator(long v, long s) : value_(v), step_(s) { } long range_iterator::operator*() const { return value_; } range_iterator &range_iterator::operator++() { value_ += step_; return *this; } range_iterator range_iterator::operator++(int) { range_iterator self(*this); value_ += step_; return self; } range_iterator &range_iterator::operator+=(long n) { value_ += step_ * n; return *this; } range_iterator &range_iterator::operator--() { value_ -= step_; return *this; } range_iterator range_iterator::operator--(int) { range_iterator self(*this); value_ -= step_; return self; } range_iterator &range_iterator::operator-=(long n) { value_ -= step_ * n; return *this; } bool range_iterator::operator!=(range_iterator const &other) const { return value_ != other.value_; } bool range_iterator::operator==(range_iterator const &other) const { return value_ == other.value_; } bool range_iterator::operator<(range_iterator const &other) const { const long sign = +1 | (step_ >> (sizeof(long) * CHAR_BIT - 1)); return sign * value_ < sign * 
other.value_; } long range_iterator::operator-(range_iterator const &other) const { return (value_ - other.value_) / step_; } range::range(long b, long e, long s) : begin_(b), end_(_init_last(b, e, s)), step_(s) { } range::range(long e) : begin_(0), end_(e), step_(1) { } range_iterator range::begin() const { return range_iterator(begin_, step_); } range_iterator range::end() const { return range_iterator(end_, step_); } typename range::reverse_iterator range::rbegin() const { return {end_ - step_, -step_}; } typename range::reverse_iterator range::rend() const { return {begin_ - step_, -step_}; } long range::size() const { return (end_ - begin_) / step_; } long range::operator[](long i) const { return begin_ + i * step_; } } PYTHONIC_NS_END /* overload std::get */ namespace std { template long get(pythonic::builtins::range const &t) { return t[I]; } } #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/reduce.hpp000066400000000000000000000022521416264035500231030ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_REDUCE_HPP #define PYTHONIC_BUILTIN_REDUCE_HPP #include "pythonic/include/builtins/reduce.hpp" #include "pythonic/utils/functor.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace builtins { template auto reduce(Operator op, Iterable s) -> decltype(op(std::declval::value_type>(), std::declval::value_type>())) { auto iter = s.begin(); auto r = *iter; ++iter; if (iter != s.end()) return std::accumulate(iter, s.end(), r, op); else return r; } template auto reduce(Operator op, Iterable s, T const &init) -> decltype(std::accumulate( s.begin(), s.end(), static_cast>(init), op)) { return std::accumulate( s.begin(), s.end(), static_cast>(init), op); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/reversed.hpp000066400000000000000000000022531416264035500234540ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_REVERSED_HPP #define PYTHONIC_BUILTIN_REVERSED_HPP #include "pythonic/include/builtins/reversed.hpp" #include 
"pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace details { template reversed::reversed() { } template reversed::reversed(Iterable const &iterable) : iterable(iterable) { } template typename reversed::iterator reversed::begin() { return iterable.rbegin(); } template typename reversed::iterator reversed::end() { return iterable.rend(); } template typename reversed::const_iterator reversed::begin() const { return iterable.rbegin(); } template typename reversed::const_iterator reversed::end() const { return iterable.rend(); } } template details::reversed reversed(Iterable const &iterable) { return {iterable}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/round.hpp000066400000000000000000000007621416264035500227670ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ROUND_HPP #define PYTHONIC_BUILTIN_ROUND_HPP #include "pythonic/include/builtins/round.hpp" #include "pythonic/builtins/pow.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { template double round(T const &v, size_t n) { T p = functor::pow()(10, n); return std::lround(v * p) / p; } template double round(T const &v) { return std::lround(v); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set.hpp000066400000000000000000000011221416264035500224220ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_HPP #define PYTHONIC_BUILTIN_SET_HPP #include "pythonic/include/builtins/set.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { inline types::empty_set set() { return types::empty_set(); } template inline types::set::type::iterator>::value_type> set(Iterable &&t) { return {t.begin(), t.end()}; } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/set/000077500000000000000000000000001416264035500217155ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/set/add.hpp000066400000000000000000000012401416264035500231530ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_ADD_HPP #define PYTHONIC_BUILTIN_SET_ADD_HPP #include "pythonic/include/builtins/set/add.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type add(types::set &s, F const &value) { s.add(value); return builtins::None; } template types::none_type add(types::set &&s, F const &value) { s.add(value); return builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/clear.hpp000066400000000000000000000002651416264035500235170ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_CLEAR_HPP #define PYTHONIC_BUILTIN_SET_CLEAR_HPP #include "pythonic/include/builtins/set/clear.hpp" #include "pythonic/__dispatch__/clear.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/copy.hpp000066400000000000000000000002611416264035500233770ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_COPY_HPP #define PYTHONIC_BUILTIN_SET_COPY_HPP #include "pythonic/include/builtins/set/copy.hpp" #include "pythonic/__dispatch__/copy.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/difference.hpp000066400000000000000000000022611416264035500245210ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_DIFFERENCE_HPP #define PYTHONIC_BUILTIN_SET_DIFFERENCE_HPP #include "pythonic/include/builtins/set/difference.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::set difference(types::set const &set, Types const &... 
others) { return set.difference(others...); } template types::set difference(types::set &&set, Types const &... others) { set.difference_update(others...); return set; } template types::empty_set difference(types::empty_set const &set, Types const &... others) { return types::empty_set(); } template types::set difference(types::set const &set) { return set; } template types::set difference(types::set &&set) { return set; } types::empty_set difference(types::empty_set const &set) { return types::empty_set(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/difference_update.hpp000066400000000000000000000020301416264035500260550ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_DIFFERENCEUPDATE_HPP #define PYTHONIC_BUILTIN_SET_DIFFERENCEUPDATE_HPP #include "pythonic/include/builtins/set/difference_update.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type difference_update(types::set &set, Types const &... others) { set.difference_update(others...); return {}; } template types::none_type difference_update(types::set &&set, Types const &... others) { // nothing to be done as we work on rvalue return {}; } template types::none_type difference_update(types::empty_set const &set, Types const &... 
others) { // nothing can be removed in set return {}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/discard.hpp000066400000000000000000000012721416264035500240410ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_DISCARD_HPP #define PYTHONIC_BUILTIN_SET_DISCARD_HPP #include "pythonic/include/builtins/set/discard.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template void discard(types::set &set, U const &elem) { set.discard(elem); } template void discard(types::set &&set, U const &elem) { // nothing to be done for lvalue } template void discard(types::empty_set const &set, U const &elem) { // nothing to remove in an empty_set } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/intersection.hpp000066400000000000000000000016521416264035500251400ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_INTERSECTION_HPP #define PYTHONIC_BUILTIN_SET_INTERSECTION_HPP #include "pythonic/include/builtins/set/intersection.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/set.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template typename __combined, Types...>::type intersection(types::set const &set, Types const &... others) { return set.intersection(others...); } /* No rvalue overload possible because of return type modification.: * >>> a = set([1,2,3]) * >>> b = set([1., 2., 3.]) * >>> a.intersection(b) * set([1.0, 2.0, 3.0]) */ template types::empty_set intersection(types::empty_set const &set, Types const &... 
others) { return types::empty_set(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/intersection_update.hpp000066400000000000000000000021541416264035500265000ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_INTERSECTIONUPDATE_HPP #define PYTHONIC_BUILTIN_SET_INTERSECTIONUPDATE_HPP #include "pythonic/include/builtins/set/intersection_update.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type intersection_update(types::set &set, Types const &... others) { set.intersection_update(others...); return {}; } template types::none_type intersection_update(types::set &&set, Types const &... others) { // If it is an rvalue, we don't really want to update return {}; } template types::none_type intersection_update(types::empty_set &&set, Types const &... others) { // If it is an empty_set, it is ! really updated otherwise we have a // typing issue return {}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/isdisjoint.hpp000066400000000000000000000011321416264035500246020ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_ISDISJOINT_HPP #define PYTHONIC_BUILTIN_SET_ISDISJOINT_HPP #include "pythonic/include/builtins/set/isdisjoint.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template bool isdisjoint(types::set const &calling_set, U const &arg_set) { return calling_set.isdisjoint(arg_set); } template bool isdisjoint(types::empty_set const &calling_set, U const &arg_set) { return true; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/issubset.hpp000066400000000000000000000010601416264035500242640ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_ISSUBSET_HPP #define PYTHONIC_BUILTIN_SET_ISSUBSET_HPP #include "pythonic/include/builtins/set/issubset.hpp" #include 
"pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template bool issubset(types::set const &set, U const &other) { return set.issubset(other); } template bool issubset(types::empty_set const &set, U const &other) { return true; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/issuperset.hpp000066400000000000000000000010751416264035500246370ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_ISSUPERSET_HPP #define PYTHONIC_BUILTIN_SET_ISSUPERSET_HPP #include "pythonic/include/builtins/set/issuperset.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template bool issuperset(types::set const &set, U const &other) { return set.issuperset(other); } template bool issuperset(types::empty_set const &set, U const &other) { return false; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/remove.hpp000066400000000000000000000002711416264035500237230ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_REMOVE_HPP #define PYTHONIC_BUILTIN_SET_REMOVE_HPP #include "pythonic/include/builtins/set/remove.hpp" #include "pythonic/__dispatch__/remove.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/symmetric_difference.hpp000066400000000000000000000016451416264035500266220ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_SYMMETRICDIFFERENCE_HPP #define PYTHONIC_BUILTIN_SET_SYMMETRICDIFFERENCE_HPP #include "pythonic/include/builtins/set/symmetric_difference.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template typename __combined, U>::type symmetric_difference(types::set const &set, U const &other) { return set.symmetric_difference(other); } /* No rvalue overload possible because of return type modification.: * >>> a = set([1, 2, 3]) * >>> b = set([2., 3., 4.]) * >>> 
a.symmetric_difference(b) * set([1.0, 4.0]) */ template typename __combined::type symmetric_difference(types::empty_set const &set, U const &other) { return other; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/symmetric_difference_update.hpp000066400000000000000000000021221416264035500301530ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_SYMMETRICDIFFERENCEUPDATE_HPP #define PYTHONIC_BUILTIN_SET_SYMMETRICDIFFERENCEUPDATE_HPP #include "pythonic/include/builtins/set/symmetric_difference_update.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type symmetric_difference_update(types::set &set, U const &other) { set.symmetric_difference_update(other); return {}; } template types::none_type symmetric_difference_update(types::set &&set, U const &other) { // nothing to be done on rvalue return {}; } template types::none_type symmetric_difference_update(types::empty_set const &set, U const &other) { // nothing otherwise empty_set have ! its correct type. return {}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/union_.hpp000066400000000000000000000020331416264035500237130ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SET_UNION_HPP #define PYTHONIC_BUILTIN_SET_UNION_HPP #include "pythonic/include/builtins/set/union_.hpp" #include "pythonic/types/set.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template typename __combined, Types...>::type union_(types::set const &set, Types const &... others) { return set.union_(others...); } template typename __combined::type union_(types::empty_set const &init, Types const &... 
others) { return union_(others...); } template types::set union_(types::set const &set) { return set; } template typename __combined::type union_(T const &set) { return {set}; } types::empty_set union_(types::empty_set const &init) { return types::empty_set(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/set/update.hpp000066400000000000000000000002511416264035500237060ustar00rootroot00000000000000#ifndef PYTHONIC_SET_UPDATE_HPP #define PYTHONIC_SET_UPDATE_HPP #include "pythonic/include/builtins/set/update.hpp" #include "pythonic/__dispatch__/update.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/slice.hpp000066400000000000000000000012561416264035500227360ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SLICE_HPP #define PYTHONIC_BUILTIN_SLICE_HPP #include "pythonic/include/builtins/slice.hpp" #include "pythonic/types/slice.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { types::contiguous_slice slice(types::none stop) { return {types::none(), stop}; } types::contiguous_slice slice(types::none start, types::none stop) { return {start, stop}; } types::slice slice(types::none start, types::none stop, types::none step) { return {start, stop, step}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/sorted.hpp000066400000000000000000000044041416264035500231350ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SORTED_HPP #define PYTHONIC_BUILTIN_SORTED_HPP #include "pythonic/include/builtins/sorted.hpp" #include "pythonic/types/list.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/pdqsort.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { template types::list::type::iterator>::value_type>::type> sorted(Iterable &&seq) { types::list::type::iterator>::value_type>::type> out(seq.begin(), seq.end()); pdqsort(out.begin(), out.end()); return out; } template types::list::type::iterator>::value_type>::type> sorted(Iterable &&seq, Key const &key, bool reverse) { using 
value_type = typename std::remove_cv::type::iterator>::value_type>::type; types::list out(seq.begin(), seq.end()); if (reverse) pdqsort(out.begin(), out.end(), [&key](value_type const &self, value_type const &other) { return key(self) > key(other); }); else pdqsort(out.begin(), out.end(), [&key](value_type const &self, value_type const &other) { return key(self) < key(other); }); return out; } template types::list::type::iterator>::value_type>::type> sorted(Iterable &&seq, types::none_type const &key, bool reverse) { using value_type = typename std::remove_cv::type::iterator>::value_type>::type; types::list out(seq.begin(), seq.end()); if (reverse) pdqsort(out.begin(), out.end(), [](value_type const &self, value_type const &other) { return self > other; }); else pdqsort(out.begin(), out.end()); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str.hpp000066400000000000000000000032301416264035500224410ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_HPP #define PYTHONIC_BUILTIN_STR_HPP #include "pythonic/include/builtins/str.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { template types::str str(T const &t) { std::ostringstream oss; oss << t; return oss.str(); } inline types::str str(bool b) { static char const repr[2][6] = {"False", "True\0"}; return repr[b]; } inline types::str str(long value) { /* adapted from http://www.jb.man.ac.uk/~slowe/cpp/itoa.html#performance */ // this buffer is large enough to hold the binary representation, so // the decimal representation will be ok char buffer[8 * (1 << sizeof(value))]; char *ptr = buffer, *ptr1 = buffer, tmp_char; long tmp_value; do { tmp_value = value; value /= 10; *ptr++ = "zyxwvutsrqponmlkjihgfedcba9876543210123456789abcdefghijklmn" "opqrstuvwxyz"[35 + (tmp_value - value * 10)]; } while (value); // Apply negative sign if (tmp_value < 0) *ptr++ = '-'; *ptr-- = '\0'; while 
(ptr1 < ptr) { tmp_char = *ptr; *ptr-- = *ptr1; *ptr1++ = tmp_char; } return buffer; } inline types::str str(double l) { // when using %g, only 6 significant bits are used, so this should be // enough. // Use snprintf though char buffer[8 * (1 << sizeof(l))]; snprintf(buffer, sizeof(buffer), "%g", l); return buffer; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/000077500000000000000000000000001416264035500217325ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/builtins/str/__mod__.hpp000066400000000000000000000025751416264035500240270ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_MOD_HPP #define PYTHONIC_BUILTIN_STR_MOD_HPP #include "pythonic/builtins/str/__mod__.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace str { namespace details { template void fmt(boost::format &f, Tuple const &a, utils::int_<1>) { f % std::get::value - 1>(a); } template void fmt(boost::format &f, Tuple const &a, utils::int_) { fmt(f % std::get::value - I>(a), a, utils::int_()); } } template types::str __mod__(types::str const &s, T const &arg) { const boost::format fmter(s.chars()); return (boost::format(fmter) % arg).str(); } template types::str __mod__(types::str const &s, std::tuple const &args) { boost::format fmter(s.chars()); details::fmt(fmter, args, utils::int_()); return fmter.str(); } template types::str __mod__(types::str const &s, types::array const &args) { boost::format fmter(s.chars()); details::fmt(fmter, args, utils::int_()); return fmter.str(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/capitalize.hpp000066400000000000000000000012161416264035500245700ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_CAPITALIZE_HPP #define PYTHONIC_BUILTIN_STR_CAPITALIZE_HPP #include "pythonic/include/builtins/str/capitalize.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" 
PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str capitalize(types::str const &s) { if (s.empty()) return s; else { types::str copy = s; copy.chars()[0] = ::toupper(s.chars()[0]); std::transform(s.chars().begin() + 1, s.chars().end(), copy.chars().begin() + 1, ::tolower); return copy; } } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/count.hpp000066400000000000000000000002651416264035500235760ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_COUNT_HPP #define PYTHONIC_BUILTIN_STR_COUNT_HPP #include "pythonic/include/builtins/str/count.hpp" #include "pythonic/__dispatch__/count.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/endswith.hpp000066400000000000000000000011121416264035500242630ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_ENDSWITH_HPP #define PYTHONIC_BUILTIN_STR_ENDSWITH_HPP #include "pythonic/include/builtins/str/endswith.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { bool endswith(types::str const &s, types::str const &suffix, long start, long end) { if (end == -1) end = s.size(); long rstart = end - suffix.size(); return rstart >= start && s.compare(rstart, suffix.size(), suffix) == 0; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/find.hpp000066400000000000000000000013701416264035500233640ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_FIND_HPP #define PYTHONIC_BUILTIN_STR_FIND_HPP #include "pythonic/include/builtins/str/find.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { long find(types::str const &s, types::str const &value, long start, long end) { if (end < 0) end += s.size(); long a = s.find(value, start); return (a > end) ? 
-1 : a; } long find(types::str const &s, types::str const &value, long start) { return find(s, value, start, s.size()); } long find(types::str const &s, types::str const &value) { return find(s, value, 0, s.size()); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/isalpha.hpp000066400000000000000000000007561416264035500240740ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_ISALPHA_HPP #define PYTHONIC_BUILTIN_STR_ISALPHA_HPP #include "pythonic/include/builtins/str/isalpha.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { bool isalpha(types::str const &s) { return !s.empty() && std::all_of(s.chars().begin(), s.chars().end(), (int (*)(int))std::isalpha); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/isdigit.hpp000066400000000000000000000010011416264035500240670ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_ISDIGIT_HPP #define PYTHONIC_BUILTIN_STR_ISDIGIT_HPP #include "pythonic/include/builtins/str/isdigit.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace str { bool isdigit(types::str const &s) { return !s.empty() && std::all_of(s.chars().begin(), s.chars().end(), (int (*)(int))std::isdigit); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/join.hpp000066400000000000000000000067351416264035500234150ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_JOIN_HPP #define PYTHONIC_BUILTIN_STR_JOIN_HPP #include "pythonic/include/builtins/str/join.hpp" #include "pythonic/builtins/len.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { template types::str join(S const &s, types::str const &iterable) { long ssize = std::distance(std::begin(s), std::end(s)) - (std::is_same::value ? 
0 : 1); /* first iterate over iterable to gather sizes */ size_t n = ssize * (iterable.size() - 1) + iterable.size(); std::string out(n, 0); auto iter = iterable.chars().begin(); auto oter = out.begin(); if (iter != iterable.chars().end()) { *oter++ = *iter++; if (ssize) for (; iter != iterable.chars().end(); ++iter) { for (auto &&v : s) *oter++ = v.chars()[0]; *oter++ = *iter; } else std::copy(iter, iterable.chars().end(), oter); } return {std::move(out)}; } template typename std::enable_if< !std::is_same::type>::type, types::str>::value && std::is_same< typename std::iterator_traits::type::iterator>::iterator_category, std::random_access_iterator_tag>::value, types::str>::type join(S const &s, Iterable &&iterable) { long ssize = builtins::functor::len{}(s); /* first iterate over iterable to gather sizes */ long iterable_size = std::distance(iterable.begin(), iterable.end()); if (iterable_size == 0) return ""; size_t n = ssize * (iterable_size - 1); for (auto const &iter : iterable) n += builtins::len(iter); std::string out(n, 0); auto iter = iterable.begin(); auto oter = out.begin(); if (iter != iterable.end()) { auto tmp = *iter; auto const &stmp = tmp.chars(); oter = std::copy(stmp.begin(), stmp.end(), oter); ++iter; if (ssize) for (; iter != iterable.end(); ++iter) { auto chars = s.chars(); oter = std::copy(std::begin(chars), std::begin(chars) + ssize, oter); auto tmp = *iter; auto const &stmp = tmp.chars(); oter = std::copy(stmp.begin(), stmp.end(), oter); } else for (; iter != iterable.end(); ++iter) { auto tmp = (*iter); auto const &stmp = tmp.chars(); oter = std::copy(stmp.begin(), stmp.end(), oter); } } return {std::move(out)}; } template typename std::enable_if< !std::is_same< typename std::iterator_traits::type::iterator>::iterator_category, std::random_access_iterator_tag>::value, types::str>::type join(S const &s, Iterable &&iterable) { types::str out; auto iter = iterable.begin(); if (iter != iterable.end()) { out += *iter; ++iter; for (; iter != 
iterable.end(); ++iter) { out += s; out += *iter; } } return out; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/lower.hpp000066400000000000000000000007731416264035500236020ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_LOWER_HPP #define PYTHONIC_BUILTIN_STR_LOWER_HPP #include "pythonic/include/builtins/str/lower.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str lower(types::str const &s) { types::str copy = s; std::transform(s.chars().begin(), s.chars().end(), copy.chars().begin(), ::tolower); return copy; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/lstrip.hpp000066400000000000000000000010671416264035500237640ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_LSTRIP_HPP #define PYTHONIC_BUILTIN_STR_LSTRIP_HPP #include "pythonic/include/builtins/str/lstrip.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str lstrip(types::str const &self, types::str const &to_del) { auto chars = self.chars(); auto stop = self.find_first_not_of(to_del); if (stop < 0) return {}; else return {chars.begin() + stop, chars.end()}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/replace.hpp000066400000000000000000000030301416264035500240520ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_REPLACE_HPP #define PYTHONIC_BUILTIN_STR_REPLACE_HPP #include "pythonic/include/builtins/str/replace.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str replace(types::str const &self, types::str const &old_pattern, types::str const &new_pattern, long count) { char const *needle = old_pattern.c_str(); char const *new_needle = new_pattern.c_str(); char const *new_needle_end = new_needle + new_pattern.size(); 
char const *haystack = self.c_str(); char const *haystack_next = strstr(haystack, needle); if (!count || !haystack_next) { return {haystack}; } else { size_t n = 1 + std::max(self.size(), self.size() * (1 + new_pattern.size()) / (1 + old_pattern.size())); std::unique_ptr buffer{new char[n]}; char *iter = buffer.get(); do { iter = std::copy(haystack, haystack_next, iter); iter = std::copy(new_needle, new_needle_end, iter); --count; haystack = haystack_next + old_pattern.size(); assert(size_t(iter - buffer.get()) < n); } while (count && (haystack_next = strstr(haystack, needle))); std::copy(haystack, self.c_str() + self.size() + 1, iter); types::str replaced(buffer.get()); return replaced; } } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/rstrip.hpp000066400000000000000000000010571416264035500237710ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_RSTRIP_HPP #define PYTHONIC_BUILTIN_STR_RSTRIP_HPP #include "pythonic/include/builtins/str/rstrip.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str rstrip(types::str const &self, types::str const &to_del) { auto chars = self.chars(); auto stop = self.find_last_not_of(to_del); if (stop < 0) return {}; return {chars.begin(), chars.begin() + stop + 1}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/split.hpp000066400000000000000000000036541416264035500236060ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_SPLIT_HPP #define PYTHONIC_BUILTIN_STR_SPLIT_HPP #include "pythonic/include/builtins/str/split.hpp" #include "pythonic/builtins/str/strip.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::list split(types::str const &in, types::str const &sep, long maxsplit) { types::str s = strip(in); types::list 
res(0); if (s.empty()) return res; size_t current = 0; size_t next = 0; long numsplit = 0; while (next != types::str::npos && (numsplit++ < maxsplit || maxsplit == -1)) { next = s.find_first_of(sep, current); res.push_back(s.substr(current, next - current)); current = next + 1; } if (next != types::str::npos) { current = next + 1; res.push_back(s.substr(current, s.size() - current)); } return res; } types::list split(types::str const &in, types::none_type const &, long maxsplit) { types::str s = strip(in); types::list res(0); if (s.empty()) return res; size_t current = 0; size_t next = 0; long numsplit = 0; while (next != types::str::npos && (numsplit++ < maxsplit || maxsplit == -1)) { next = s.find_first_of(" \n\r\t", current); // from the pydoc, we skip any blank list size_t end = s.find_first_not_of(" \n\r\t", next); res.push_back(s.substr(current, next - current)); current = end; } if (next != types::str::npos) { current = next + 1; res.push_back(s.substr(current, s.size() - current)); } return res; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/startswith.hpp000066400000000000000000000011041416264035500246530ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_STARTSWITH_HPP #define PYTHONIC_BUILTIN_STR_STARTSWITH_HPP #include "pythonic/include/builtins/str/startswith.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { bool startswith(types::str const &s, types::str const &prefix, long start, long end) { if (end < 0) end = s.size(); return (end - start) >= prefix.size() && s.compare(start, prefix.size(), prefix) == 0; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/strip.hpp000066400000000000000000000012741416264035500236100ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_STRIP_HPP #define PYTHONIC_BUILTIN_STR_STRIP_HPP #include "pythonic/include/builtins/str/strip.hpp" #include "pythonic/types/str.hpp" 
#include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str strip(types::str const &self, types::str const &to_del) { if (!self) return self; auto first = self.find_first_not_of(to_del); if (first == -1) return types::str(); else return types::str(self.chars().begin() + first, self.chars().begin() + self.find_last_not_of(to_del) + 1); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/str/upper.hpp000066400000000000000000000007731416264035500236050ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_STR_UPPER_HPP #define PYTHONIC_BUILTIN_STR_UPPER_HPP #include "pythonic/include/builtins/str/upper.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str upper(types::str const &s) { types::str copy = s; std::transform(s.chars().begin(), s.chars().end(), copy.chars().begin(), ::toupper); return copy; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/sum.hpp000066400000000000000000000022071416264035500224400ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_SUM_HPP #define PYTHONIC_BUILTIN_SUM_HPP #include "pythonic/include/builtins/sum.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/int_.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { template auto tuple_sum::operator()(Tuple const &t) -> decltype(std::get(t) + tuple_sum()(t)) { return std::get(t) + tuple_sum()(t); } template auto tuple_sum::operator()(Tuple const &t) -> decltype(std::get<0>(t)) { return std::get<0>(t); } } template auto sum(Iterable s, T start) -> decltype(std::accumulate( s.begin(), s.end(), static_cast::type>( start))) { return std::accumulate( s.begin(), s.end(), static_cast::type>( start)); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/builtins/tuple.hpp000066400000000000000000000040121416264035500227610ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_TUPLE_HPP #define PYTHONIC_BUILTIN_TUPLE_HPP #include "pythonic/include/builtins/tuple.hpp" #include "pythonic/types/dynamic_tuple.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template std::tuple tuple(std::tuple const &t) { return t; } template /* this is far from perfect, but how to cope with the difference between python tuples && c++ ones ? */ typename std::enable_if < types::len_of::type>::type>:: value<0, types::dynamic_tuple::type>::type::iterator>::value_type>>::type tuple(Iterable &&i) { return {i.begin(), i.end()}; } template /* specialization if we are capable to statically compute the size of the input */ typename std::enable_if< types::len_of::type>::type>::value >= 0, types::array< typename std::iterator_traits< typename std::remove_cv::type>::type::iterator>::value_type, types::len_of::type>::type>::value>>::type tuple(StaticIterable &&i) { types::array< typename std::iterator_traits< typename std::remove_cv::type>::type::iterator>::value_type, types::len_of::type>::type>::value> res; std::copy(i.begin(), i.end(), res.begin()); return res; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/type.hpp000066400000000000000000000071521416264035500226210ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_TYPE_HPP #define PYTHONIC_BUILTIN_TYPE_HPP #include "pythonic/include/builtins/type.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/bool_.hpp" #include "pythonic/builtins/int_.hpp" #include "pythonic/builtins/float_.hpp" #include "pythonic/builtins/complex.hpp" #include "pythonic/builtins/set.hpp" #include "pythonic/builtins/str.hpp" #include "pythonic/builtins/list.hpp" #include "pythonic/builtins/dict.hpp" #include "pythonic/builtins/tuple.hpp" #include "pythonic/numpy/array.hpp" 
#include "pythonic/numpy/byte.hpp" #include "pythonic/numpy/ubyte.hpp" #include "pythonic/numpy/short_.hpp" #include "pythonic/numpy/ushort.hpp" #include "pythonic/numpy/intc.hpp" #include "pythonic/numpy/uintc.hpp" #include "pythonic/numpy/int_.hpp" #include "pythonic/numpy/uint.hpp" #include "pythonic/numpy/longlong.hpp" #include "pythonic/numpy/ulonglong.hpp" #include "pythonic/numpy/float32.hpp" #include "pythonic/numpy/float128.hpp" PYTHONIC_NS_BEGIN namespace builtins { template <> struct type_functor { using type = functor::bool_; }; template <> struct type_functor { using type = functor::float_; }; template <> struct type_functor { using type = functor::str; }; template struct type_functor> { using type = functor::complex; }; template <> struct type_functor { using type = functor::set; }; template struct type_functor> { using type = functor::set; }; template <> struct type_functor { using type = functor::list; }; template struct type_functor> { using type = functor::list; }; template struct type_functor> { using type = functor::list; }; template <> struct type_functor { using type = functor::dict; }; template struct type_functor> { using type = functor::dict; }; template struct type_functor> { using type = functor::tuple; }; template struct type_functor> { using type = functor::tuple; }; template struct type_functor> { using type = numpy::functor::array; }; template <> struct type_functor { using type = numpy::functor::byte; }; template <> struct type_functor { using type = numpy::functor::ubyte; }; template <> struct type_functor { using type = numpy::functor::short_; }; template <> struct type_functor { using type = numpy::functor::ushort; }; template <> struct type_functor { using type = numpy::functor::intc; }; template <> struct type_functor { using type = numpy::functor::uintc; }; template <> struct type_functor { using type = numpy::functor::int_; }; template <> struct type_functor { using type = numpy::functor::uint; }; template <> struct 
type_functor { using type = numpy::functor::longlong; }; template <> struct type_functor { using type = numpy::functor::ulonglong; }; template <> struct type_functor { using type = numpy::functor::float32; }; template <> struct type_functor { using type = numpy::functor::float128; }; template typename type_functor::type type(T const &) { return {}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/xrange.hpp000066400000000000000000000040571416264035500231250ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_XRANGE_HPP #define PYTHONIC_BUILTIN_XRANGE_HPP #include "pythonic/include/builtins/xrange.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace { long _init_last(long _begin, long _end, long _step) { if (_step > 0) return _begin + std::max(0L, _step * ((_end - _begin + _step - 1) / _step)); else return _begin + std::min(0L, _step * ((_end - _begin + _step + 1) / _step)); } } xrange_iterator::xrange_iterator(long v, long s) : value_(v), step_(s) { } long xrange_iterator::operator*() const { return value_; } xrange_iterator &xrange_iterator::operator++() { value_ += step_; return *this; } xrange_iterator xrange_iterator::operator++(int) { xrange_iterator self(*this); value_ += step_; return self; } xrange_iterator &xrange_iterator::operator+=(long n) { value_ += step_ * n; return *this; } bool xrange_iterator::operator!=(xrange_iterator const &other) const { return value_ != other.value_; } bool xrange_iterator::operator==(xrange_iterator const &other) const { return value_ == other.value_; } bool xrange_iterator::operator<(xrange_iterator const &other) const { return step_ * value_ < step_ * other.value_; } long xrange_iterator::operator-(xrange_iterator const &other) const { return (value_ - other.value_) / step_; } xrange::xrange(long b, long e, long s) : begin_(b), end_(_init_last(b, e, s)), step_(s) { } xrange::xrange(long e) : begin_(0), end_(e), step_(1) { } xrange_iterator 
xrange::begin() const { return xrange_iterator(begin_, step_); } xrange_iterator xrange::end() const { return xrange_iterator(end_, step_); } typename xrange::reverse_iterator xrange::rbegin() const { return {end_ - step_, -step_}; } typename xrange::reverse_iterator xrange::rend() const { return {begin_ - step_, -step_}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/builtins/zip.hpp000066400000000000000000000007061416264035500224400ustar00rootroot00000000000000#ifndef PYTHONIC_BUILTIN_ZIP_HPP #define PYTHONIC_BUILTIN_ZIP_HPP #include "pythonic/include/builtins/zip.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/builtins/map.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto zip(Iter &&... iters) -> decltype(map(builtins::None, std::forward(iters)...)) { return map(builtins::None, std::forward(iters)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/000077500000000000000000000000001416264035500203655ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/cmath/acos.hpp000066400000000000000000000004131416264035500220210ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ACOS_HPP #define PYTHONIC_CMATH_ACOS_HPP #include "pythonic/include/cmath/acos.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/acosh.hpp000066400000000000000000000004161416264035500221740ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ACOSH_HPP #define PYTHONIC_CMATH_ACOSH_HPP #include "pythonic/include/cmath/acosh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/asin.hpp000066400000000000000000000004131416264035500220260ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ASIN_HPP #define PYTHONIC_CMATH_ASIN_HPP #include 
"pythonic/include/cmath/asin.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/asinh.hpp000066400000000000000000000004161416264035500222010ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ASINH_HPP #define PYTHONIC_CMATH_ASINH_HPP #include "pythonic/include/cmath/asinh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/atan.hpp000066400000000000000000000004131416264035500220170ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ATAN_HPP #define PYTHONIC_CMATH_ATAN_HPP #include "pythonic/include/cmath/atan.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/atanh.hpp000066400000000000000000000004161416264035500221720ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ATANH_HPP #define PYTHONIC_CMATH_ATANH_HPP #include "pythonic/include/cmath/atanh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/cos.hpp000066400000000000000000000007051416264035500216640ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_COS_HPP #define PYTHONIC_CMATH_COS_HPP #include "pythonic/include/cmath/cos.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { template std::complex cos(std::complex const &v) { return std::cos(v); } template std::complex cos(T const &v) { return std::cos(v); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/cosh.hpp000066400000000000000000000004131416264035500220300ustar00rootroot00000000000000#ifndef 
PYTHONIC_CMATH_COSH_HPP #define PYTHONIC_CMATH_COSH_HPP #include "pythonic/include/cmath/cosh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/e.hpp000066400000000000000000000001531416264035500213210ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_E_HPP #define PYTHONIC_CMATH_E_HPP #include "pythonic/include/cmath/e.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/exp.hpp000066400000000000000000000004101416264035500216650ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_EXP_HPP #define PYTHONIC_CMATH_EXP_HPP #include "pythonic/include/cmath/exp.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/isinf.hpp000066400000000000000000000004161416264035500222070ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ISINF_HPP #define PYTHONIC_CMATH_ISINF_HPP #include "pythonic/include/cmath/isinf.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/isnan.hpp000066400000000000000000000004161416264035500222070ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_ISNAN_HPP #define PYTHONIC_CMATH_ISNAN_HPP #include "pythonic/include/cmath/isnan.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/log.hpp000066400000000000000000000005451416264035500216630ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_LOG_HPP #define PYTHONIC_CMATH_LOG_HPP #include "pythonic/include/cmath/log.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { using std::log; 
double log(double x, double base) { return log(x) / log(base); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/log10.hpp000066400000000000000000000004161416264035500220210ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_LOG10_HPP #define PYTHONIC_CMATH_LOG10_HPP #include "pythonic/include/cmath/log10.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/pi.hpp000066400000000000000000000001561416264035500215100ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_PI_HPP #define PYTHONIC_CMATH_PI_HPP #include "pythonic/include/cmath/pi.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/sin.hpp000066400000000000000000000004101416264035500216620ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_SIN_HPP #define PYTHONIC_CMATH_SIN_HPP #include "pythonic/include/cmath/sin.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/sinh.hpp000066400000000000000000000004131416264035500220350ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_SINH_HPP #define PYTHONIC_CMATH_SINH_HPP #include "pythonic/include/cmath/sinh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/sqrt.hpp000066400000000000000000000004131416264035500220650ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_SQRT_HPP #define PYTHONIC_CMATH_SQRT_HPP #include "pythonic/include/cmath/sqrt.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/tan.hpp000066400000000000000000000004101416264035500216530ustar00rootroot00000000000000#ifndef 
PYTHONIC_CMATH_TAN_HPP #define PYTHONIC_CMATH_TAN_HPP #include "pythonic/include/cmath/tan.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/cmath/tanh.hpp000066400000000000000000000004131416264035500220260ustar00rootroot00000000000000#ifndef PYTHONIC_CMATH_TANH_HPP #define PYTHONIC_CMATH_TANH_HPP #include "pythonic/include/cmath/tanh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/core.hpp000066400000000000000000000020571416264035500207360ustar00rootroot00000000000000 #ifndef PYTHONIC_CORE_HPP #define PYTHONIC_CORE_HPP #define PYTHONIC_NS_BEGIN \ namespace \ { \ namespace pythonic \ { #define PYTHONIC_NS_END \ } \ } // mostly to flag '_' as unused in generated code #ifdef WIN32 #define PYTHRAN_UNUSED #else #define PYTHRAN_UNUSED __attribute__((unused)) #endif // for backward compatibility #ifdef USE_BOOST_SIMD #define USE_XSIMD #endif #define STR_(M) #M // clang-format off #define INCLUDE_FILE(U, M) STR_(U/M.hpp) // clang-format on #include "pythonic/types/assignable.hpp" #include "pythonic/types/combined.hpp" #include "pythonic/types/int.hpp" #include "pythonic/types/float.hpp" #include "pythonic/types/slice.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/functools/000077500000000000000000000000001416264035500213055ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/functools/partial.hpp000066400000000000000000000023561416264035500234600ustar00rootroot00000000000000#ifndef PYTHONIC_FUNCTOOLS_PARTIAL_HPP #define PYTHONIC_FUNCTOOLS_PARTIAL_HPP #include "pythonic/include/functools/partial.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/seq.hpp" #include PYTHONIC_NS_BEGIN namespace functools { namespace details { template task::task() : closure() { } template 
task::task(ClosureTypes const &... types) : closure(types...) { } template template auto task::operator()(Types &&... types) const -> decltype( this->call(utils::make_index_sequence(), std::forward(types)...)) { return call(utils::make_index_sequence(), std::forward(types)...); } } template // remove references as closure capture the env by copy details::task::type>::type...> partial(Types &&... types) { return {std::forward(types)...}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/functools/reduce.hpp000066400000000000000000000002561416264035500232700ustar00rootroot00000000000000#ifndef PYTHONIC_FUNCTOOLS_REDUCE_HPP #define PYTHONIC_FUNCTOOLS_REDUCE_HPP #include "pythonic/include/functools/reduce.hpp" #include "pythonic/builtins/reduce.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/000077500000000000000000000000001416264035500207145ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/000077500000000000000000000000001416264035500233075ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/clear.hpp000066400000000000000000000005501416264035500251060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_CLEAR_HPP #define PYTHONIC_INCLUDE_DISPATCH_CLEAR_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto clear(Any &&any) -> decltype(any.clear()) { return any.clear(); } DEFINE_FUNCTOR(pythonic::__dispatch__, clear); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/conjugate.hpp000066400000000000000000000006431416264035500260020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_CONJUGATE_HPP #define PYTHONIC_INCLUDE_DISPATCH_CONJUGATE_HPP #include "pythonic/include/numpy/conjugate.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto conjugate(Any const &any) -> decltype(numpy::functor::conjugate{}(any)); 
DEFINE_FUNCTOR(pythonic::__dispatch__, conjugate); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/copy.hpp000066400000000000000000000005111416264035500247670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_COPY_HPP #define PYTHONIC_INCLUDE_DISPATCH_COPY_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto copy(Any const &any) -> decltype(any.copy()); DEFINE_FUNCTOR(pythonic::__dispatch__, copy); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/count.hpp000066400000000000000000000006051416264035500251510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_COUNT_HPP #define PYTHONIC_INCLUDE_DISPATCH_COUNT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto count(Any &&any, Value &&value) -> decltype(any.count(std::forward(value))); DEFINE_FUNCTOR(pythonic::__dispatch__, count); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/index.hpp000066400000000000000000000004661416264035500251350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_INDEX_HPP #define PYTHONIC_INCLUDE_DISPATCH_INDEX_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/include/operator_/indexOf.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { USING_FUNCTOR(index, pythonic::operator_::functor::indexOf); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/pop.hpp000066400000000000000000000006001416264035500246120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_POP_HPP #define PYTHONIC_INCLUDE_DISPATCH_POP_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto pop(Any &&any, Arg0 &&... 
arg0) -> decltype(any.pop(std::forward(arg0)...)); DEFINE_FUNCTOR(pythonic::__dispatch__, pop); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/remove.hpp000066400000000000000000000005571416264035500253240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_REMOVE_HPP #define PYTHONIC_INCLUDE_DISPATCH_REMOVE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto remove(Any &any, Arg0 const &arg0) -> decltype(any.remove(arg0)); DEFINE_FUNCTOR(pythonic::__dispatch__, remove); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/sort.hpp000066400000000000000000000015051416264035500250100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_SORT_HPP #define PYTHONIC_INCLUDE_DISPATCH_SORT_HPP #include "pythonic/include/builtins/list/sort.hpp" #include "pythonic/include/numpy/sort.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto sort(types::list &l, Args &&... args) -> decltype(pythonic::builtins::list::sort(l, std::forward(args)...)); template auto sort(types::list &&l, Args &&... args) -> decltype(pythonic::builtins::list::sort(std::move(l), std::forward(args)...)); template types::none_type sort(Any &&any, Args &&... args); DEFINE_FUNCTOR(pythonic::__dispatch__, sort); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/__dispatch__/update.hpp000066400000000000000000000006201416264035500253000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_DISPATCH_UPDATE_HPP #define PYTHONIC_INCLUDE_DISPATCH_UPDATE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace __dispatch__ { template auto update(Any &&any, Arg0 &&... 
arg0) -> decltype(any.update(std::forward(arg0)...)); DEFINE_FUNCTOR(pythonic::__dispatch__, update); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/bisect/000077500000000000000000000000001416264035500221655ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/bisect/bisect.hpp000066400000000000000000000014721416264035500241530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BISECT_BISECT_HPP #define PYTHONIC_INCLUDE_BISECT_BISECT_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace bisect { namespace details { template using bisect_fun = decltype(std::upper_bound); } template long bisect(X const &x, A const &a, long lo = 0, details::bisect_fun const &fun = std::upper_bound); template long bisect(X const &x, A const &a, long lo, long hi, details::bisect_fun const &fun = std::upper_bound); DEFINE_FUNCTOR(pythonic::bisect, bisect); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/bisect/bisect_left.hpp000066400000000000000000000006641416264035500251670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BISECT_BISECTLEFT_HPP #define PYTHONIC_INCLUDE_BISECT_BISECTLEFT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace bisect { template long bisect_left(X const &x, A const &a, long lo = 0); template long bisect_left(X const &x, A const &a, long lo, long hi); DEFINE_FUNCTOR(pythonic::bisect, bisect_left); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/bisect/bisect_right.hpp000066400000000000000000000006711416264035500253500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BISECT_BISECTRIGHT_HPP #define PYTHONIC_INCLUDE_BISECT_BISECTRIGHT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace bisect { template long bisect_right(X const &x, A const &a, long lo = 0); template long bisect_right(X const &x, A const &a, long lo, long hi); DEFINE_FUNCTOR(pythonic::bisect, bisect_right); } 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/000077500000000000000000000000001416264035500225455ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/ArithmeticError.hpp000066400000000000000000000004131416264035500263570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ARITHMETICERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_ARITHMETICERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(ArithmeticError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/AssertionError.hpp000066400000000000000000000004101416264035500262320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ASSERTIONERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_ASSERTIONERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(AssertionError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/AttributeError.hpp000066400000000000000000000004101416264035500262260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ATTRIBUTEERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_ATTRIBUTEERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(AttributeError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/BaseException.hpp000066400000000000000000000004051416264035500260060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_BASEEXCEPTION_HPP #define PYTHONIC_INCLUDE_BUILTIN_BASEEXCEPTION_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(BaseException) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/BufferError.hpp000066400000000000000000000003771416264035500255100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_BUFFERERROR_HPP #define 
PYTHONIC_INCLUDE_BUILTIN_BUFFERERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(BufferError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/BytesWarning.hpp000066400000000000000000000004021416264035500256660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_BYTESWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_BYTESWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(BytesWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/DeprecationWarning.hpp000066400000000000000000000004241416264035500270410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DEPRECATIONWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_DEPRECATIONWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(DeprecationWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/EOFError.hpp000066400000000000000000000003661416264035500247060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_EOFERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_EOFERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(EOFError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/EnvironmentError.hpp000066400000000000000000000004161416264035500265750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ENVIRONMENTERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_ENVIRONMENTERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(EnvironmentError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/Exception.hpp000066400000000000000000000003711416264035500252150ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_BUILTIN_EXCEPTION_HPP #define PYTHONIC_INCLUDE_BUILTIN_EXCEPTION_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(Exception) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/False.hpp000066400000000000000000000003511416264035500243070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FALSE_HPP #define PYTHONIC_INCLUDE_BUILTIN_FALSE_HPP #include "pythonic/include/types/bool.hpp" PYTHONIC_NS_BEGIN namespace builtins { static const bool False = false; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/FileNotFoundError.hpp000066400000000000000000000004211416264035500266210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILENOTFOUNDERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILENOTFOUNDERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(FileNotFoundError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/FloatingPointError.hpp000066400000000000000000000004241416264035500270450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FLOATINGPOINTERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_FLOATINGPOINTERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(FloatingPointError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/FutureWarning.hpp000066400000000000000000000004051416264035500260550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FUTUREWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_FUTUREWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(FutureWarning) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/GeneratorExit.hpp000066400000000000000000000004051416264035500260350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_GENERATOREXIT_HPP #define PYTHONIC_INCLUDE_BUILTIN_GENERATOREXIT_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(GeneratorExit) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/IOError.hpp000066400000000000000000000003631416264035500246010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_IOERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_IOERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(IOError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/ImportError.hpp000066400000000000000000000003771416264035500255510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_IMPORTERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_IMPORTERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(ImportError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/ImportWarning.hpp000066400000000000000000000004051416264035500260550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_IMPORTWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_IMPORTWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(ImportWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/IndentationError.hpp000066400000000000000000000004161416264035500265450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_INDENTATIONERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_INDENTATIONERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(IndentationError) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/IndexError.hpp000066400000000000000000000003741416264035500253430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_INDEXERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_INDEXERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(IndexError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/KeyError.hpp000066400000000000000000000003661416264035500250250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_KEYERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_KEYERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(KeyError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/KeyboardInterrupt.hpp000066400000000000000000000004211416264035500267300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_KEYBOARDINTERRUPT_HPP #define PYTHONIC_INCLUDE_BUILTIN_KEYBOARDINTERRUPT_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(KeyboardInterrupt) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/LookupError.hpp000066400000000000000000000003771416264035500255500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LOOKUPERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_LOOKUPERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(LookupError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/MemoryError.hpp000066400000000000000000000003771416264035500255470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_MEMORYERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_MEMORYERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(MemoryError) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/NameError.hpp000066400000000000000000000003711416264035500251510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_NAMEERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_NAMEERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(NameError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/None.hpp000066400000000000000000000003371416264035500241600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_NONE_HPP #define PYTHONIC_INCLUDE_BUILTIN_NONE_HPP #include "pythonic/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace builtins { types::none_type const None; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/NotImplementedError.hpp000066400000000000000000000004271416264035500272170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_NOTIMPLEMENTEDERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_NOTIMPLEMENTEDERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(NotImplementedError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/OSError.hpp000066400000000000000000000003631416264035500246130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_OSERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_OSERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(OSError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/OverflowError.hpp000066400000000000000000000004051416264035500260720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_OVERFLOWERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_OVERFLOWERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(OverflowError) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/PendingDeprecationWarning.hpp000066400000000000000000000004511416264035500303460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PENDINGDEPRECATIONWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_PENDINGDEPRECATIONWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(PendingDeprecationWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/ReferenceError.hpp000066400000000000000000000004101416264035500261610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_REFERENCEERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_REFERENCEERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(ReferenceError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/RuntimeError.hpp000066400000000000000000000004021416264035500257070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_RUNTIMEERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_RUNTIMEERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(RuntimeError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/RuntimeWarning.hpp000066400000000000000000000004101416264035500262220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_RUNTIMEWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_RUNTIMEWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(RuntimeWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/StopIteration.hpp000066400000000000000000000004051416264035500260610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STOPITERATION_HPP #define PYTHONIC_INCLUDE_BUILTIN_STOPITERATION_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace 
builtins { PYTHONIC_EXCEPTION_DECL(StopIteration) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/SyntaxError.hpp000066400000000000000000000003771416264035500255650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SYNTAXERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_SYNTAXERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(SyntaxError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/SyntaxWarning.hpp000066400000000000000000000004051416264035500260710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SYNTAXWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_SYNTAXWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(SyntaxWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/SystemError.hpp000066400000000000000000000003771416264035500255630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SYSTEMERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_SYSTEMERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(SystemError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/SystemExit.hpp000066400000000000000000000003741416264035500254000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SYSTEMEXIT_HPP #define PYTHONIC_INCLUDE_BUILTIN_SYSTEMEXIT_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(SystemExit) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/TabError.hpp000066400000000000000000000003661416264035500250030ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_TABERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_TABERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { 
PYTHONIC_EXCEPTION_DECL(TabError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/True.hpp000066400000000000000000000003451416264035500241770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_TRUE_HPP #define PYTHONIC_INCLUDE_BUILTIN_TRUE_HPP #include "pythonic/include/types/bool.hpp" PYTHONIC_NS_BEGIN namespace builtins { static const bool True = true; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/TypeError.hpp000066400000000000000000000003711416264035500252120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_TYPEERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_TYPEERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(TypeError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/UnboundLocalError.hpp000066400000000000000000000004211416264035500266520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_UNBOUNDLOCALERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_UNBOUNDLOCALERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(UnboundLocalError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/UnicodeError.hpp000066400000000000000000000004021416264035500256520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_UNICODEERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_UNICODEERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(UnicodeError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/UnicodeWarning.hpp000066400000000000000000000004101416264035500261650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_UNICODEWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_UNICODEWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { 
PYTHONIC_EXCEPTION_DECL(UnicodeWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/UserWarning.hpp000066400000000000000000000003771416264035500255310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_USERWARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_USERWARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(UserWarning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/ValueError.hpp000066400000000000000000000003741416264035500253500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_VALUEERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_VALUEERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(ValueError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/Warning.hpp000066400000000000000000000003631416264035500246650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_WARNING_HPP #define PYTHONIC_INCLUDE_BUILTIN_WARNING_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(Warning) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/ZeroDivisionError.hpp000066400000000000000000000004211416264035500267110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ZERODIVISIONERROR_HPP #define PYTHONIC_INCLUDE_BUILTIN_ZERODIVISIONERROR_HPP #include "pythonic/include/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace builtins { PYTHONIC_EXCEPTION_DECL(ZeroDivisionError) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/abs.hpp000066400000000000000000000006001416264035500240170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ABS_HPP #define PYTHONIC_INCLUDE_BUILTIN_ABS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/abs.hpp" PYTHONIC_NS_BEGIN namespace builtins 
{ // FIXME np.abs accept any iterator while builtins.abs only accept // numeric types && numpy.array USING_FUNCTOR(abs, numpy::functor::abs); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/all.hpp000066400000000000000000000004471416264035500240330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ALL_HPP #define PYTHONIC_INCLUDE_BUILTIN_ALL_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template bool all(Iterable &&s); DEFINE_FUNCTOR(pythonic::builtins, all); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/any.hpp000066400000000000000000000004461416264035500240510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ANY_HPP #define PYTHONIC_INCLUDE_BUILTIN_ANY_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template bool any(Iterable &&s); DEFINE_FUNCTOR(pythonic::builtins, any); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/assert.hpp000066400000000000000000000004061416264035500245570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ASSERT_HPP #define PYTHONIC_INCLUDE_BUILTIN_ASSERT_HPP #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN void pythran_assert(bool cond); void pythran_assert(bool cond, types::str const &what); PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/bin.hpp000066400000000000000000000006411416264035500240270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_BIN_HPP #define PYTHONIC_INCLUDE_BUILTIN_BIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/str.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { template typename std::enable_if::value, types::str>::type bin(T const &v); DEFINE_FUNCTOR(pythonic::builtins, bin); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/bool_.hpp000066400000000000000000000013531416264035500243520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_BOOL_HPP #define PYTHONIC_INCLUDE_BUILTIN_BOOL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace functor { struct bool_ { using callable = void; using type = bool; bool operator()() const; template bool operator()(T const &val) const; template bool operator()(std::tuple const &val) const; template bool operator()(types::array const &val) const; friend std::ostream &operator<<(std::ostream &os, bool_) { return os << "bool"; } }; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/chr.hpp000066400000000000000000000005151416264035500240330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_CHR_HPP #define PYTHONIC_INCLUDE_BUILTIN_CHR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace builtins { template types::str chr(T const &v); DEFINE_FUNCTOR(pythonic::builtins, chr); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/complex.hpp000066400000000000000000000011341416264035500247240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_COMPLEX_HPP #define PYTHONIC_INCLUDE_BUILTIN_COMPLEX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace functor { struct complex { using callable = void; using type = std::complex; // TODO: doesn't handle string as first argument type operator()(double v0 = 0, double v1 = 0) const; friend std::ostream &operator<<(std::ostream &os, complex) { return os << "complex"; } }; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/complex/000077500000000000000000000000001416264035500242145ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/complex/conjugate.hpp000066400000000000000000000005431416264035500267060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_COMPLEX_CONJUGATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_COMPLEX_CONJUGATE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/conjugate.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace complex { USING_FUNCTOR(conjugate, numpy::functor::conjugate); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict.hpp000066400000000000000000000013201416264035500241750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_HPP #include "pythonic/include/types/dict.hpp" #include "pythonic/include/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { types::empty_dict dict(); template types::dict dict(types::dict const &); template auto dict(Iterable &&iterable) -> types::dict< typename std::decay(*iterable.begin()))>::type, typename std::decay(*iterable.begin()))>::type>; } DEFINE_FUNCTOR(pythonic::builtins::anonymous, dict); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/000077500000000000000000000000001416264035500234705ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/clear.hpp000066400000000000000000000005371416264035500252740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_CLEAR_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_CLEAR_HPP #include "pythonic/include/__dispatch__/clear.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { USING_FUNCTOR(clear, pythonic::__dispatch__::functor::clear); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/copy.hpp000066400000000000000000000005321416264035500251530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_COPY_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_COPY_HPP #include "pythonic/include/__dispatch__/copy.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { USING_FUNCTOR(copy, pythonic::__dispatch__::functor::copy); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/fromkeys.hpp000066400000000000000000000011421416264035500260360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_FROMKEYS_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_FROMKEYS_HPP #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/types/dict.hpp" #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template types::dict::type::value_type, V> fromkeys(Iterable &&iter, V const &v = builtins::None); DEFINE_FUNCTOR(pythonic::builtins::dict, fromkeys); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/get.hpp000066400000000000000000000016111416264035500247570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_GET_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_GET_HPP #include "pythonic/include/types/dict.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template typename __combined::type get(types::dict const &d, W const &k, X const &default_); template types::none get(types::dict const &d, W const &k); template X get(types::empty_dict const &, W const &, X const &default_); // For typing only template typename __combined::type get(::dict_container, I, J); DEFINE_FUNCTOR(pythonic::builtins::dict, get); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/items.hpp000066400000000000000000000007411416264035500253240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_ITEMS_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_ITEMS_HPP #include "pythonic/include/types/dict.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template auto items(D &&d) -> decltype(std::forward(d).items()); DEFINE_FUNCTOR(pythonic::builtins::dict, items); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/keys.hpp000066400000000000000000000007121416264035500251540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_KEYS_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_KEYS_HPP #include "pythonic/include/types/dict.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template auto keys(D &&d) -> decltype(std::forward(d).keys()); DEFINE_FUNCTOR(pythonic::builtins::dict, keys); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/pop.hpp000066400000000000000000000005251416264035500250010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_POP_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_POP_HPP #include "pythonic/include/__dispatch__/pop.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { USING_FUNCTOR(pop, pythonic::__dispatch__::functor::pop); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/popitem.hpp000066400000000000000000000007001416264035500256530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_POPITEM_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_POPITEM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/dict.hpp" #include PYTHONIC_NS_BEGIN namespace 
builtins { namespace dict { template auto popitem(D &&d) -> decltype(std::forward(d).popitem()); DEFINE_FUNCTOR(pythonic::builtins::dict, popitem); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/setdefault.hpp000066400000000000000000000014511416264035500263420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_SETDEFAULT_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_SETDEFAULT_HPP #include "pythonic/include/types/dict.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template V &setdefault(types::dict &d, W const &k, X const &default_); template types::none setdefault(types::dict &d, W const &k); template V setdefault(types::dict &&d, W const &k, X const &default_); template types::none setdefault(types::dict &&d, W const &k); DEFINE_FUNCTOR(pythonic::builtins::dict, setdefault); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/update.hpp000066400000000000000000000005441416264035500254660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_UPDATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_UPDATE_HPP #include "pythonic/include/__dispatch__/update.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { USING_FUNCTOR(update, pythonic::__dispatch__::functor::update); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/dict/values.hpp000066400000000000000000000007231416264035500255020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DICT_VALUES_HPP #define PYTHONIC_INCLUDE_BUILTIN_DICT_VALUES_HPP #include "pythonic/include/types/dict.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace dict { template auto values(D &&d) -> decltype(std::forward(d).values()); DEFINE_FUNCTOR(pythonic::builtins::dict, values); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/divmod.hpp000066400000000000000000000007051416264035500245420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_DIVMOD_HPP #define PYTHONIC_INCLUDE_BUILTIN_DIVMOD_HPP #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto divmod(T0 const &t0, T1 const &t1) // other types are left over -> decltype(types::make_tuple(t0 / t1, t0 % t1)); DEFINE_FUNCTOR(pythonic::builtins, divmod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/enumerate.hpp000066400000000000000000000042321416264035500252440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ENUMERATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_ENUMERATE_HPP #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { // FIXME return value may be a type::make_tuple template using enumerate_iterator_base = std::iterator< typename std::iterator_traits::iterator_category, types::make_tuple_t< long, typename std::iterator_traits::value_type>>; template struct enumerate_iterator : public enumerate_iterator_base { long value; Iterator iter; enumerate_iterator(); enumerate_iterator(Iterator const &iter, long first); typename enumerate_iterator_base::value_type operator*() const { return types::make_tuple(value, *iter); } enumerate_iterator &operator++() { ++value, ++iter; return *this; } enumerate_iterator &operator+=(long n); bool operator!=(enumerate_iterator const &other) const; bool operator<(enumerate_iterator const &other) const; long operator-(enumerate_iterator const &other) const; bool operator==(enumerate_iterator const &it) const; }; template struct enumerate : private Iterable, /* to hold a reference on the iterable */ public enumerate_iterator< typename Iterable::iterator> /* to be compatible with builtins.next*/ { using iterator = 
enumerate_iterator; using iterator::operator*; iterator end_iter; enumerate(); enumerate(Iterable seq, long first); iterator &begin(); iterator const &begin() const; iterator end() const; }; } template details::enumerate::type>::type> enumerate(Iterable &&seq, long first = 0L); DEFINE_FUNCTOR(pythonic::builtins, enumerate); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file.hpp000066400000000000000000000007321416264035500241770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { types::file file(types::str const &filename, types::str const &strmode = "r"); } DEFINE_FUNCTOR(pythonic::builtins::anonymous, file); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/000077500000000000000000000000001416264035500234645ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/close.hpp000066400000000000000000000006161416264035500253050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_CLOSE_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_CLOSE_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void close(types::file &f); void close(types::file &&f); DEFINE_FUNCTOR(pythonic::builtins::file, close); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/fileno.hpp000066400000000000000000000005671416264035500254610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_FILENO_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_FILENO_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { long 
fileno(types::file const &f); DEFINE_FUNCTOR(pythonic::builtins::file, fileno); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/flush.hpp000066400000000000000000000006161416264035500253210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_FLUSH_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_FLUSH_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void flush(types::file &f); void flush(types::file &&f); DEFINE_FUNCTOR(pythonic::builtins::file, flush); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/isatty.hpp000066400000000000000000000005671416264035500255220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_ISATTY_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_ISATTY_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { bool isatty(types::file const &f); DEFINE_FUNCTOR(pythonic::builtins::file, isatty); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/next.hpp000066400000000000000000000005311416264035500251520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_NEXT_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_NEXT_HPP #include "pythonic/include/__dispatch__/next.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { USING_FUNCTOR(next, pythonic::__dispatch__::functor::next); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/read.hpp000066400000000000000000000007371416264035500251170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_READ_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_READ_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN 
namespace builtins { namespace file { types::str read(types::file &f, long size = -1); types::str read(types::file &&f, long size = -1); DEFINE_FUNCTOR(pythonic::builtins::file, read); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/readline.hpp000066400000000000000000000007631416264035500257660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_READLINE_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_READLINE_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { types::str readline(types::file &f, long size = -1); types::str readline(types::file &&f, long size = -1); DEFINE_FUNCTOR(pythonic::builtins::file, readline); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/readlines.hpp000066400000000000000000000011071416264035500261420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_READLINES_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_READLINES_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { template types::list readlines(F &&f); template types::list readlines(F &&f, long sizehint); DEFINE_FUNCTOR(pythonic::builtins::file, readlines); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/seek.hpp000066400000000000000000000010261416264035500251230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_SEEK_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_SEEK_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/file.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void seek(types::file &f, long offset); void seek(types::file &&f, long offset); void seek(types::file &f, long offset, 
long whence); void seek(types::file &&f, long offset, long whence); DEFINE_FUNCTOR(pythonic::builtins::file, seek); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/tell.hpp000066400000000000000000000005571416264035500251440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_TELL_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_TELL_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { long tell(types::file const &f); DEFINE_FUNCTOR(pythonic::builtins::file, tell); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/truncate.hpp000066400000000000000000000007721416264035500260300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_TRUNCATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_TRUNCATE_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { void truncate(types::file &f); void truncate(types::file &&f); void truncate(types::file &f, long size); void truncate(types::file &&f, long size); DEFINE_FUNCTOR(pythonic::builtins::file, truncate); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/write.hpp000066400000000000000000000007461416264035500253360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_WRITE_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_WRITE_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { long write(types::file &f, types::str const &str); long write(types::file &&f, types::str const &str); DEFINE_FUNCTOR(pythonic::builtins::file, write); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/file/writelines.hpp000066400000000000000000000006531416264035500263660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILE_WRITELINES_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILE_WRITELINES_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace file { template void writelines(F &&f, T const &sequence); DEFINE_FUNCTOR(pythonic::builtins::file, writelines); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/filter.hpp000066400000000000000000000047751416264035500245600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FILTER_HPP #define PYTHONIC_INCLUDE_BUILTIN_FILTER_HPP #include "pythonic/include/utils/iterator.hpp" #include "pythonic/include/itertools/common.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace details { template struct filter_iterator : std::iterator { using sequence_type = typename std::remove_cv< typename std::remove_reference::type>::type; Operator op; typename List0::iterator iter; // FIXME : iter_end should be const because filter should be evaluate // only once. 
Some tests doesn't work with it for now because of // uncorrect itertools.product implementation typename List0::iterator iter_end; bool test_filter(std::true_type); bool test_filter(std::false_type); filter_iterator() = default; filter_iterator(Operator _op, List0 &_seq); filter_iterator(itertools::npos, Operator _op, List0 &_seq); typename List0::value_type operator*() const; filter_iterator &operator++(); void next_value(); bool operator==(filter_iterator const &other) const; bool operator!=(filter_iterator const &other) const; bool operator<(filter_iterator const &other) const; }; // Inherit from iterator_reminder to keep a reference on the iterator // && avoid a dangling reference // FIXME: It would be better to have a copy only if needed but Pythran // typing is ! good enough for this as arguments have // remove_cv/remove_ref template struct filter : utils::iterator_reminder, filter_iterator { using value_type = typename List0::value_type; using iterator = filter_iterator; iterator end_iter; filter() = default; filter(Operator _op, List0 const &_seq); iterator &begin(); iterator const &begin() const; iterator const &end() const; }; } template details::filter::type>::type, typename std::remove_cv< typename std::remove_reference::type>::type> filter(Operator &&_op, List0 &&_seq); DEFINE_FUNCTOR(pythonic::builtins, filter); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/float_.hpp000066400000000000000000000010051416264035500245160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FLOAT_HPP #define PYTHONIC_INCLUDE_BUILTIN_FLOAT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace functor { struct float_ { using callable = void; using type = double; template type operator()(T &&t) const; type operator()() const; friend std::ostream &operator<<(std::ostream &os, float_) { return os << "float"; } }; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/float_/000077500000000000000000000000001416264035500240115ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/float_/is_integer.hpp000066400000000000000000000005251416264035500266540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_FLOAT_ISINTEGER_HPP #define PYTHONIC_INCLUDE_BUILTIN_FLOAT_ISINTEGER_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace float_ { bool is_integer(double d); DEFINE_FUNCTOR(pythonic::builtins::float_, is_integer); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/getattr.hpp000066400000000000000000000002161416264035500247270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_GETATTR_HPP #define PYTHONIC_INCLUDE_BUILTIN_GETATTR_HPP /* implemented in each attribute handler */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/hex.hpp000066400000000000000000000005161416264035500240440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_HEX_HPP #define PYTHONIC_INCLUDE_BUILTIN_HEX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace builtins { template types::str hex(T const &v); DEFINE_FUNCTOR(pythonic::builtins, hex); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/id.hpp000066400000000000000000000005531416264035500236550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ID_HPP #define PYTHONIC_INCLUDE_BUILTIN_ID_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template long id(T const &t); long id(long const &t); long id(double const &t); long id(bool const &t); DEFINE_FUNCTOR(pythonic::builtins, id); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/in.hpp000066400000000000000000000002631416264035500236650ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_BUILTIN_IN_HPP #define PYTHONIC_INCLUDE_BUILTIN_IN_HPP PYTHONIC_NS_BEGIN template bool in(T &&t, V const &v); PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/int_.hpp000066400000000000000000000012461416264035500242120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_INT_HPP #define PYTHONIC_INCLUDE_BUILTIN_INT_HPP #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace functor { struct int_ { using callable = void; using type = long; type operator()(char const t[], long base) const; type operator()(types::str const &t, long base) const; type operator()(types::chr const &t, long base) const; template type operator()(T &&t) const; type operator()() const; friend std::ostream &operator<<(std::ostream &os, int_) { return os << "int"; } }; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/isinstance.hpp000066400000000000000000000015561416264035500254250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ISINSTANCE_HPP #define PYTHONIC_INCLUDE_BUILTIN_ISINSTANCE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/builtins/pythran/is_none.hpp" PYTHONIC_NS_BEGIN namespace types { class str; template struct isinstance : std::conditional::value, true_type, false_type> { }; // some specialization template <> struct isinstance { using type = true_type; }; template <> struct isinstance { using type = true_type; }; } namespace builtins { template typename types::isinstance< Obj, typename std::decay()())>::type>::type isinstance(Obj, Cls) { return {}; } DEFINE_FUNCTOR(pythonic::builtins, isinstance); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/iter.hpp000066400000000000000000000012471416264035500242250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ITER_HPP #define PYTHONIC_INCLUDE_BUILTIN_ITER_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { 
namespace details { template struct iter : T::iterator { using iterator = typename T::iterator; iterator _end; T data; iter(); iter(T data); iterator &begin(); iterator const &begin() const; iterator const &end() const; }; } template details::iter< typename std::remove_cv::type>::type> iter(T &&t); DEFINE_FUNCTOR(pythonic::builtins, iter); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/len.hpp000066400000000000000000000007621416264035500240410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LEN_HPP #define PYTHONIC_INCLUDE_BUILTIN_LEN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/yield.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { template long len(std::tuple const &); template typename std::enable_if::value, long>::type len(T const &t); DEFINE_FUNCTOR(pythonic::builtins, len); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list.hpp000066400000000000000000000012651416264035500242350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_HPP #include "pythonic/include/types/list.hpp" #include "pythonic/include/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { inline types::empty_list list(); inline types::empty_list list(types::empty_list); template types::list::type::iterator>::value_type>:: type> list(Iterable &&t); } DEFINE_FUNCTOR(pythonic::builtins::anonymous, list); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/000077500000000000000000000000001416264035500235205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/append.hpp000066400000000000000000000012231416264035500254760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_APPEND_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_APPEND_HPP #include "pythonic/include/types/list.hpp" #include 
"pythonic/include/types/NoneType.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template types::none_type append(types::list &seq, F &&value); template types::none_type append(types::list &&seq, F &&value); template types::none_type append(types::empty_list &seq, F &&value); DEFINE_FUNCTOR(pythonic::builtins::list, append); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/count.hpp000066400000000000000000000005411416264035500253610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_COUNT_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_COUNT_HPP #include "pythonic/include/__dispatch__/count.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { USING_FUNCTOR(count, pythonic::__dispatch__::functor::count); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/extend.hpp000066400000000000000000000014471416264035500255260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_EXTEND_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_EXTEND_HPP #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template typename std::enable_if< !std::is_same::type, types::empty_list>::value, types::none_type>::type extend(T0 &&seq, T1 const &add); template typename std::enable_if< std::is_same::type, types::empty_list>::value, types::none_type>::type extend(T0 &&seq, T1 const &add); DEFINE_FUNCTOR(pythonic::builtins::list, extend); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/insert.hpp000066400000000000000000000007441416264035500255420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_INSERT_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_INSERT_HPP #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/types/list.hpp" #include "pythonic/include/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template types::none_type insert(types::list &seq, long n, F &&value); DEFINE_FUNCTOR(pythonic::builtins::list, insert); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/pop.hpp000066400000000000000000000005261416264035500250320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_POP_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_POP_HPP #include "pythonic/include/__dispatch__/pop.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { USING_FUNCTOR(pop, pythonic::__dispatch__::functor::pop); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/remove.hpp000066400000000000000000000005451416264035500255320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_REMOVE_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_REMOVE_HPP #include "pythonic/include/__dispatch__/remove.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { USING_FUNCTOR(remove, pythonic::__dispatch__::functor::remove); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/reverse.hpp000066400000000000000000000007141416264035500257060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_LIST_REVERSE_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_REVERSE_HPP #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template types::none_type reverse(types::list &seq); DEFINE_FUNCTOR(pythonic::builtins::list, reverse); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/list/sort.hpp000066400000000000000000000010301416264035500252120ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_BUILTIN_LIST_SORT_HPP #define PYTHONIC_INCLUDE_BUILTIN_LIST_SORT_HPP #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace list { template types::none_type sort(types::list &seq); template types::none_type sort(types::list &seq, K key); DEFINE_FUNCTOR(pythonic::builtins::list, sort); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/map.hpp000066400000000000000000000122171416264035500240360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_MAP_HPP #define PYTHONIC_INCLUDE_BUILTIN_MAP_HPP #include "pythonic/include/itertools/common.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/utils/iterator.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/seq.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { template struct map_res { using type = decltype( std::declval()(std::declval::value_type>()...)); }; template struct map_res { using type = decltype(types::make_tuple(std::declval::value_type>()...)); }; template struct map_iterator : std::iterator< typename utils::iterator_min::type, typename map_res::type> { std::tuple it; Operator _op; map_iterator() = default; template map_iterator(Operator const &_op, std::tuple &_iters, utils::index_sequence); template map_iterator(itertools::npos, Operator const &_op, std::tuple &_iters, utils::index_sequence); typename map_res::type operator*() const; map_iterator &operator++(); map_iterator &operator+=(long i); map_iterator operator+(long i) const; bool operator==(map_iterator const &other) const; bool operator!=(map_iterator const &other) const; bool operator<(map_iterator const &other) const; long operator-(map_iterator const &other) const; private: template long 
min_len(map_iterator const &other, utils::int_) const; long min_len(map_iterator const &other, utils::int_<0>) const; template bool equal(map_iterator const &other, utils::int_) const; bool equal(map_iterator const &other, utils::int_<0>) const; template void advance(long i, utils::int_); void advance(long i, utils::int_<0>); template void next(utils::index_sequence); template typename map_res::type get_value(utils::index_sequence, std::true_type) const; template typename map_res::type get_value(utils::index_sequence, std::false_type) const; }; template struct map : utils::iterator_reminder, map_iterator { using iterator = map_iterator; using value_type = typename iterator::value_type; using dtype = typename types::dtype_of::type; static constexpr long value = 1 + utils::nested_container_depth::value; iterator end_iter; map() = default; // Use an extra template to enable forwarding template map(Operator const &_op, Types &&... _iters); iterator &begin(); iterator const &begin() const; iterator const &end() const; }; } template auto map(Operator &&_op, Iter &&... iters) -> details::map< typename std::remove_cv< typename std::remove_reference::type>::type, typename types::iterator::type>::type>::type...>; DEFINE_FUNCTOR(pythonic::builtins, map); } namespace types { template struct len_of> { static constexpr long value = len_of::type>::type>::value; }; template struct len_of> { static constexpr long _head = len_of::type>::type>::value; static constexpr long _tail = len_of>::value; // take the minimal value. If one is negative, it will be automatically // selected static constexpr long value = (_head < _tail ? 
_head : _tail); }; } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined::value_type>>::type; }; #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/max.hpp000066400000000000000000000010111416264035500240340ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_MAX_HPP #define PYTHONIC_INCLUDE_BUILTIN_MAX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/gt.hpp" #include "pythonic/include/builtins/minmax.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto max(Types &&... values) -> decltype(details::minmax(operator_::functor::lt{}, std::forward(values)...)); DEFINE_FUNCTOR(pythonic::builtins, max); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/min.hpp000066400000000000000000000010101416264035500240310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_MIN_HPP #define PYTHONIC_INCLUDE_BUILTIN_MIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/gt.hpp" #include "pythonic/include/builtins/minmax.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto min(Types &&... 
values) -> decltype(details::minmax(operator_::functor::gt{}, std::forward(values)...)); DEFINE_FUNCTOR(pythonic::builtins, min); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/minmax.hpp000066400000000000000000000014741416264035500245550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_MINMAX_HPP #define PYTHONIC_INCLUDE_BUILTIN_MINMAX_HPP #include #include "pythonic/include/builtins/pythran/kwonly.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace details { template typename std::decay::type::value_type minmax(Op const &, T &&t); template typename std::decay::type::value_type minmax(Op const &, T &&t, types::kwonly, F key); template typename std::enable_if::value, typename __combined::type>::type minmax(Op const &, T0 const &, T1 const &, Types const &...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/next.hpp000066400000000000000000000005011416264035500242300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_NEXT_HPP #define PYTHONIC_INCLUDE_BUILTIN_NEXT_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { template auto next(T &&y) -> decltype(*y); DEFINE_FUNCTOR(pythonic::builtins, next); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/oct.hpp000066400000000000000000000005151416264035500240440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_OCT_HPP #define PYTHONIC_INCLUDE_BUILTIN_OCT_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template types::str oct(T const &v); DEFINE_FUNCTOR(pythonic::builtins, oct); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/open.hpp000066400000000000000000000006321416264035500242200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_OPEN_HPP #define PYTHONIC_INCLUDE_BUILTIN_OPEN_HPP #include "pythonic/include/types/file.hpp" 
#include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { types::file open(types::str const &filename, types::str const &strmode = "r"); DEFINE_FUNCTOR(pythonic::builtins, open); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/ord.hpp000066400000000000000000000004731416264035500240460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ORD_HPP #define PYTHONIC_INCLUDE_BUILTIN_ORD_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { long ord(types::str const &v); DEFINE_FUNCTOR(pythonic::builtins, ord); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pow.hpp000066400000000000000000000011751416264035500240670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_POW_HPP #define PYTHONIC_INCLUDE_BUILTIN_POW_HPP #include "pythonic/include/numpy/power.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { // this is only the case in python if the exponent is negative double pow(long, long); // in that case we are sure we have a positive exponent template long pow(long, std::integral_constant); template auto pow(Types &&... args) -> decltype(numpy::functor::power{}(std::forward(args)...)); DEFINE_FUNCTOR(pythonic::builtins, pow); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/print.hpp000066400000000000000000000007721416264035500244200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PRINT_HPP #define PYTHONIC_INCLUDE_BUILTIN_PRINT_HPP #include #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { void print_nonl(); template void print_nonl(T const &value, Types const &... values); void print(); template void print(T const &value, Types const &... 
values); DEFINE_FUNCTOR(pythonic::builtins, print); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/000077500000000000000000000000001416264035500242325ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/StaticIfBreak.hpp000066400000000000000000000007071416264035500274220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFBREAK_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFBREAK_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/static_if.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfBreak StaticIfBreak(T const &arg); DEFINE_FUNCTOR(pythonic::builtins::pythran, StaticIfBreak); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/StaticIfCont.hpp000066400000000000000000000007021416264035500272740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFCONT_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFCONT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/static_if.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfCont StaticIfCont(T const &arg); DEFINE_FUNCTOR(pythonic::builtins::pythran, StaticIfCont); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/StaticIfNoReturn.hpp000066400000000000000000000007261416264035500301530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFNORETURN_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFNORETURN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/static_if.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfNoReturn StaticIfNoReturn(T const &arg); DEFINE_FUNCTOR(pythonic::builtins::pythran, StaticIfNoReturn); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/StaticIfReturn.hpp000066400000000000000000000007131416264035500276520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFRETURN_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATICIFRETURN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/static_if.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::StaticIfReturn StaticIfReturn(T const &arg); DEFINE_FUNCTOR(pythonic::builtins::pythran, StaticIfReturn); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/abssqr.hpp000066400000000000000000000012561416264035500262420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_ABSSQR_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_ABSSQR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { namespace details { template T abssqr(T const &v); template T abssqr(std::complex const &v); } #define NUMPY_NARY_FUNC_NAME abssqr #define NUMPY_NARY_FUNC_SYM details::abssqr #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/and_.hpp000066400000000000000000000007351416264035500256510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_AND_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_AND_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/combined.hpp" #include "pythonic/include/types/lazy.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::lazy_combined_t and_(T0 &&, T1 &&); DEFINE_FUNCTOR(pythonic::builtins::pythran, and_); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/is_none.hpp000066400000000000000000000043611416264035500264010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_IS_NONE_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_IS_NONE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace types { struct false_type; struct true_type { operator bool() const { return true; } false_type operator!() const; true_type operator&(true_type) const; false_type operator&(false_type) const; true_type operator|(true_type) const; true_type operator|(false_type) const; true_type operator==(true_type) const; false_type operator==(false_type) const; }; struct false_type { operator bool() const { return false; } true_type operator!() const { return {}; } false_type operator&(true_type) { return {}; } false_type operator&(false_type) { return {}; } true_type operator|(true_type) { return {}; } false_type operator|(false_type) { return {}; } false_type operator==(true_type) { return {}; } true_type operator==(false_type) { return {}; } }; false_type true_type::operator!() const { return {}; } true_type true_type::operator&(true_type) const { return {}; } false_type true_type::operator&(false_type) const { return {}; } true_type true_type::operator|(true_type) const { return {}; } true_type true_type::operator|(false_type) const { return {}; } true_type true_type::operator==(true_type) const { return {}; } false_type true_type::operator==(false_type) const { return {}; } } namespace builtins { namespace pythran { template types::false_type is_none(T const &) { return {}; }; template bool is_none(types::none const &n) { return n.is_none; }; types::true_type is_none(types::none_type const &) { return {}; }; DEFINE_FUNCTOR(pythonic::builtins::pythran, is_none); } } #ifdef ENABLE_PYTHON_MODULE template <> struct to_python : to_python { }; template <> struct to_python : to_python { }; #endif 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/kwonly.hpp000066400000000000000000000006251416264035500262710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_KWONLY_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_KWONLY_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace types { struct kwonly { }; } namespace builtins { namespace pythran { types::kwonly kwonly() { return {}; }; DEFINE_FUNCTOR(pythonic::builtins::pythran, kwonly); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/len_set.hpp000066400000000000000000000005701416264035500263760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_LEN_SET_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_LEN_SET_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template long len_set(Iterable const &s); DEFINE_FUNCTOR(pythonic::builtins::pythran, len_set); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/make_shape.hpp000066400000000000000000000005601416264035500270410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_MAKE_SHAPE_HPP #define PYTHONIC_INCLUDE_BUILTIN_MAKE_SHAPE_HPP PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template pythonic::types::pshape make_shape(Args... 
args); DEFINE_FUNCTOR(pythonic::builtins::pythran, make_shape); } // pythran } // builtins PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/or_.hpp000066400000000000000000000007311416264035500255230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_OR_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_OR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/combined.hpp" #include "pythonic/include/types/lazy.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { template types::lazy_combined_t or_(T0 &&, T1 &&); DEFINE_FUNCTOR(pythonic::builtins::pythran, or_); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/static_if.hpp000066400000000000000000000042711416264035500267140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATIC_IF_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATIC_IF_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/pythran/is_none.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { namespace details { template struct static_if; template <> struct static_if { static_if(types::true_type) { } template F0 operator()(F0 f0, F1 f1) { return f0; } }; template <> struct static_if { static_if(types::false_type) { } template F1 operator()(F0 f0, F1 f1) { return f1; } }; template <> struct static_if { bool state_; static_if(bool state) : state_(state) { } template struct merged { bool state_; F0 f0; F1 f1; merged(bool state, F0 f0, F1 f1) : state_(state), f0(f0), f1(f1) { } template auto operator()(Args &&... 
args) const -> typename __combined< decltype(f0(std::forward(args)...)), decltype(f1(std::forward(args)...))>::type { if (state_) return f0(std::forward(args)...); else return f1(std::forward(args)...); } }; template merged operator()(F0 f0, F1 f1) { return {state_, f0, f1}; } }; } template auto static_if(T const &cond, F0 f0, F1 f1) -> decltype(details::static_if{cond}(f0, f1)); template auto static_if(int const &cond, F0 f0, F1 f1) -> decltype(details::static_if{(bool)cond}(f0, f1)) { return details::static_if{(bool)cond}(f0, f1); } DEFINE_FUNCTOR(pythonic::builtins::pythran, static_if); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/pythran/static_list.hpp000066400000000000000000000023551416264035500272720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATIC_LIST_HPP #define PYTHONIC_INCLUDE_BUILTIN_PYTHRAN_STATIC_LIST_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/list.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace pythran { types::empty_list static_list(std::tuple<> const &other) { return {}; } template types::static_list static_list(types::array const &other); template types::static_list static_list(types::array &other); template types::static_list static_list(types::array &&other); template auto static_list(T &&other) -> decltype( pythonic::builtins::functor::list{}(std::forward(other))); template types::static_list::type, 1 + sizeof...(Tys)> static_list(std::tuple const &other) { return static_list( types::to_array::type>(other)); } DEFINE_FUNCTOR(pythonic::builtins::pythran, static_list); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/range.hpp000066400000000000000000000040451416264035500243550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_RANGE_HPP #define PYTHONIC_INCLUDE_BUILTIN_RANGE_HPP #include "pythonic/include/types/list.hpp" #include 
"pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace { struct range_iterator : std::iterator { long value_; long step_; range_iterator() = default; range_iterator(long v, long s); long operator*() const; range_iterator &operator++(); range_iterator &operator--(); range_iterator operator++(int); range_iterator operator--(int); range_iterator &operator+=(long n); range_iterator &operator-=(long n); bool operator!=(range_iterator const &other) const; bool operator==(range_iterator const &other) const; bool operator<(range_iterator const &other) const; long operator-(range_iterator const &other) const; }; } struct range { using value_type = long; using iterator = range_iterator; using const_iterator = range_iterator; using reverse_iterator = range_iterator; using const_reverse_iterator = range_iterator; using dtype = long; static constexpr long value = 1; long begin_; long end_; long step_; range() = default; range(long b, long e, long s = 1); range(long e); iterator begin() const; iterator end() const; reverse_iterator rbegin() const; reverse_iterator rend() const; long size() const; long operator[](long i) const; }; DEFINE_FUNCTOR(pythonic::builtins, range); } PYTHONIC_NS_END namespace std { template long get(pythonic::builtins::range const &); template struct tuple_element { typedef long type; }; } /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined { using type = typename __combined>::type; }; #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/reduce.hpp000066400000000000000000000026071416264035500245320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_REDUCE_HPP #define PYTHONIC_INCLUDE_BUILTIN_REDUCE_HPP #include "pythonic/include/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace builtins { template auto reduce(Operator op, Iterable s) -> decltype(op(std::declval::value_type>(), std::declval::value_type>())); // this convoluted expression 
computes the fixed-point type of the output // it's required because, e.g. static_list + static_list // returns array // and this widens to list template using reduce_helper_t = typename __combined< T, decltype(std::declval()( std::declval(), std::declval::value_type>()))>::type; template auto reduce(Operator op, Iterable s, T const &init) -> decltype(std::accumulate( s.begin(), s.end(), static_cast>(init), op)); DEFINE_FUNCTOR(pythonic::builtins, reduce); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/reversed.hpp000066400000000000000000000014761416264035500251050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_REVERSED_HPP #define PYTHONIC_INCLUDE_BUILTIN_REVERSED_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace details { template struct reversed { using value_type = typename Iterable::value_type; using iterator = typename Iterable::reverse_iterator; using const_iterator = typename Iterable::const_reverse_iterator; Iterable iterable; reversed(); reversed(Iterable const &iterable); iterator begin(); iterator end(); const_iterator begin() const; const_iterator end() const; }; } template details::reversed reversed(Iterable const &iterable); DEFINE_FUNCTOR(pythonic::builtins, reversed); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/round.hpp000066400000000000000000000005431416264035500244070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ROUND_HPP #define PYTHONIC_INCLUDE_BUILTIN_ROUND_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template double round(T const &v, size_t n); template double round(T const &v); DEFINE_FUNCTOR(pythonic::builtins, round); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set.hpp000066400000000000000000000010401416264035500240440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_HPP #define 
PYTHONIC_INCLUDE_BUILTIN_SET_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { inline types::empty_set set(); template inline types::set::type::iterator>::value_type> set(Iterable &&t); } DEFINE_FUNCTOR(pythonic::builtins::anonymous, set); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/000077500000000000000000000000001416264035500233405ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/add.hpp000066400000000000000000000012031416264035500245750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_ADD_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_ADD_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type add(types::set &s, F const &value); template types::none_type add(types::set &&s, F const &value); template types::none_type add(types::empty_set const &s, F &&value); DEFINE_FUNCTOR(pythonic::builtins::set, add); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/clear.hpp000066400000000000000000000005331416264035500251400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_CLEAR_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_CLEAR_HPP #include "pythonic/include/__dispatch__/clear.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { USING_FUNCTOR(clear, pythonic::__dispatch__::functor::clear); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/copy.hpp000066400000000000000000000005261416264035500250260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_COPY_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_COPY_HPP #include "pythonic/include/__dispatch__/copy.hpp" #include 
"pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { USING_FUNCTOR(copy, pythonic::__dispatch__::functor::copy); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/difference.hpp000066400000000000000000000017101416264035500261420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_DIFFERENCE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_DIFFERENCE_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::set difference(types::set const &set, Types const &... others); template types::set difference(types::set &&set, Types const &... others); template types::empty_set difference(types::empty_set const &set, Types const &... others); template types::set difference(types::set const &set); template types::set difference(types::set &&set); types::empty_set difference(types::empty_set const &set); DEFINE_FUNCTOR(pythonic::builtins::set, difference); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/difference_update.hpp000066400000000000000000000015451416264035500275120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_DIFFERENCEUPDATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_DIFFERENCEUPDATE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/set.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type difference_update(types::set &set, Types const &... others); template types::none_type difference_update(types::set &&set, Types const &... others); template types::none_type difference_update(types::empty_set const &set, Types const &... 
others); DEFINE_FUNCTOR(pythonic::builtins::set, difference_update); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/discard.hpp000066400000000000000000000011171416264035500254620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_DISCARD_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_DISCARD_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template void discard(types::set &set, U const &elem); template void discard(types::set &&set, U const &elem); template void discard(types::empty_set const &set, U const &elem); DEFINE_FUNCTOR(pythonic::builtins::set, discard); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/intersection.hpp000066400000000000000000000015521416264035500265620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_INTERSECTION_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_INTERSECTION_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template typename __combined, Types...>::type intersection(types::set const &set, Types const &... others); /* No rvalue overload possible because of return type modification.: * >>> a = set([1,2,3]) * >>> b = set([1., 2., 3.]) * >>> a.intersection(b) * set([1.0, 2.0, 3.0]) */ template types::empty_set intersection(types::empty_set const &set, Types const &... 
others); DEFINE_FUNCTOR(pythonic::builtins::set, intersection); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/intersection_update.hpp000066400000000000000000000015621416264035500301250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_INTERSECTIONUPDATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_INTERSECTIONUPDATE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/set.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type intersection_update(types::set &set, Types const &... others); template types::none_type intersection_update(types::set &&set, Types const &... others); template types::none_type intersection_update(types::empty_set &&set, Types const &... others); DEFINE_FUNCTOR(pythonic::builtins::set, intersection_update); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/isdisjoint.hpp000066400000000000000000000010431416264035500262260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_ISDISJOINT_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_ISDISJOINT_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template bool isdisjoint(types::set const &calling_set, U const &arg_set); template bool isdisjoint(types::empty_set const &calling_set, U const &arg_set); DEFINE_FUNCTOR(pythonic::builtins::set, isdisjoint); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/issubset.hpp000066400000000000000000000010061416264035500257070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_ISSUBSET_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_ISSUBSET_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template bool issubset(types::set const &set, U const &other); template bool 
issubset(types::empty_set const &set, U const &other); DEFINE_FUNCTOR(pythonic::builtins::set, issubset); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/issuperset.hpp000066400000000000000000000010201416264035500262500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_ISSUPERSET_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_ISSUPERSET_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template bool issuperset(types::set const &set, U const &other); template bool issuperset(types::empty_set const &set, U const &other); DEFINE_FUNCTOR(pythonic::builtins::set, issuperset); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/remove.hpp000066400000000000000000000005401416264035500253450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_REMOVE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_REMOVE_HPP #include "pythonic/include/__dispatch__/remove.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { USING_FUNCTOR(remove, pythonic::__dispatch__::functor::remove); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/symmetric_difference.hpp000066400000000000000000000016601416264035500302420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_SYMMETRICDIFFERENCE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_SYMMETRICDIFFERENCE_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template typename __combined, U>::type symmetric_difference(types::set const &set, U const &other); /* No rvalue overload possible because of return type modification.: * >>> a = set([1, 2, 3]) * >>> b = set([2., 3., 4.]) * >>> a.symmetric_difference(b) * set([1.0, 4.0]) */ // combiner is used as other may be list but return is a set 
template typename __combined::type symmetric_difference(types::empty_set const &set, U const &other); DEFINE_FUNCTOR(pythonic::builtins::set, symmetric_difference); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/symmetric_difference_update.hpp000066400000000000000000000016151416264035500316040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_SYMMETRICDIFFERENCEUPDATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_SYMMETRICDIFFERENCEUPDATE_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template types::none_type symmetric_difference_update(types::set &set, U const &other); template types::none_type symmetric_difference_update(types::set &&set, U const &other); template types::none_type symmetric_difference_update(types::empty_set const &set, U const &other); DEFINE_FUNCTOR(pythonic::builtins::set, symmetric_difference_update); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/union_.hpp000066400000000000000000000015721416264035500253450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_UNION_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_UNION_HPP #include "pythonic/include/types/set.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { template typename __combined, Types...>::type union_(types::set const &set, Types const &... others); template typename __combined::type union_(types::empty_set const &init, Types const &... 
others); template types::set union_(types::set const &set); template typename __combined::type union_(T const &set); types::empty_set union_(types::empty_set const &init); DEFINE_FUNCTOR(pythonic::builtins::set, union_); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/set/update.hpp000066400000000000000000000005401416264035500253320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SET_UPDATE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SET_UPDATE_HPP #include "pythonic/include/__dispatch__/update.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace set { USING_FUNCTOR(update, pythonic::__dispatch__::functor::update); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/slice.hpp000066400000000000000000000011651416264035500243600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SLICE_HPP #define PYTHONIC_INCLUDE_BUILTIN_SLICE_HPP #include "pythonic/include/types/slice.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { types::contiguous_slice slice(types::none stop); types::contiguous_slice slice(types::none start, types::none stop); types::slice slice(types::none start, types::none stop, types::none step); } DEFINE_FUNCTOR(pythonic::builtins::anonymous, slice); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/sorted.hpp000066400000000000000000000017271416264035500245650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SORTED_HPP #define PYTHONIC_INCLUDE_BUILTIN_SORTED_HPP #include "pythonic/include/types/list.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template types::list::type::iterator>::value_type>::type> sorted(Iterable &&seq); template types::list::type::iterator>::value_type>::type> sorted(Iterable &&seq, Key const &key, bool reverse = false); template 
types::list::type::iterator>::value_type>::type> sorted(Iterable &&seq, types::none_type const &key, bool reverse = false); DEFINE_FUNCTOR(pythonic::builtins, sorted); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str.hpp000066400000000000000000000007531416264035500240730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace anonymous { template types::str str(T const &t); inline types::str str(bool b); inline types::str str(long value); inline types::str str(double l); } DEFINE_FUNCTOR(pythonic::builtins::anonymous, str); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/000077500000000000000000000000001416264035500233555ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/__mod__.hpp000066400000000000000000000011511416264035500254370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_MOD_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_MOD_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { template types::str __mod__(types::str const &, T const &arg); template types::str __mod__(types::str const &, std::tuple const &args); template types::str __mod__(types::str const &, types::array const &args); DEFINE_FUNCTOR(pythonic::builtins::str, __mod__); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/capitalize.hpp000066400000000000000000000006071416264035500262160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_CAPITALIZE_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_CAPITALIZE_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str 
capitalize(types::str const &s); DEFINE_FUNCTOR(pythonic::builtins::str, capitalize); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/count.hpp000066400000000000000000000005331416264035500252170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_COUNT_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_COUNT_HPP #include "pythonic/include/__dispatch__/count.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { USING_FUNCTOR(count, pythonic::__dispatch__::functor::count); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/endswith.hpp000066400000000000000000000010021416264035500257040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_ENDSWITH_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_ENDSWITH_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { // TODO : Add implementation for tuple as first argument. 
bool endswith(types::str const &s, types::str const &suffix, long start = 0, long end = -1); DEFINE_FUNCTOR(pythonic::builtins::str, endswith); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/find.hpp000066400000000000000000000010561416264035500250100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_FIND_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_FIND_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { long find(types::str const &s, types::str const &value, long start, long end); long find(types::str const &s, types::str const &value, long start); long find(types::str const &s, types::str const &value); DEFINE_FUNCTOR(pythonic::builtins::str, find); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/isalpha.hpp000066400000000000000000000005651416264035500255150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_ISALPHA_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_ISALPHA_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { bool isalpha(types::str const &s); DEFINE_FUNCTOR(pythonic::builtins::str, isalpha); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/isdigit.hpp000066400000000000000000000005651416264035500255300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_ISDIGIT_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_ISDIGIT_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { bool isdigit(types::str const &s); DEFINE_FUNCTOR(pythonic::builtins::str, isdigit); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/join.hpp000066400000000000000000000026541416264035500250340ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_BUILTIN_STR_JOIN_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_JOIN_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { /* Join for string.join(string) */ template types::str join(S const &s, types::str const &iterable); /* Join for string.join(random acces iter but ! on string) */ template typename std::enable_if< !std::is_same::type>::type, types::str>::value && std::is_same< typename std::iterator_traits::type::iterator>::iterator_category, std::random_access_iterator_tag>::value, types::str>::type join(S const &s, Iterable &&iterable); /* Join for string.join(forward iterator) */ template typename std::enable_if< !std::is_same< typename std::iterator_traits::type::iterator>::iterator_category, std::random_access_iterator_tag>::value, types::str>::type join(S const &s, Iterable &&iterable); DEFINE_FUNCTOR(pythonic::builtins::str, join); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/lower.hpp000066400000000000000000000005631416264035500252220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_LOWER_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_LOWER_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str lower(types::str const &s); DEFINE_FUNCTOR(pythonic::builtins::str, lower); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/lstrip.hpp000066400000000000000000000006311416264035500254030ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_LSTRIP_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_LSTRIP_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str lstrip(types::str const &self, types::str const &to_del = " "); DEFINE_FUNCTOR(pythonic::builtins::str, lstrip); } } 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/replace.hpp000066400000000000000000000010311416264035500254740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_REPLACE_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_REPLACE_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str replace(types::str const &self, types::str const &old_pattern, types::str const &new_pattern, long count = std::numeric_limits::max()); DEFINE_FUNCTOR(pythonic::builtins::str, replace); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/rstrip.hpp000066400000000000000000000006321416264035500254120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_RSTRIP_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_RSTRIP_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str rstrip(types::str const &self, types::str const &to_del = " "); DEFINE_FUNCTOR(pythonic::builtins::str, rstrip); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/split.hpp000066400000000000000000000013301416264035500252160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_SPLIT_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_SPLIT_HPP #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::list split(types::str const &in, types::str const &sep, long maxsplit = -1); types::list split(types::str const &s, types::none_type const & = {}, long maxsplit = -1); DEFINE_FUNCTOR(pythonic::builtins::str, split); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/startswith.hpp000066400000000000000000000007161416264035500263060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_STARTSWITH_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_STARTSWITH_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { bool startswith(types::str const &s, types::str const &prefix, long start = 0, long end = -1); DEFINE_FUNCTOR(pythonic::builtins::str, startswith); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/strip.hpp000066400000000000000000000006301416264035500252260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_STRIP_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_STRIP_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str strip(types::str const &self, types::str const &to_del = " \n"); DEFINE_FUNCTOR(pythonic::builtins::str, strip); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/str/upper.hpp000066400000000000000000000005631416264035500252250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_STR_UPPER_HPP #define PYTHONIC_INCLUDE_BUILTIN_STR_UPPER_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace str { types::str upper(types::str const &s); DEFINE_FUNCTOR(pythonic::builtins::str, upper); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/sum.hpp000066400000000000000000000026771416264035500240760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_SUM_HPP #define PYTHONIC_INCLUDE_BUILTIN_SUM_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/int_.hpp" #include 
"pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace details { template struct tuple_sum { auto operator()(Tuple const &t) -> decltype(std::get(t) + tuple_sum()(t)); }; template struct tuple_sum { auto operator()(Tuple const &t) -> decltype(std::get<0>(t)); }; } template auto sum(Iterable s, T start) -> decltype(std::accumulate( s.begin(), s.end(), static_cast::type>( start))); template auto sum(Iterable s) -> decltype(sum(s, 0L)) { return sum(s, 0L); } template auto sum(std::tuple const &t) -> decltype( details::tuple_sum, sizeof...(Types)-1>()(t)) { return details::tuple_sum, sizeof...(Types)-1>()(t); } template T sum(types::array_base const &t) { return details::tuple_sum, N - 1>()(t); } DEFINE_FUNCTOR(pythonic::builtins, sum); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/tuple.hpp000066400000000000000000000032631416264035500244130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_TUPLE_HPP #define PYTHONIC_INCLUDE_BUILTIN_TUPLE_HPP #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/types/dynamic_tuple.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace builtins { template std::tuple tuple(std::tuple const &t); template /* this is far from perfect, but how to cope with the difference between python tuples && c++ ones ? 
*/ typename std::enable_if < types::len_of::type>::type>:: value<0, types::dynamic_tuple::type>::type::iterator>::value_type>>::type tuple(Iterable &&i); template < class StaticIterable> /* specialization if we are capable to statically compute the size of the input */ typename std::enable_if< types::len_of::type>::type>::value >= 0, types::array< typename std::iterator_traits< typename std::remove_cv::type>::type::iterator>::value_type, types::len_of::type>::type>::value>>::type tuple(StaticIterable &&i); DEFINE_FUNCTOR(pythonic::builtins, tuple); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/type.hpp000066400000000000000000000004701416264035500242400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_TYPE_HPP #define PYTHONIC_INCLUDE_BUILTIN_TYPE_HPP PYTHONIC_NS_BEGIN namespace builtins { template struct type_functor; template typename type_functor::type type(T const &t); DEFINE_FUNCTOR(pythonic::builtins, type); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/xrange.hpp000066400000000000000000000026421416264035500245460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_XRANGE_HPP #define PYTHONIC_INCLUDE_BUILTIN_XRANGE_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace builtins { namespace { struct xrange_iterator : std::iterator { long value_; long step_; xrange_iterator() = default; xrange_iterator(long v, long s); long operator*() const; xrange_iterator &operator++(); xrange_iterator operator++(int); xrange_iterator &operator+=(long n); bool operator!=(xrange_iterator const &other) const; bool operator==(xrange_iterator const &other) const; bool operator<(xrange_iterator const &other) const; long operator-(xrange_iterator const &other) const; }; } struct xrange { using value_type = long; using iterator = xrange_iterator; using const_iterator = xrange_iterator; using reverse_iterator = xrange_iterator; using const_reverse_iterator = 
xrange_iterator; long begin_; long end_; long step_; xrange() = default; xrange(long b, long e, long s = 1); xrange(long e); iterator begin() const; iterator end() const; reverse_iterator rbegin() const; reverse_iterator rend() const; }; DEFINE_FUNCTOR(pythonic::builtins, xrange); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/builtins/zip.hpp000066400000000000000000000006371416264035500240660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_BUILTIN_ZIP_HPP #define PYTHONIC_INCLUDE_BUILTIN_ZIP_HPP #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/builtins/map.hpp" PYTHONIC_NS_BEGIN namespace builtins { template auto zip(Iter &&... iters) -> decltype(map(builtins::None, std::forward(iters)...)); DEFINE_FUNCTOR(pythonic::builtins, zip); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/000077500000000000000000000000001416264035500220105ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/cmath/acos.hpp000066400000000000000000000004441416264035500234500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ACOS_HPP #define PYTHONIC_INCLUDE_CMATH_ACOS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(acos, std::acos); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/acosh.hpp000066400000000000000000000004501416264035500236150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ACOSH_HPP #define PYTHONIC_INCLUDE_CMATH_ACOSH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(acosh, std::acosh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/asin.hpp000066400000000000000000000004441416264035500234550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ASIN_HPP #define 
PYTHONIC_INCLUDE_CMATH_ASIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(asin, std::asin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/asinh.hpp000066400000000000000000000004501416264035500236220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ASINH_HPP #define PYTHONIC_INCLUDE_CMATH_ASINH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(asinh, std::asinh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/atan.hpp000066400000000000000000000004441416264035500234460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ATAN_HPP #define PYTHONIC_INCLUDE_CMATH_ATAN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(atan, std::atan); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/atanh.hpp000066400000000000000000000004501416264035500236130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ATANH_HPP #define PYTHONIC_INCLUDE_CMATH_ATANH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(atanh, std::atanh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/cos.hpp000066400000000000000000000006441416264035500233110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_COS_HPP #define PYTHONIC_INCLUDE_CMATH_COS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { template std::complex cos(std::complex const &v); template std::complex cos(T const &v); DEFINE_FUNCTOR(pythonic::cmath, cos); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/cmath/cosh.hpp000066400000000000000000000004441416264035500234570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_COSH_HPP #define PYTHONIC_INCLUDE_CMATH_COSH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(cosh, std::cosh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/e.hpp000066400000000000000000000004251416264035500227460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_E_HPP #define PYTHONIC_INCLUDE_CMATH_E_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { double constexpr e = std::exp(1); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/exp.hpp000066400000000000000000000004401416264035500233130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_EXP_HPP #define PYTHONIC_INCLUDE_CMATH_EXP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(exp, std::exp); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/isinf.hpp000066400000000000000000000004501416264035500236300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ISINF_HPP #define PYTHONIC_INCLUDE_CMATH_ISINF_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(isinf, std::isinf); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/isnan.hpp000066400000000000000000000004501416264035500236300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_ISNAN_HPP #define PYTHONIC_INCLUDE_CMATH_ISNAN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { 
DEFINE_FUNCTOR_2(isnan, std::isnan); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/log.hpp000066400000000000000000000005341416264035500233040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_LOG_HPP #define PYTHONIC_INCLUDE_CMATH_LOG_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { using std::log; double log(double x, double base); DEFINE_FUNCTOR(pythonic::cmath, log); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/log10.hpp000066400000000000000000000004501416264035500234420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_LOG10_HPP #define PYTHONIC_INCLUDE_CMATH_LOG10_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(log10, std::log10); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/pi.hpp000066400000000000000000000004451416264035500231340ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_PI_HPP #define PYTHONIC_INCLUDE_CMATH_PI_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { double constexpr pi = std::atan(1) * 4; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/sin.hpp000066400000000000000000000004401416264035500233100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_SIN_HPP #define PYTHONIC_INCLUDE_CMATH_SIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(sin, std::sin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/sinh.hpp000066400000000000000000000004441416264035500234640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_SINH_HPP #define PYTHONIC_INCLUDE_CMATH_SINH_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(sinh, std::sinh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/sqrt.hpp000066400000000000000000000004441416264035500235140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_SQRT_HPP #define PYTHONIC_INCLUDE_CMATH_SQRT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(sqrt, std::sqrt); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/tan.hpp000066400000000000000000000004401416264035500233010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_TAN_HPP #define PYTHONIC_INCLUDE_CMATH_TAN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(tan, std::tan); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/cmath/tanh.hpp000066400000000000000000000004441416264035500234550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_CMATH_TANH_HPP #define PYTHONIC_INCLUDE_CMATH_TANH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include PYTHONIC_NS_BEGIN namespace cmath { DEFINE_FUNCTOR_2(tanh, std::tanh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/functools/000077500000000000000000000000001416264035500227305ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/functools/partial.hpp000066400000000000000000000036731416264035500251060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_FUNCTOOLS_PARTIAL_HPP #define PYTHONIC_INCLUDE_FUNCTOOLS_PARTIAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/seq.hpp" #include #include PYTHONIC_NS_BEGIN namespace functools { namespace details { /* a task that captures its 
environment for later call */ template struct task { using callable = void; friend std::ostream &operator<<(std::ostream &os, task) { return os << "partial_function_wrapper"; } mutable std::tuple closure; // closure associated to // the task, mutable // because pythran assumes // all function calls are // const task(); task(task const &) = default; task(ClosureTypes const &... types); template auto call(utils::index_sequence, Types &&... types) const -> decltype(std::get<0>(closure)(std::get(closure)..., std::forward(types)...)) { return std::get<0>(closure)(std::get(closure)..., std::forward(types)...); } template auto operator()(Types &&... types) const -> decltype( this->call(utils::make_index_sequence(), std::forward(types)...)); }; } template // remove references as closure capture the env by copy details::task::type>::type...> partial(Types &&... types); DEFINE_FUNCTOR(pythonic::functools, partial); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/functools/reduce.hpp000066400000000000000000000004631416264035500247130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_FUNCTOOLS_REDUCE_HPP #define PYTHONIC_INCLUDE_FUNCTOOLS_REDUCE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/reduce.hpp" PYTHONIC_NS_BEGIN namespace functools { USING_FUNCTOR(reduce, builtins::functor::reduce); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/000077500000000000000000000000001416264035500213235ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/000077500000000000000000000000001416264035500220715ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/000077500000000000000000000000001416264035500246065ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/close.hpp000066400000000000000000000005361416264035500264300ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_CLOSE_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_CLOSE_HPP #include "pythonic/include/builtins/file/close.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(close, builtins::file::functor::close); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/fileno.hpp000066400000000000000000000005431416264035500265750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_FILENO_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_FILENO_HPP #include "pythonic/include/builtins/file/fileno.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(fileno, builtins::file::functor::fileno); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/flush.hpp000066400000000000000000000005361416264035500264440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_FLUSH_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_FLUSH_HPP #include "pythonic/include/builtins/file/flush.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(flush, builtins::file::functor::flush); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/isatty.hpp000066400000000000000000000005431416264035500266360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_ISATTY_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_ISATTY_HPP #include "pythonic/include/builtins/file/isatty.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(isatty, builtins::file::functor::isatty); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/next.hpp000066400000000000000000000005301416264035500262730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_NEXT_HPP #define 
PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_NEXT_HPP #include "pythonic/include/builtins/file/next.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(next, builtins::file::functor::next); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/read.hpp000066400000000000000000000005301416264035500262300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_READ_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_READ_HPP #include "pythonic/include/builtins/file/read.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(read, builtins::file::functor::read); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/readline.hpp000066400000000000000000000005541416264035500271060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_READLINE_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_READLINE_HPP #include "pythonic/include/builtins/file/readline.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(readline, builtins::file::functor::readline); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/readlines.hpp000066400000000000000000000005611416264035500272670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_READLINES_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_READLINES_HPP #include "pythonic/include/builtins/file/readlines.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(readlines, builtins::file::functor::readlines); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/seek.hpp000066400000000000000000000005301416264035500262440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_SEEK_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_SEEK_HPP #include 
"pythonic/include/builtins/file/seek.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(seek, builtins::file::functor::seek); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/tell.hpp000066400000000000000000000005301416264035500262550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_TELL_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_TELL_HPP #include "pythonic/include/builtins/file/tell.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(tell, builtins::file::functor::tell); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/truncate.hpp000066400000000000000000000005541416264035500271500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_TRUNCATE_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_TRUNCATE_HPP #include "pythonic/include/builtins/file/truncate.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(truncate, builtins::file::functor::truncate); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/write.hpp000066400000000000000000000005351416264035500264540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_WRITE_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_WRITE_HPP #include "pythonic/include/builtins/file/write.hpp" PYTHONIC_NS_BEGIN namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(write, builtins::file::functor::write); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/io/_io/TextIOWrapper/writelines.hpp000066400000000000000000000005661416264035500275130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_WRITELINES_HPP #define PYTHONIC_INCLUDE_IO__IO_TEXTIOWRAPPER_WRITELINES_HPP #include "pythonic/include/builtins/file/writelines.hpp" PYTHONIC_NS_BEGIN 
namespace io { namespace _io { namespace TextIOWrapper { USING_FUNCTOR(writelines, builtins::file::functor::writelines); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/itertools/000077500000000000000000000000001416264035500227405ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/itertools/combinations.hpp000066400000000000000000000044201416264035500261360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_COMBINATIONS_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_COMBINATIONS_HPP #include "pythonic/include/types/dynamic_tuple.hpp" #include "pythonic/include/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace itertools { namespace details { template struct combination_iterator : std::iterator, ptrdiff_t, types::dynamic_tuple *, types::dynamic_tuple /*no ref*/ > { std::vector pool; std::vector indices; long r; bool stopped; std::vector result; combination_iterator() = default; combination_iterator(bool); template combination_iterator(Iter &&pool, long r); types::dynamic_tuple operator*() const; combination_iterator &operator++(); bool operator!=(combination_iterator const &other) const; bool operator==(combination_iterator const &other) const; bool operator<(combination_iterator const &other) const; }; template struct combination : combination_iterator { using iterator = combination_iterator; using value_type = typename iterator::value_type; long num_elts; combination() = default; template combination(Iter &&iter, long elts); iterator const &begin() const; iterator begin(); iterator end() const; }; } template details::combination< typename std::remove_cv::type>::type> combinations(T0 &&iter, long num_elts); DEFINE_FUNCTOR(pythonic::itertools, combinations); } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined::value_type>>::type; }; /* } */ #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/itertools/common.hpp000066400000000000000000000002661416264035500247450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_COMMON_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_COMMON_HPP PYTHONIC_NS_BEGIN namespace itertools { struct npos { }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/itertools/count.hpp000066400000000000000000000033221416264035500246010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_COUNT_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_COUNT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/combined.hpp" #include PYTHONIC_NS_BEGIN namespace itertools { namespace details { template struct count_iterator : std::iterator { T value; T step; count_iterator() = default; count_iterator(T value, T step); T operator*() const; count_iterator &operator++(); count_iterator &operator+=(long n); bool operator!=(count_iterator const &other) const; bool operator==(count_iterator const &other) const; bool operator<(count_iterator const &other) const; long operator-(count_iterator const &other) const; }; template struct count : count_iterator { using value_type = T; using iterator = count_iterator; count() = default; count(T value, T step); iterator &begin(); iterator const &begin() const; iterator end() const; }; } template details::count::type> count(T0 start, T1 step = 1); details::count count(); DEFINE_FUNCTOR(pythonic::itertools, count); } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined::value_type>>::type; }; /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/itertools/ifilter.hpp000066400000000000000000000056541416264035500251210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_IFILTER_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_IFILTER_HPP #include "pythonic/include/utils/iterator.hpp" #include 
"pythonic/include/itertools/common.hpp" #include "pythonic/include/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace itertools { namespace details { template struct ifilter_iterator : std::iterator { using sequence_type = typename std::remove_cv< typename std::remove_reference::type>::type; Operator op; typename List0::iterator iter; // FIXME : iter_end should be const because ifilter should be evaluate // only once. Some tests doesn't work with it for now because of // uncorrect itertools.product implementation typename List0::iterator iter_end; bool test_filter(std::true_type); bool test_filter(std::false_type); ifilter_iterator() = default; ifilter_iterator(Operator _op, List0 &_seq); ifilter_iterator(npos, Operator _op, List0 &_seq); typename List0::value_type operator*() const; ifilter_iterator &operator++(); void next_value(); bool operator==(ifilter_iterator const &other) const; bool operator!=(ifilter_iterator const &other) const; bool operator<(ifilter_iterator const &other) const; }; // Inherit from iterator_reminder to keep a reference on the iterator // && avoid a dangling reference // FIXME: It would be better to have a copy only if needed but Pythran // typing is ! 
good enough for this as arguments have // remove_cv/remove_ref template struct ifilter : utils::iterator_reminder, ifilter_iterator { using value_type = typename List0::value_type; using iterator = ifilter_iterator; iterator end_iter; ifilter() = default; ifilter(Operator _op, List0 const &_seq); iterator &begin(); iterator const &begin() const; iterator const &end() const; }; } template details::ifilter::type>::type, typename std::remove_cv< typename std::remove_reference::type>::type> ifilter(Operator &&_op, List0 &&_seq); DEFINE_FUNCTOR(pythonic::itertools, ifilter); } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined::value_type>>::type; }; /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/itertools/islice.hpp000066400000000000000000000047161416264035500247310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_ISLICE_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_ISLICE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/itertools/common.hpp" #include "pythonic/include/builtins/range.hpp" #include PYTHONIC_NS_BEGIN namespace itertools { template struct islice_iterator : std::iterator::value_type> { typename std::remove_reference< typename std::remove_cv::type>::type iterable_ref; typename std::remove_reference< typename std::remove_cv::type>::type::iterator iterable; builtins::range xr_ref; builtins::range_iterator state; builtins::range_iterator::value_type prev; islice_iterator(); islice_iterator(Iterable const &iterable, builtins::range const &xr); islice_iterator(npos const &n, Iterable const &iterable, builtins::range const &xr); typename Iterable::value_type operator*() const; islice_iterator &operator++(); bool operator==(islice_iterator const &other) const; bool operator!=(islice_iterator const &other) const; bool operator<(islice_iterator const &other) const; int operator-(islice_iterator const &other) const; }; 
template struct _islice : islice_iterator { using iterator = islice_iterator; using value_type = typename Iterable::value_type; iterator end_iter; _islice(); _islice(Iterable const &iterable, builtins::range const &xr); iterator &begin(); iterator const &begin() const; iterator end() const; }; template _islice::type>::type> islice(Iterable &&iterable, long start, long stop, long step = 1); template _islice::type>::type> islice(Iterable &&iterable, long stop); DEFINE_FUNCTOR(pythonic::itertools, islice); } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined< E, container::value_type>>::type; }; /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/itertools/permutations.hpp000066400000000000000000000057741416264035500262200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_PERMUTATIONS_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_PERMUTATIONS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/dynamic_tuple.hpp" #include #include PYTHONIC_NS_BEGIN namespace itertools { /** Permutation iterator * * It wraps a vector && provide an iteration over every possible * permutation of the vector. The permutations are represented as *dynamic_tuple * of elements. 
* * The following iterator: * * permutations_iterator([0, 1, 2]) * * yields the following suite: * * [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] * */ template struct permutations_iterator : std::iterator, ptrdiff_t, types::dynamic_tuple *, types::dynamic_tuple /* no ref*/ > { // Vector of inputs, contains elements to permute std::vector pool; // The current permutation as a dynamic_tuple of index in the pool // Internally it always has the same size as the pool, even if the // external view is limited std::vector curr_permut; // Size of the "visible" permutation size_t _size; bool end; // sentinel marker permutations_iterator(); permutations_iterator(std::vector const &iter, size_t num_elts, bool end); /** Build the permutation visible from the "outside" */ types::dynamic_tuple operator*() const; /* Generate next permutation * * If the size of the permutation is smaller than the size of the * pool, we may have to iterate multiple times */ permutations_iterator &operator++(); bool operator!=(permutations_iterator const &other) const; bool operator==(permutations_iterator const &other) const; bool operator<(permutations_iterator const &other) const; }; template // FIXME document why this inheritance??? 
struct _permutations : permutations_iterator { using iterator = permutations_iterator; using value_type = typename iterator::value_type; _permutations(); _permutations(T iter, long elts); iterator const &begin() const; iterator begin(); iterator end() const; }; template _permutations permutations(T0 iter, long num_elts); template _permutations permutations(T0 iter); DEFINE_FUNCTOR(pythonic::itertools, permutations); } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined< E, container< typename pythonic::itertools::_permutations::value_type>>::type; }; /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/itertools/product.hpp000066400000000000000000000054241416264035500251360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_PRODUCT_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_PRODUCT_HPP #include "pythonic/include/utils/iterator.hpp" #include "pythonic/include/utils/seq.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/itertools/common.hpp" #include "pythonic/include/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace itertools { namespace details { // FIXME : should be a combined_iterator_tag template struct product_iterator : std::iterator> { std::tuple const it_begin; std::tuple const it_end; std::tuple it; bool end; product_iterator() = default; template product_iterator(std::tuple &_iters, utils::index_sequence const &); template product_iterator(npos, std::tuple &_iters, utils::index_sequence const &); types::make_tuple_t operator*() const; product_iterator &operator++(); bool operator==(product_iterator const &other) const; bool operator!=(product_iterator const &other) const; bool operator<(product_iterator const &other) const; private: template void advance(utils::int_); void advance(utils::int_<0>); template types::make_tuple_t get_value(utils::index_sequence const &) const; }; template struct product : 
utils::iterator_reminder, product_iterator { using value_type = types::make_tuple_t; using iterator = product_iterator; iterator end_iter; product() = default; product(Iters const &... _iters); iterator &begin(); iterator const &begin() const; iterator const &end() const; }; } template details::product::type>::type...> product(Iter &&... iters); DEFINE_FUNCTOR(pythonic::itertools, product); } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined::value_type>>::type; }; /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/itertools/repeat.hpp000066400000000000000000000030261416264035500247320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_ITERTOOLS_REPEAT_HPP #define PYTHONIC_INCLUDE_ITERTOOLS_REPEAT_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace itertools { template struct repeat_iterator : std::iterator { T value_; long count_; repeat_iterator(T value, long count); repeat_iterator &operator++(); T operator*(); bool operator!=(repeat_iterator const &other) const; bool operator==(repeat_iterator const &other) const; bool operator<(repeat_iterator const &other) const; }; template struct _repeat : repeat_iterator { using iterator = repeat_iterator; using value_type = typename iterator::value_type; _repeat() = default; _repeat(T value, long count); iterator begin() const; iterator end() const; }; template _repeat repeat(T value, long num_elts); template _repeat repeat(T iter); DEFINE_FUNCTOR(pythonic::itertools, repeat); } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined> { using type = typename __combined< E, container::value_type>>::type; }; /* } */ #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/math/000077500000000000000000000000001416264035500216455ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/math/acos.hpp000066400000000000000000000003621416264035500233040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ACOS_HPP #define PYTHONIC_INCLUDE_MATH_ACOS_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(acos, std::acos); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/acosh.hpp000066400000000000000000000003661416264035500234600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ACOSH_HPP #define PYTHONIC_INCLUDE_MATH_ACOSH_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(acosh, std::acosh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/asin.hpp000066400000000000000000000003621416264035500233110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ASIN_HPP #define PYTHONIC_INCLUDE_MATH_ASIN_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(asin, std::asin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/asinh.hpp000066400000000000000000000003661416264035500234650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ASINH_HPP #define PYTHONIC_INCLUDE_MATH_ASINH_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(asinh, std::asinh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/atan.hpp000066400000000000000000000003621416264035500233020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ATAN_HPP #define PYTHONIC_INCLUDE_MATH_ATAN_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(atan, std::atan); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/math/atan2.hpp000066400000000000000000000003661416264035500233700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ATAN2_HPP #define PYTHONIC_INCLUDE_MATH_ATAN2_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(atan2, std::atan2); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/atanh.hpp000066400000000000000000000003661416264035500234560ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ATANH_HPP #define PYTHONIC_INCLUDE_MATH_ATANH_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(atanh, std::atanh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/ceil.hpp000066400000000000000000000004141416264035500232710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_CEIL_HPP #define PYTHONIC_INCLUDE_MATH_CEIL_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace math { template long ceil(T x); DEFINE_FUNCTOR(pythonic::math, ceil); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/copysign.hpp000066400000000000000000000004021416264035500242050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_COPYSIGN_HPP #define PYTHONIC_INCLUDE_MATH_COPYSIGN_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(copysign, std::copysign); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/cos.hpp000066400000000000000000000003561416264035500231460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_COS_HPP #define PYTHONIC_INCLUDE_MATH_COS_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(cos, std::cos); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/math/cosh.hpp000066400000000000000000000003621416264035500233130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_COSH_HPP #define PYTHONIC_INCLUDE_MATH_COSH_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(cosh, std::cosh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/degrees.hpp000066400000000000000000000004331416264035500237740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_DEGREES_HPP #define PYTHONIC_INCLUDE_MATH_DEGREES_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace math { template double degrees(T x); DEFINE_FUNCTOR(pythonic::math, degrees); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/e.hpp000066400000000000000000000003531416264035500226030ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_E_HPP #define PYTHONIC_INCLUDE_MATH_E_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { double constexpr e = std::exp(1); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/erf.hpp000066400000000000000000000003561416264035500231360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ERF_HPP #define PYTHONIC_INCLUDE_MATH_ERF_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(erf, std::erf); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/erfc.hpp000066400000000000000000000003621416264035500232760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ERFC_HPP #define PYTHONIC_INCLUDE_MATH_ERFC_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(erfc, std::erfc); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/exp.hpp000066400000000000000000000003561416264035500231560ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_MATH_EXP_HPP #define PYTHONIC_INCLUDE_MATH_EXP_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(exp, std::exp); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/expm1.hpp000066400000000000000000000003661416264035500234150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_EXPM1_HPP #define PYTHONIC_INCLUDE_MATH_EXPM1_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(expm1, std::expm1); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/fabs.hpp000066400000000000000000000003621416264035500232720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_FABS_HPP #define PYTHONIC_INCLUDE_MATH_FABS_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(fabs, std::fabs); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/factorial.hpp000066400000000000000000000004361416264035500243250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_FACTORIAL_HPP #define PYTHONIC_INCLUDE_MATH_FACTORIAL_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace math { template T factorial(T x); DEFINE_FUNCTOR(pythonic::math, factorial); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/floor.hpp000066400000000000000000000004201416264035500234730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_FLOOR_HPP #define PYTHONIC_INCLUDE_MATH_FLOOR_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace math { template long floor(T x); DEFINE_FUNCTOR(pythonic::math, floor); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/fmod.hpp000066400000000000000000000003621416264035500233040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_FMOD_HPP #define PYTHONIC_INCLUDE_MATH_FMOD_HPP #include 
"pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(fmod, std::fmod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/frexp.hpp000066400000000000000000000005211416264035500235000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_FREXP_HPP #define PYTHONIC_INCLUDE_MATH_FREXP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/tuple.hpp" #include PYTHONIC_NS_BEGIN namespace math { std::tuple frexp(double x); DEFINE_FUNCTOR(pythonic::math, frexp); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/gamma.hpp000066400000000000000000000004221416264035500234360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_GAMMA_HPP #define PYTHONIC_INCLUDE_MATH_GAMMA_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { double gamma(double x); DEFINE_FUNCTOR(pythonic::math, gamma); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/hypot.hpp000066400000000000000000000005331416264035500235220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_HYPOT_HPP #define PYTHONIC_INCLUDE_MATH_HYPOT_HPP #include "pythonic/include/utils/functor.hpp" #include #undef hypot // This is a windows defined macro that clash with std::hypot && our hypot // function PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(hypot, std::hypot); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/isinf.hpp000066400000000000000000000005101416264035500234620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ISINF_HPP #define PYTHONIC_INCLUDE_MATH_ISINF_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { template bool isinf(T const &v) { return std::isinf(v); } DEFINE_FUNCTOR(pythonic::math, isinf); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/math/isnan.hpp000066400000000000000000000003661416264035500234730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_ISNAN_HPP #define PYTHONIC_INCLUDE_MATH_ISNAN_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(isnan, std::isnan); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/ldexp.hpp000066400000000000000000000003661416264035500234770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_LDEXP_HPP #define PYTHONIC_INCLUDE_MATH_LDEXP_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(ldexp, std::ldexp); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/lgamma.hpp000066400000000000000000000003721416264035500236160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_LGAMMA_HPP #define PYTHONIC_INCLUDE_MATH_LGAMMA_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(lgamma, std::lgamma); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/log.hpp000066400000000000000000000004511416264035500231370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_LOG_HPP #define PYTHONIC_INCLUDE_MATH_LOG_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { using std::log; double log(double x, double base); DEFINE_FUNCTOR(pythonic::math, log); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/log10.hpp000066400000000000000000000003661416264035500233050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_LOG10_HPP #define PYTHONIC_INCLUDE_MATH_LOG10_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(log10, std::log10); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/math/log1p.hpp000066400000000000000000000003661416264035500234050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_LOG1P_HPP #define PYTHONIC_INCLUDE_MATH_LOG1P_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(log1p, std::log1p); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/modf.hpp000066400000000000000000000005171416264035500233060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_MODF_HPP #define PYTHONIC_INCLUDE_MATH_MODF_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/tuple.hpp" #include PYTHONIC_NS_BEGIN namespace math { std::tuple modf(double x); DEFINE_FUNCTOR(pythonic::math, modf); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/pi.hpp000066400000000000000000000005301416264035500227640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_PI_HPP #define PYTHONIC_INCLUDE_MATH_PI_HPP PYTHONIC_NS_BEGIN namespace math { // see https://meetingcpp.com/blog/items/cpp-and-pi.html double constexpr pi = 3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651e+00; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/pow.hpp000066400000000000000000000003561416264035500231670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_POW_HPP #define PYTHONIC_INCLUDE_MATH_POW_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(pow, std::pow); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/radians.hpp000066400000000000000000000004711416264035500240010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_RADIANS_HPP #define PYTHONIC_INCLUDE_MATH_RADIANS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/math/pi.hpp" PYTHONIC_NS_BEGIN namespace math { template double radians(T 
x); DEFINE_FUNCTOR(pythonic::math, radians); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/sin.hpp000066400000000000000000000003561416264035500231530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_SIN_HPP #define PYTHONIC_INCLUDE_MATH_SIN_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(sin, std::sin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/sinh.hpp000066400000000000000000000003621416264035500233200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_SINH_HPP #define PYTHONIC_INCLUDE_MATH_SINH_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(sinh, std::sinh); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/sqrt.hpp000066400000000000000000000003621416264035500233500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_SQRT_HPP #define PYTHONIC_INCLUDE_MATH_SQRT_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(sqrt, std::sqrt); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/tan.hpp000066400000000000000000000003561416264035500231440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_TAN_HPP #define PYTHONIC_INCLUDE_MATH_TAN_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(tan, std::tan); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/math/tanh.hpp000066400000000000000000000003621416264035500233110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_TANH_HPP #define PYTHONIC_INCLUDE_MATH_TANH_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { DEFINE_FUNCTOR_2(tanh, std::tanh); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/math/trunc.hpp000066400000000000000000000004201416264035500235050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_MATH_TRUNC_HPP #define PYTHONIC_INCLUDE_MATH_TRUNC_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace math { template long trunc(T x); DEFINE_FUNCTOR(pythonic::math, trunc); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/000077500000000000000000000000001416264035500220645ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/.hpp000066400000000000000000000001201416264035500226450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY__HPP #define PYTHONIC_INCLUDE_NUMPY__HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/NINF.hpp000066400000000000000000000003431416264035500233270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NINF_HPP #define PYTHONIC_INCLUDE_NUMPY_NINF_HPP #include PYTHONIC_NS_BEGIN namespace numpy { double const NINF = -std::numeric_limits::infinity(); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/abs.hpp000066400000000000000000000007541416264035500233500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ABS_HPP #define PYTHONIC_INCLUDE_NUMPY_ABS_HPP #include "pythonic/include/types/numpy_op_helper.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME abs #define NUMPY_NARY_FUNC_SYM xsimd::abs #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/absolute.hpp000066400000000000000000000004411416264035500244120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ABSOLUTE_HPP #define PYTHONIC_INCLUDE_NUMPY_ABSOLUTE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/abs.hpp" 
PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(absolute, numpy::functor::abs); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/add.hpp000066400000000000000000000010171416264035500233240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ADD_HPP #define PYTHONIC_INCLUDE_NUMPY_ADD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/add.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME add #define NUMPY_NARY_FUNC_SYM pythonic::operator_::add #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/add/000077500000000000000000000000001416264035500226145ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/add/accumulate.hpp000066400000000000000000000002731416264035500254520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ADD_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_ADD_ACCUMULATE_HPP #define UFUNC_NAME add #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/add/reduce.hpp000066400000000000000000000003551416264035500245770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ADD_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_ADD_REDUCE_HPP #define UFUNC_NAME add #define UFUNC_INAME iadd #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/alen.hpp000066400000000000000000000005031416264035500235120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ALEN_HPP #define PYTHONIC_INCLUDE_NUMPY_ALEN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template long alen(T &&expr); 
DEFINE_FUNCTOR(pythonic::numpy, alen); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/all.hpp000066400000000000000000000023161416264035500233470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ALL_HPP #define PYTHONIC_INCLUDE_NUMPY_ALL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/multiply.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, bool>::type all(E const &expr, types::none_type _ = types::none_type()); template typename std::enable_if< std::is_scalar::value || types::is_complex::value, bool>::type all(E const &expr, types::none_type _ = types::none_type()); template auto all(E const &array, long axis) -> typename std::enable_if::value || types::is_complex::value, decltype(all(array))>::type; template auto all(E const &array, long axis) -> typename std::enable_if::type; template typename std::enable_if< E::value != 1, types::ndarray>>::type all(E const &array, long axis); DEFINE_FUNCTOR(pythonic::numpy, all); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/allclose.hpp000066400000000000000000000007531416264035500244000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ALLCLOSE_HPP #define PYTHONIC_INCLUDE_NUMPY_ALLCLOSE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/abs.hpp" #include "pythonic/include/numpy/isfinite.hpp" PYTHONIC_NS_BEGIN namespace numpy { template bool allclose(U const &u, V const &v, double rtol = 1e-5, double atol = 1e-8); DEFINE_FUNCTOR(pythonic::numpy, allclose); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/alltrue.hpp000066400000000000000000000005431416264035500242470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ALLTRUE_HPP #define PYTHONIC_INCLUDE_NUMPY_ALLTRUE_HPP #include "pythonic/include/numpy/all.hpp" PYTHONIC_NS_BEGIN 
namespace numpy { template auto alltrue(Types &&... types) -> decltype(all(std::forward(types)...)); DEFINE_FUNCTOR(pythonic::numpy, alltrue); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/amax.hpp000066400000000000000000000003301416264035500235170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_AMAX_HPP #define PYTHONIC_INCLUDE_NUMPY_AMAX_HPP #include "pythonic/include/numpy/max.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(amax, max); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/amin.hpp000066400000000000000000000003271416264035500235230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_AMIN_HPP #define PYTHONIC_INCLUDE_NUMPY_AMIN_HPP #include "pythonic/include/numpy/min.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(amin, min); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/angle.hpp000066400000000000000000000011621416264035500236630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ANGLE_HPP #define PYTHONIC_INCLUDE_NUMPY_ANGLE_HPP #include "pythonic/include/numpy/angle_in_deg.hpp" #include "pythonic/include/numpy/angle_in_rad.hpp" #include "pythonic/include/types/assignable.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto angle(T const &t, bool in_deg) -> typename assignable::type; // Numpy_expr can be use if only the first argument is given. 
template auto angle(T const &t) -> decltype(functor::angle_in_rad()(t)); DEFINE_FUNCTOR(pythonic::numpy, angle); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/angle_in_deg.hpp000066400000000000000000000013671416264035500251770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ANGLEINDEG_HPP #define PYTHONIC_INCLUDE_NUMPY_ANGLEINDEG_HPP #include "pythonic/include/numpy/angle_in_rad.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/pi.hpp" /* NOTE: angle_in_deg is not part of the official Numpy API, * this file is here only to split the angle function in two parts */ PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto angle_in_deg(T const &t) -> decltype(angle_in_rad(t) * 180 / pi) { return angle_in_rad(t) * 180 / pi; } } #define NUMPY_NARY_FUNC_NAME angle_in_deg #define NUMPY_NARY_FUNC_SYM wrapper::angle_in_deg #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/angle_in_rad.hpp000066400000000000000000000014021416264035500251740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ANGLEINRAD_HPP #define PYTHONIC_INCLUDE_NUMPY_ANGLEINRAD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/arctan.hpp" /* NOTE: angle_in_rad is not part of the official Numpy API, * this file is here only to split the angle function in two parts */ PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto angle_in_rad(T const &t) -> decltype(std::atan2(std::imag(t), std::real(t))); } #define NUMPY_NARY_FUNC_NAME angle_in_rad #define NUMPY_NARY_FUNC_SYM wrapper::angle_in_rad #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/any.hpp000066400000000000000000000023111416264035500233610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ANY_HPP #define PYTHONIC_INCLUDE_NUMPY_ANY_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/add.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, bool>::type any(E const &expr, types::none_type _ = types::none_type()); template typename std::enable_if< std::is_scalar::value || types::is_complex::value, bool>::type any(E const &expr, types::none_type _ = types::none_type()); template auto any(E const &array, long axis) -> typename std::enable_if::value || types::is_complex::value, decltype(any(array))>::type; template auto any(E const &array, long axis) -> typename std::enable_if::type; template typename std::enable_if< E::value != 1, types::ndarray>>::type any(E const &array, long axis); DEFINE_FUNCTOR(pythonic::numpy, any); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/append.hpp000066400000000000000000000021551416264035500240470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_APPEND_HPP #define PYTHONIC_INCLUDE_NUMPY_APPEND_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< !types::is_dtype::value, types::ndarray< typename __combined::type>::type, types::pshape>>::type append(types::ndarray const &nto, F const &data); template typename std::enable_if< types::is_dtype::value, types::ndarray< typename __combined::type>::type, types::pshape>>::type append(types::ndarray const &nto, F const &data); template types::ndarray::type, typename types::dtype_of::type>::type, types::pshape> append(T const &to, F const &data); DEFINE_FUNCTOR(pythonic::numpy, append); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arange.hpp000066400000000000000000000122021416264035500240270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARANGE_HPP #define PYTHONIC_INCLUDE_NUMPY_ARANGE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/pos.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { #ifdef USE_XSIMD template struct arange_simd_iterator { using vector_type = xsimd::simd_type; vector_type curr_; vector_type step_; long index_; arange_simd_iterator(T start, T step, long n) : curr_{}, step_{static_cast(vector_type::size * step)}, index_{static_cast(n / vector_type::size)} { T from[vector_type::size]; for (size_t i = 0; i < vector_type::size; ++i) from[i] = start + i * step; curr_ = xsimd::load_unaligned(from); } vector_type operator*() const { return curr_; } arange_simd_iterator &operator++() { curr_ += step_; ++index_; return *this; } arange_simd_iterator &operator+=(long n) { curr_ += n * step_; index_ += n; return *this; } arange_simd_iterator operator+(long n) const { arange_simd_iterator other{*this}; return other += n; } arange_simd_iterator &operator--() { curr_ -= step_; --index_; return *this; } long operator-(arange_simd_iterator const &other) const { return index_ - other.index_; } bool operator!=(arange_simd_iterator const &other) const { return index_ != other.index_; } bool operator==(arange_simd_iterator const &other) const { return index_ == other.index_; } bool operator<(arange_simd_iterator const &other) const { return index_ < other.index_; } arange_simd_iterator & operator=(arange_simd_iterator const &other) = default; }; #endif template struct arange_index { T start, step; long size; using iterator = types::nditerator; using const_iterator = types::const_nditerator; using dtype = T; using value_type = dtype; using shape_t = types::pshape; #ifdef USE_XSIMD using simd_iterator = arange_simd_iterator; using 
simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const { return {start, step, 0}; } template simd_iterator vend(vectorizer) const { return {static_cast(start + size * step), step, size}; } #endif static constexpr size_t value = 1; static constexpr bool is_strided = false; static constexpr bool is_vectorizable = types::is_vectorizable::value; T fast(long i) const { return start + i * step; } dtype load(long i) const { return fast(i); } template long shape() const { return size; } types::ndarray operator[](types::slice s) const { auto ns = s.normalize(size); arange_index r{start + s.lower * step, step * ns.step, ns.size()}; return { types::numpy_expr{ r}}; } types::ndarray operator()(types::slice s) const { return operator[](s); } types::ndarray operator[](types::contiguous_slice s) const { auto ns = s.normalize(size); arange_index r{start + s.lower * step, step, ns.size()}; return { types::numpy_expr{ r}}; } types::ndarray operator()(types::contiguous_slice s) const { return operator[](s); } template auto operator()(S const &... 
s) const -> typename std::enable_if< (sizeof...(S) > 1), decltype(std::declval>()(s...))>::type { return types::ndarray{ types::numpy_expr{ *this}}(s...); } const_iterator begin() const { return {*this, 0}; } const_iterator end() const { return {*this, size}; } iterator begin() { return {*this, 0}; } iterator end() { return {*this, size}; } }; } template ::type>> types::numpy_expr> arange(T begin, U end, S step = S(1), dtype d = dtype()); template types::numpy_expr::type>> arange(T end); DEFINE_FUNCTOR(pythonic::numpy, arange); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arccos.hpp000066400000000000000000000007041416264035500240500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCCOS_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCCOS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arccos #define NUMPY_NARY_FUNC_SYM xsimd::acos #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arccosh.hpp000066400000000000000000000007061416264035500242220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCCOSH_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCCOSH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arccosh #define NUMPY_NARY_FUNC_SYM xsimd::acosh #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arcsin.hpp000066400000000000000000000007031416264035500240540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCSIN_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCSIN_HPP #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arcsin #define NUMPY_NARY_FUNC_SYM xsimd::asin #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arcsinh.hpp000066400000000000000000000007071416264035500242300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCSINH_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCSINH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arcsinh #define NUMPY_NARY_FUNC_SYM xsimd::asinh #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arctan.hpp000066400000000000000000000007021416264035500240440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCTAN_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCTAN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arctan #define NUMPY_NARY_FUNC_SYM xsimd::atan #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arctan2.hpp000066400000000000000000000007751416264035500241400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCTAN2_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCTAN2_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arctan2 #define NUMPY_NARY_FUNC_SYM xsimd::atan2 #include 
"pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arctan2/000077500000000000000000000000001416264035500234165ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arctan2/accumulate.hpp000066400000000000000000000003071416264035500262520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCTAN2_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCTAN2_ACCUMULATE_HPP #define UFUNC_NAME arctan2 #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/arctanh.hpp000066400000000000000000000007061416264035500242200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARCTANH_HPP #define PYTHONIC_INCLUDE_NUMPY_ARCTANH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arctanh #define NUMPY_NARY_FUNC_SYM xsimd::atanh #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/argmax.hpp000066400000000000000000000010021416264035500240450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARGMAX_HPP #define PYTHONIC_INCLUDE_NUMPY_ARGMAX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template long argmax(E const &expr); template types::ndarray> argmax(E const &expr, long axis); DEFINE_FUNCTOR(pythonic::numpy, argmax); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/argmin.hpp000066400000000000000000000010011416264035500240420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARGMIN_HPP #define PYTHONIC_INCLUDE_NUMPY_ARGMIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { 
template long argmin(E const &expr); template types::ndarray> argmin(E const &expr, long axis); DEFINE_FUNCTOR(pythonic::numpy, argmin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/argsort.hpp000066400000000000000000000006541416264035500242630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARGSORT_HPP #define PYTHONIC_INCLUDE_NUMPY_ARGSORT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray argsort(types::ndarray const &a); NUMPY_EXPR_TO_NDARRAY0_DECL(argsort); DEFINE_FUNCTOR(pythonic::numpy, argsort); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/argwhere.hpp000066400000000000000000000006101416264035500243760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARGWHERE_HPP #define PYTHONIC_INCLUDE_NUMPY_ARGWHERE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename types::ndarray> argwhere(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, argwhere); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/around.hpp000066400000000000000000000032611416264035500240670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_AROUND_HPP #define PYTHONIC_INCLUDE_NUMPY_AROUND_HPP #include "pythonic/include/numpy/rint.hpp" #include "pythonic/include/numpy/floor_divide.hpp" #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/numpy/multiply.hpp" PYTHONIC_NS_BEGIN namespace numpy { // fast path template auto around(E &&a) -> decltype(functor::rint{}(std::forward(a))); // generic floating point version, pure numpy_expr template auto around(E &&a, long decimals) -> typename std::enable_if< !std::is_integral< typename types::dtype_of::type>::type>::value, decltype(functor::rint{}(functor::multiply{}( std::forward(a), 
std::declval::type>::type>())) / std::declval::type>::type>())>::type; // the integer version is only relevant when decimals < 0 template auto around(E &&a, long decimals) -> typename std::enable_if< std::is_integral< typename types::dtype_of::type>::type>::value, decltype(numpy::functor::floor_divide{}( functor::float64{}(std::forward(a)), std::declval::type>::type>()) * std::declval::type>::type>())>::type; DEFINE_FUNCTOR(pythonic::numpy, around); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/array.hpp000066400000000000000000000042451416264035500237200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARRAY_HPP #define PYTHONIC_INCLUDE_NUMPY_ARRAY_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/nested_container.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template ::type::dtype>> typename std::enable_if< types::has_size::type>::value, types::ndarray::type::value>>>::type array(T &&iterable, dtype d = dtype()); template ::type::dtype>> typename std::enable_if< !types::has_size::type>::value && !types::is_dtype::type>::value, types::ndarray::type::value>>>::type array(T &&iterable, dtype d = dtype()); template ::type>::type>> typename std::enable_if< !types::has_size::type>::value && types::is_dtype::type>::value, typename dtype::type>::type array(T &&non_iterable, dtype d = dtype()); template types::ndarray>> array(std::tuple<>, dtype); template types::ndarray array(types::ndarray const &arr); template > types::ndarray::shape_t> array(types::array_base const &, dtype d = dtype()); template > types::ndarray::shape_t> array(types::array_base &&, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, array); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/array2string.hpp000066400000000000000000000006201416264035500252220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARRAY2STRING_HPP #define 
PYTHONIC_INCLUDE_NUMPY_ARRAY2STRING_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::str array2string(E &&a); DEFINE_FUNCTOR(pythonic::numpy, array2string); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/array_equal.hpp000066400000000000000000000005651416264035500251100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARRAYEQUAL_HPP #define PYTHONIC_INCLUDE_NUMPY_ARRAYEQUAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template bool array_equal(U const &u, V const &v); DEFINE_FUNCTOR(pythonic::numpy, array_equal); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/array_equiv.hpp000066400000000000000000000012741416264035500251300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARRAYEQUIV_HPP #define PYTHONIC_INCLUDE_NUMPY_ARRAYEQUIV_HPP #include "pythonic/include/numpy/array_equal.hpp" #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::type array_equiv(U const &u, V const &v); template typename std::enable_if < U::value::type array_equiv(U const &u, V const &v); template typename std::enable_if<(U::value > V::value), bool>::type array_equiv(U const &u, V const &v); DEFINE_FUNCTOR(pythonic::numpy, array_equiv); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/array_split.hpp000066400000000000000000000014271416264035500251320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARRAYSPLIT_HPP #define PYTHONIC_INCLUDE_NUMPY_ARRAYSPLIT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::list::value>>> array_split(types::ndarray const &a, long nb_split); template typename 
std::enable_if< types::is_iterable::value, types::list::value>>>>::type array_split(types::ndarray const &a, I const &split_mask); NUMPY_EXPR_TO_NDARRAY0_DECL(array_split); DEFINE_FUNCTOR(pythonic::numpy, array_split); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/array_str.hpp000066400000000000000000000003661416264035500246100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ARRAYSTR_HPP #define PYTHONIC_INCLUDE_NUMPY_ARRAYSTR_HPP #include "pythonic/include/numpy/array2string.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(array_str, array2string); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/asarray.hpp000066400000000000000000000022161416264035500242400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ASARRAY_HPP #define PYTHONIC_INCLUDE_NUMPY_ASARRAY_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/array.hpp" PYTHONIC_NS_BEGIN namespace numpy { template struct _asarray { template auto operator()(Types &&... 
args) -> decltype(array(std::forward(args)...)); }; template struct _asarray, T> { template F &&operator()(F &&a, dtype d = types::none_type()); }; template auto asarray(E &&e, types::none_type d = types::none_type()) -> decltype( _asarray::type, typename types::dtype_of::type>::type>{}( std::forward(e))); template auto asarray(E &&e, dtype d) -> decltype(_asarray::type, typename dtype::type>{}(std::forward(e), d)); DEFINE_FUNCTOR(pythonic::numpy, asarray); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/asarray_chkfinite.hpp000066400000000000000000000010571416264035500262660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ASARRAYCHKFINITE_HPP #define PYTHONIC_INCLUDE_NUMPY_ASARRAYCHKFINITE_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/isfinite.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template I asarray_chkfinite(I const &a); } #define NUMPY_NARY_FUNC_NAME asarray_chkfinite #define NUMPY_NARY_FUNC_SYM wrapper::asarray_chkfinite #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ascontiguousarray.hpp000066400000000000000000000004071416264035500263600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ASCONTIGUOUSARRAY_HPP #define PYTHONIC_INCLUDE_NUMPY_ASCONTIGUOUSARRAY_HPP #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(ascontiguousarray, asarray); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/asfarray.hpp000066400000000000000000000006641416264035500244130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ASFARRAY_HPP #define PYTHONIC_INCLUDE_NUMPY_ASFARRAY_HPP #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto asfarray(E &&e, dtype d = dtype()) -> 
decltype(asarray(std::forward(e), d)); DEFINE_FUNCTOR(pythonic::numpy, asfarray); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/asscalar.hpp000066400000000000000000000012571416264035500243730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ASSCALAR_HPP #define PYTHONIC_INCLUDE_NUMPY_ASSCALAR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template using asscalar_result_type = typename std::conditional< std::is_integral::value, long, typename std::conditional::value, double, std::complex>::type>::type; template asscalar_result_type asscalar(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, asscalar); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/atleast_1d.hpp000066400000000000000000000011321416264035500246130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ATLEAST1D_HPP #define PYTHONIC_INCLUDE_NUMPY_ATLEAST1D_HPP #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< types::is_dtype::value, types::ndarray>>>::type atleast_1d(T t); template auto atleast_1d(T const &t) -> typename std::enable_if::value), decltype(asarray(t))>::type; DEFINE_FUNCTOR(pythonic::numpy, atleast_1d); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/atleast_2d.hpp000066400000000000000000000022561416264035500246240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ATLEAST2D_HPP #define PYTHONIC_INCLUDE_NUMPY_ATLEAST2D_HPP #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< types::is_dtype::value, types::ndarray, std::integral_constant>>>::type atleast_2d(T t); template auto atleast_2d(T const &t) -> typename std::enable_if < (!types::is_dtype::value) && T::value<2, types::ndarray< typename T::dtype, types::pshape, typename 
std::tuple_element< 0, typename T::shape_t>::type>>>::type; template auto atleast_2d(T &&t) -> typename std::enable_if< (!types::is_dtype::type>::type>::value) && std::decay::type::value >= 2, decltype(std::forward(t))>::type; DEFINE_FUNCTOR(pythonic::numpy, atleast_2d); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/atleast_3d.hpp000066400000000000000000000031331416264035500246200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ATLEAST3D_HPP #define PYTHONIC_INCLUDE_NUMPY_ATLEAST3D_HPP #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< types::is_dtype::value, types::ndarray, std::integral_constant, std::integral_constant>>>::type atleast_3d(T t); template auto atleast_3d(T const &t) -> typename std::enable_if< (!types::is_dtype::value) && (T::value == 1), types::ndarray, typename std::tuple_element< 0, typename T::shape_t>::type, std::integral_constant>>>::type; template auto atleast_3d(T const &t) -> typename std::enable_if< (!types::is_dtype::value) && (T::value == 2), types::ndarray< typename T::dtype, types::pshape< typename std::tuple_element<0, typename T::shape_t>::type, typename std::tuple_element<1, typename T::shape_t>::type, std::integral_constant>>>::type; template auto atleast_3d(T const &t) -> typename std::enable_if<(!types::is_dtype::value) && T::value >= 3, decltype(asarray(t))>::type; DEFINE_FUNCTOR(pythonic::numpy, atleast_3d); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/average.hpp000066400000000000000000000013061416264035500242070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_AVERAGE_HPP #define PYTHONIC_INCLUDE_NUMPY_AVERAGE_HPP #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/numpy/sum.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto average(E const &expr, types::none_type const &axis = builtins::None) -> decltype(sum(expr, axis) / 1.); template auto average(E const 
&expr, long axis) -> decltype(sum(expr, axis) / 1.); template auto average(E const &expr, types::none_type const &axis, W const &weights) -> decltype(average(expr *asarray(weights) / average(asarray(weights)))); DEFINE_FUNCTOR(pythonic::numpy, average); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/base_repr.hpp000066400000000000000000000005531416264035500245420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BASEREPR_HPP #define PYTHONIC_INCLUDE_NUMPY_BASEREPR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { types::str base_repr(long number, long base = 2, long padding = 0); DEFINE_FUNCTOR(pythonic::numpy, base_repr); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/binary_repr.hpp000066400000000000000000000006031416264035500251100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BINARYREPR_HPP #define PYTHONIC_INCLUDE_NUMPY_BINARYREPR_HPP #include "pythonic/include/numpy/base_repr.hpp" PYTHONIC_NS_BEGIN namespace numpy { types::str binary_repr(long number, types::none_type width = builtins::None); types::str binary_repr(long number, long width); DEFINE_FUNCTOR(pythonic::numpy, binary_repr); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bincount.hpp000066400000000000000000000020421416264035500244140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BINCOUNT_HPP #define PYTHONIC_INCLUDE_NUMPY_BINCOUNT_HPP #include "pythonic/include/numpy/max.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value == 1, types::ndarray>>::type bincount(types::ndarray const &expr, types::none_type weights = builtins::None, types::none minlength = builtins::None); template typename std::enable_if< std::tuple_size::value == 1, types::ndarray() * std::declval()), types::pshape>>::type bincount(types::ndarray const 
&expr, E const &weights, types::none minlength = builtins::None); NUMPY_EXPR_TO_NDARRAY0_DECL(bincount); DEFINE_FUNCTOR(pythonic::numpy, bincount); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_and.hpp000066400000000000000000000010511416264035500250620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_AND_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_AND_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/and_.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME bitwise_and #define NUMPY_NARY_FUNC_SYM pythonic::operator_::and_ #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_and/000077500000000000000000000000001416264035500243545ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_and/accumulate.hpp000066400000000000000000000003231416264035500272060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_AND_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_AND_ACCUMULATE_HPP #define UFUNC_NAME bitwise_and #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_and/reduce.hpp000066400000000000000000000004051416264035500263330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_AND_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_AND_REDUCE_HPP #define UFUNC_NAME bitwise_and #define UFUNC_INAME iand #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_not.hpp000066400000000000000000000013301416264035500251200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISENOT_HPP #define 
PYTHONIC_INCLUDE_NUMPY_BITWISENOT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template decltype(~std::declval()) bitwise_not(A const &a); bool bitwise_not(bool t0); } #define NUMPY_NARY_FUNC_NAME bitwise_not #define NUMPY_NARY_FUNC_SYM wrapper::bitwise_not #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END // ndarray have to be include after as bitwise_not is used as a numpy_operator #include "pythonic/include/types/ndarray.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_or.hpp000066400000000000000000000007561416264035500247530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_OR_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_OR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/operator_/or_.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME bitwise_or #define NUMPY_NARY_FUNC_SYM pythonic::operator_::or_ #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_or/000077500000000000000000000000001416264035500242325ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_or/accumulate.hpp000066400000000000000000000003201416264035500270610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_OR_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_OR_ACCUMULATE_HPP #define UFUNC_NAME bitwise_or #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_or/reduce.hpp000066400000000000000000000004011416264035500262050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_OR_REDUCE_HPP #define 
PYTHONIC_INCLUDE_NUMPY_BITWISE_OR_REDUCE_HPP #define UFUNC_NAME bitwise_or #define UFUNC_INAME ior #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_xor.hpp000066400000000000000000000010511416264035500251300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_XOR_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_XOR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/xor_.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME bitwise_xor #define NUMPY_NARY_FUNC_SYM pythonic::operator_::xor_ #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_xor/000077500000000000000000000000001416264035500244225ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_xor/accumulate.hpp000066400000000000000000000003231416264035500272540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_XOR_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_XOR_ACCUMULATE_HPP #define UFUNC_NAME bitwise_xor #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bitwise_xor/reduce.hpp000066400000000000000000000004051416264035500264010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BITWISE_XOR_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_BITWISE_XOR_REDUCE_HPP #define UFUNC_NAME bitwise_xor #define UFUNC_INAME ixor #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/bool_.hpp000066400000000000000000000011471416264035500236720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BOOL_HPP #define 
PYTHONIC_INCLUDE_NUMPY_BOOL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { bool bool_(); template bool bool_(V v); } #define NUMPY_NARY_FUNC_NAME bool_ #define NUMPY_NARY_FUNC_SYM details::bool_ #define NUMPY_NARY_EXTRA_METHOD using type = bool; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/broadcast_to.hpp000066400000000000000000000010431416264035500252370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BROADCAST_TO_HPP #define PYTHONIC_INCLUDE_NUMPY_BROADCAST_TO_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/empty.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto broadcast_to(E const &expr, pS shape) -> decltype(numpy::functor::empty{}( shape, typename types::dtype_t::type>{})); DEFINE_FUNCTOR(pythonic::numpy, broadcast_to); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/byte.hpp000066400000000000000000000011431416264035500235370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_BYTE_HPP #define PYTHONIC_INCLUDE_NUMPY_BYTE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { char byte(); template char byte(V v); } #define NUMPY_NARY_FUNC_NAME byte #define NUMPY_NARY_FUNC_SYM details::byte #define NUMPY_NARY_EXTRA_METHOD using type = char; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/cbrt.hpp000066400000000000000000000006751416264035500235370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CBRT_HPP #define PYTHONIC_INCLUDE_NUMPY_CBRT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME cbrt #define NUMPY_NARY_FUNC_SYM xsimd::cbrt #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ceil.hpp000066400000000000000000000006741416264035500235200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CEIL_HPP #define PYTHONIC_INCLUDE_NUMPY_CEIL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME ceil #define NUMPY_NARY_FUNC_SYM xsimd::ceil #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/clip.hpp000066400000000000000000000011511416264035500235220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CLIP_HPP #define PYTHONIC_INCLUDE_NUMPY_CLIP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template typename __combined::type clip(T const &v, Mi a_min, Ma a_max); template typename __combined::type clip(T const &v, Mi a_min); } #define NUMPY_NARY_FUNC_NAME clip #define NUMPY_NARY_FUNC_SYM wrapper::clip #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/complex.hpp000066400000000000000000000012601416264035500242430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COMPLEX_HPP #define 
PYTHONIC_INCLUDE_NUMPY_COMPLEX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/complex.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex(double v = 0, double v2 = 0.); } #define NUMPY_NARY_FUNC_NAME complex #define NUMPY_NARY_FUNC_SYM details::complex #define NUMPY_NARY_EXTRA_METHOD using type = std::complex; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/complex128.hpp000066400000000000000000000013441416264035500245010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COMPLEX128_HPP #define PYTHONIC_INCLUDE_NUMPY_COMPLEX128_HPP #include "pythonic/include/types/complex.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex128(); template std::complex complex128(V v); } #define NUMPY_NARY_FUNC_NAME complex128 #define NUMPY_NARY_FUNC_SYM details::complex128 #define NUMPY_NARY_EXTRA_METHOD using type = std::complex; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/complex256.hpp000066400000000000000000000013631416264035500245040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COMPLEX256_HPP #define PYTHONIC_INCLUDE_NUMPY_COMPLEX256_HPP #include "pythonic/include/types/complex.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex256(); template 
std::complex complex256(V v); } #define NUMPY_NARY_FUNC_NAME complex256 #define NUMPY_NARY_FUNC_SYM details::complex256 #define NUMPY_NARY_EXTRA_METHOD using type = std::complex; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/complex64.hpp000066400000000000000000000014471416264035500244240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COMPLEX64_HPP #define PYTHONIC_INCLUDE_NUMPY_COMPLEX64_HPP #include "pythonic/include/types/complex.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex64(); template std::complex complex64(V v); template std::complex complex64(std::complex v); } #define NUMPY_NARY_FUNC_NAME complex64 #define NUMPY_NARY_FUNC_SYM details::complex64 #define NUMPY_NARY_EXTRA_METHOD using type = std::complex; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/concatenate.hpp000066400000000000000000000016411416264035500250630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CONCATENATE_HPP #define PYTHONIC_INCLUDE_NUMPY_CONCATENATE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> concatenate(types::array_base const &args, long axis = 0); template auto concatenate(std::tuple const &args, long axis = 0) -> types::ndarray< typename __combined::type::dtype...>::type, types::array< long, std::tuple_element<0, std::tuple>::type::value>>; template types::ndarray> concatenate(types::list const &args, long axis = 0); DEFINE_FUNCTOR(pythonic::numpy, concatenate); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/conj.hpp000066400000000000000000000004411416264035500235250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CONJ_HPP #define PYTHONIC_INCLUDE_NUMPY_CONJ_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/conjugate.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(conj, numpy::functor::conjugate); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/conjugate.hpp000066400000000000000000000017501416264035500245570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CONJUGATE_HPP #define PYTHONIC_INCLUDE_NUMPY_CONJUGATE_HPP #include "pythonic/include/types/numpy_op_helper.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template std::complex conjugate(std::complex const &v) { return std::conj(v); } template xsimd::batch, N> conjugate(xsimd::batch, N> const &v) { return xsimd::conj(v); } template xsimd::batch conjugate(xsimd::batch const &v) { return v; } template T conjugate(T const &v) { return v; } } #define NUMPY_NARY_FUNC_NAME conjugate #define NUMPY_NARY_FUNC_SYM wrapper::conjugate #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/convolve.hpp000066400000000000000000000012641416264035500244330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CONVOLVE_HPP #define PYTHONIC_INCLUDE_NUMPY_CONVOLVE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> convolve(A const &inA, B const &inB, U renorm = types::str("full")); template types::ndarray> convolve(A const &inA, B const &inB); NUMPY_EXPR_TO_NDARRAY0_DECL(convolve) DEFINE_FUNCTOR(pythonic::numpy, convolve) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/copy.hpp000066400000000000000000000022421416264035500235470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COPY_HPP #define PYTHONIC_INCLUDE_NUMPY_COPY_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { // list case template typename std::enable_if< !types::is_array::value && !types::is_dtype::value, types::ndarray>>::type copy(E const &v); // scalar / complex case template auto copy(E const &v) -> typename std::enable_if::value, E>::type; // No copy is required for numpy_expr template auto copy(E &&v) -> typename std::enable_if::value, decltype(std::forward(v))>::type; // ndarray case template types::ndarray copy(types::ndarray const &a); // transposed ndarray case template types::numpy_texpr> copy(types::numpy_texpr> const &a); DEFINE_FUNCTOR(pythonic::numpy, copy); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/copysign.hpp000066400000000000000000000010021416264035500244210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COPYSIGN_HPP #define PYTHONIC_INCLUDE_NUMPY_COPYSIGN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME copysign #define NUMPY_NARY_FUNC_SYM xsimd::copysign #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/copysign/000077500000000000000000000000001416264035500237175ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/copysign/accumulate.hpp000066400000000000000000000003121416264035500265470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COPYSIGN_ACCUMULATE_HPP #define 
PYTHONIC_INCLUDE_NUMPY_COPYSIGN_ACCUMULATE_HPP #define UFUNC_NAME copysign #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/copyto.hpp000066400000000000000000000006171416264035500241160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COPYTO_HPP #define PYTHONIC_INCLUDE_NUMPY_COPYTO_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray copyto(types::ndarray &out, E const &expr); DEFINE_FUNCTOR(pythonic::numpy, copyto); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/correlate.hpp000066400000000000000000000010121416264035500245470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CORRELATE_HPP #define PYTHONIC_INCLUDE_NUMPY_CORRELATE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> correlate(A const &inA, B const &inB, types::str const &renorm = types::str("valid")); NUMPY_EXPR_TO_NDARRAY0_DECL(correlate) DEFINE_FUNCTOR(pythonic::numpy, correlate) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/cos.hpp000066400000000000000000000006701416264035500233640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COS_HPP #define PYTHONIC_INCLUDE_NUMPY_COS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME cos #define NUMPY_NARY_FUNC_SYM xsimd::cos #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/cosh.hpp000066400000000000000000000006741416264035500235400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COSH_HPP #define PYTHONIC_INCLUDE_NUMPY_COSH_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME cosh #define NUMPY_NARY_FUNC_SYM xsimd::cosh #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/count_nonzero.hpp000066400000000000000000000015001416264035500254730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_COUNT_NONZERO_HPP #define PYTHONIC_INCLUDE_NUMPY_COUNT_NONZERO_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto _count_nonzero(E begin, E end, long &count, utils::int_<1>) -> typename std::enable_if::value>::type; template auto _count_nonzero(E begin, E end, long &count, utils::int_<1>) -> typename std::enable_if::value>::type; template void _count_nonzero(E begin, E end, long &count, utils::int_); template long count_nonzero(E const &array); DEFINE_FUNCTOR(pythonic::numpy, count_nonzero); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/cross.hpp000066400000000000000000000007261416264035500237330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CROSS_HPP #define PYTHONIC_INCLUDE_NUMPY_CROSS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray< typename __combined::type, types::array> cross(E const &e, F const &f); DEFINE_FUNCTOR(pythonic::numpy, cross); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ctypeslib/000077500000000000000000000000001416264035500240625ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ctypeslib/as_array.hpp000066400000000000000000000012031416264035500263700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CTYPESLIB_AS_ARRAY_HPP #define 
PYTHONIC_INCLUDE_NUMPY_CTYPESLIB_AS_ARRAY_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/pointer.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ctypeslib { template typename std::enable_if::value, types::ndarray>::type as_array(types::pointer, pS); template types::ndarray> as_array(types::pointer, long); DEFINE_FUNCTOR(pythonic::numpy::ctypeslib, as_array); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/cumprod.hpp000066400000000000000000000010761416264035500242520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CUMPROD_HPP #define PYTHONIC_INCLUDE_NUMPY_CUMPROD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/partial_sum.hpp" #include "pythonic/include/operator_/imul.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto cumprod(E &&e, Opts &&... opts) -> decltype(partial_sum( std::forward(e), std::forward(opts)...)); NUMPY_EXPR_TO_NDARRAY0_DECL(cumprod); DEFINE_FUNCTOR(pythonic::numpy, cumprod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/cumproduct.hpp000066400000000000000000000003611416264035500247620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CUMPRODUCT_HPP #define PYTHONIC_INCLUDE_NUMPY_CUMPRODUCT_HPP #include "pythonic/include/numpy/cumprod.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(cumproduct, cumprod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/cumsum.hpp000066400000000000000000000010201416264035500240770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_CUMSUM_HPP #define PYTHONIC_INCLUDE_NUMPY_CUMSUM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/partial_sum.hpp" #include "pythonic/include/operator_/iadd.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto cumsum(E &&e, Opts &&... 
opts) -> decltype(partial_sum( std::forward(e), std::forward(opts)...)); DEFINE_FUNCTOR(pythonic::numpy, cumsum); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/deg2rad.hpp000066400000000000000000000011551416264035500241070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DEG2RAD_HPP #define PYTHONIC_INCLUDE_NUMPY_DEG2RAD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/pi.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto deg2rad(T const &val) -> decltype(val *pi / 180) { return val * pi / 180; } } #define NUMPY_NARY_FUNC_NAME deg2rad #define NUMPY_NARY_FUNC_SYM wrapper::deg2rad #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/degrees.hpp000066400000000000000000000005671416264035500242230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DEGREES_HPP #define PYTHONIC_INCLUDE_NUMPY_DEGREES_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/rad2deg.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(degrees, rad2deg); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/delete_.hpp000066400000000000000000000014151416264035500241770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DELETE_HPP #define PYTHONIC_INCLUDE_NUMPY_DELETE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> delete_(types::ndarray const &a, long index, types::none_type axis = builtins::None); template typename std::enable_if::value, types::ndarray>>::type delete_(types::ndarray const &in, I const &indices, types::none_type axis = builtins::None); 
NUMPY_EXPR_TO_NDARRAY0_DECL(delete_); DEFINE_FUNCTOR(pythonic::numpy, delete_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/diag.hpp000066400000000000000000000016771416264035500235140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DIAG_HPP #define PYTHONIC_INCLUDE_NUMPY_DIAG_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value == 2, types::ndarray>>::type diag(types::ndarray const &a, long k = 0); template typename std::enable_if::value == 1, types::ndarray>>::type diag(types::ndarray const &a, long k = 0); template auto diag(types::list const &a, long k = 0) -> decltype(diag(asarray(a), k)); NUMPY_EXPR_TO_NDARRAY0_DECL(diag); DEFINE_FUNCTOR(pythonic::numpy, diag); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/diagflat.hpp000066400000000000000000000003451416264035500243520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DIAGFLAT_HPP #define PYTHONIC_INCLUDE_NUMPY_DIAGFLAT_HPP #include "pythonic/include/numpy/diag.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(diagflat, diag); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/diagonal.hpp000066400000000000000000000003451416264035500243550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DIAGONAL_HPP #define PYTHONIC_INCLUDE_NUMPY_DIAGONAL_HPP #include "pythonic/include/numpy/diag.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(diagonal, diag); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/diff.hpp000066400000000000000000000007201416264035500235040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DIFF_HPP #define PYTHONIC_INCLUDE_NUMPY_DIFF_HPP #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> diff(E const &expr, long n = 1, long axis = -1); DEFINE_FUNCTOR(pythonic::numpy, diff); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/digitize.hpp000066400000000000000000000007541416264035500244130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DIGITIZE_HPP #define PYTHONIC_INCLUDE_NUMPY_DIGITIZE_HPP #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/operator_/gt.hpp" #include "pythonic/include/operator_/lt.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> digitize(E const &expr, F const &b); DEFINE_FUNCTOR(pythonic::numpy, digitize); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/divide.hpp000066400000000000000000000010301416264035500240330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DIVIDE_HPP #define PYTHONIC_INCLUDE_NUMPY_DIVIDE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/div.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME divide #define NUMPY_NARY_FUNC_SYM pythonic::operator_::div #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/divide/000077500000000000000000000000001416264035500233305ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/divide/accumulate.hpp000066400000000000000000000003041416264035500261610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DIVIDE_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_DIVIDE_ACCUMULATE_HPP #define UFUNC_NAME divide #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/dot.hpp000066400000000000000000000260431416264035500233700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DOT_HPP #define PYTHONIC_INCLUDE_NUMPY_DOT_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/sum.hpp" #include "pythonic/include/types/numpy_expr.hpp" #include "pythonic/include/types/traits.hpp" template struct is_blas_type : pythonic::types::is_complex { }; template <> struct is_blas_type : std::true_type { }; template <> struct is_blas_type : std::true_type { }; template struct is_strided { template static decltype(T::is_strided, std::true_type{}) get(T *); static std::false_type get(...); static constexpr bool value = decltype(get((E *)nullptr))::value; }; template struct is_blas_array { // FIXME: also support gexpr with stride? static constexpr bool value = pythonic::types::is_array::value && is_blas_type>::value && !is_strided::value; }; PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value && types::is_dtype::value, decltype(std::declval() * std::declval())>::type dot(E const &e, F const &f); /// Vector / Vector multiplication template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value && E::value == 1 && F::value == 1 && (!is_blas_array::value || !is_blas_array::value || !std::is_same::value), typename __combined::type>::type dot(E const &e, F const &f); template typename std::enable_if::value && std::is_same::value && is_blas_array::value && is_blas_array::value, float>::type dot(E const &e, F const &f); template typename std::enable_if::value && std::is_same::value && is_blas_array::value && is_blas_array::value, double>::type dot(E const &e, F const &f); template typename std::enable_if< E::value == 1 && F::value == 1 && std::is_same>::value && std::is_same>::value && is_blas_array::value && is_blas_array::value, std::complex>::type dot(E const &e, F const &f); template typename std::enable_if< 
E::value == 1 && F::value == 1 && std::is_same>::value && std::is_same>::value && is_blas_array::value && is_blas_array::value, std::complex>::type dot(E const &e, F const &f); /// Matrix / Vector multiplication // We transpose the matrix to reflect our C order template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 1, types::ndarray>>::type dot(types::ndarray const &f, types::ndarray const &e); // The trick is to not transpose the matrix so that MV become VM template typename std::enable_if::value && std::tuple_size::value == 1 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::ndarray const &e, types::ndarray const &f); // If arguments could be use with blas, we evaluate them as we need pointer // on array for blas template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value // It is an array_like && (!(types::is_ndarray::value && types::is_ndarray::value) || !std::is_same::value) && is_blas_type::value && is_blas_type::value // With dtype compatible with // blas && E::value == 2 && F::value == 1, // And it is matrix / vect types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f); // If arguments could be use with blas, we evaluate them as we need pointer // on array for blas template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value // It is an array_like && (!(types::is_ndarray::value && types::is_ndarray::value) || !std::is_same::value) && is_blas_type::value && is_blas_type::value // With dtype compatible with // blas && E::value == 1 && F::value == 2, // And it is vect / matrix types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f); // If one of the arg doesn't have a "blas compatible type", we use a slow // matrix vector multiplication. 
template typename std::enable_if< (!is_blas_type::value || !is_blas_type::value) && E::value == 1 && F::value == 2, // And it is vect / matrix types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f); // If one of the arg doesn't have a "blas compatible type", we use a slow // matrix vector multiplication. template typename std::enable_if< (!is_blas_type::value || !is_blas_type::value) && E::value == 2 && F::value == 1, // And it is vect / matrix types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f); /// Matrix / Matrix multiplication // The trick is to use the transpose arguments to reflect C order. // We want to perform A * B in C order but blas order is F order. // So we compute B'A' == (AB)'. As this equality is perform with F order // We doesn't have to return a texpr because we want a C order matrice!! template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::ndarray const &a, types::ndarray const &b); template typename std::enable_if< is_blas_type::value && std::tuple_size::value == 2 && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>::type & dot(types::ndarray const &a, types::ndarray const &b, types::ndarray &c); // texpr variants: MT, TM, TT template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::numpy_texpr> const &a, types::ndarray const &b); template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::ndarray const &a, types::numpy_texpr> const &b); template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::numpy_texpr> const &a, types::numpy_texpr> const &b); // If arguments could be use with blas, we evaluate them as we need pointer // on 
array for blas template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value // It is an array_like && (!(types::is_ndarray::value && types::is_ndarray::value) || !std::is_same::value) && is_blas_type::value && is_blas_type::value // With dtype compatible with // blas && E::value == 2 && F::value == 2, // And both are matrix types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f); // If one of the arg doesn't have a "blas compatible type", we use a slow // matrix multiplication. template typename std::enable_if< (!is_blas_type::value || !is_blas_type::value) && E::value == 2 && F::value == 2, // And it is matrix / matrix types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f); // N x M where N >= 3 and M == 1 template typename std::enable_if< (E::value >= 3 && F::value == 1), types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f); // N x M where N >= 3 and M >= 2 template typename std::enable_if< (E::value >= 3 && F::value >= 2), types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f); DEFINE_FUNCTOR(pythonic::numpy, dot); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/double_.hpp000066400000000000000000000006001416264035500242020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DOUBLE_HPP #define PYTHONIC_INCLUDE_NUMPY_DOUBLE_HPP #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME double_ #define NUMPY_NARY_FUNC_SYM details::float64 #define NUMPY_NARY_EXTRA_METHOD using type = double; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/dtype/000077500000000000000000000000001416264035500232115ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/dtype/type.hpp000066400000000000000000000005571416264035500247120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_DTYPE_TYPE_HPP #define PYTHONIC_INCLUDE_NUMPY_DTYPE_TYPE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace dtype { template auto type(T const &t, V const &v) -> decltype(t(v)); DEFINE_FUNCTOR(pythonic::numpy::dtype, type); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/e.hpp000066400000000000000000000003111416264035500230140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_E_HPP #define PYTHONIC_INCLUDE_NUMPY_E_HPP PYTHONIC_NS_BEGIN namespace numpy { double constexpr e = 2.718281828459045235360287471352662498; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ediff1d.hpp000066400000000000000000000006761416264035500241100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EDIFF1D_HPP #define PYTHONIC_INCLUDE_NUMPY_EDIFF1D_HPP #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ediff1d(E const &expr); template auto ediff1d(types::list const &expr) -> decltype(ediff1d(asarray(expr))); DEFINE_FUNCTOR(pythonic::numpy, ediff1d); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/empty.hpp000066400000000000000000000015211416264035500237320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EMPTY_HPP #define PYTHONIC_INCLUDE_NUMPY_EMPTY_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> empty(pS const &shape, dtype d = dtype()); template types::ndarray> empty(long size, dtype d = dtype()); template types::ndarray>> 
empty(std::integral_constant, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, empty); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/empty_like.hpp000066400000000000000000000011721416264035500247400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EMPTYLIKE_HPP #define PYTHONIC_INCLUDE_NUMPY_EMPTYLIKE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/empty.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto empty_like(E const &expr, dtype d = dtype()) -> decltype(empty(sutils::getshape(expr), d)); template auto empty_like(E const &expr, types::none_type d = builtins::None) -> decltype(empty(sutils::getshape(expr), types::dtype_t())); DEFINE_FUNCTOR(pythonic::numpy, empty_like) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/equal.hpp000066400000000000000000000010231416264035500237000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EQUAL_HPP #define PYTHONIC_INCLUDE_NUMPY_EQUAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/eq.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME equal #define NUMPY_NARY_FUNC_SYM pythonic::operator_::eq #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/equal/000077500000000000000000000000001416264035500231735ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/equal/accumulate.hpp000066400000000000000000000003011416264035500260210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EQUAL_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME equal #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/exp.hpp000066400000000000000000000006701416264035500233740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EXP_HPP #define PYTHONIC_INCLUDE_NUMPY_EXP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME exp #define NUMPY_NARY_FUNC_SYM xsimd::exp #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/expand_dims.hpp000066400000000000000000000006051416264035500250710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EXPAND_DIMS_HPP #define PYTHONIC_INCLUDE_NUMPY_EXPAND_DIMS_HPP #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> expand_dims(T const &input, int axis); DEFINE_FUNCTOR(pythonic::numpy, expand_dims); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/expm1.hpp000066400000000000000000000007011416264035500236250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EXPM1_HPP #define PYTHONIC_INCLUDE_NUMPY_EXPM1_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME expm1 #define NUMPY_NARY_FUNC_SYM xsimd::expm1 #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/eye.hpp000066400000000000000000000012451416264035500233610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_EYE_HPP #define PYTHONIC_INCLUDE_NUMPY_EYE_HPP #include "pythonic/include/numpy/zeros.hpp" #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> eye(long N, long M, long k = 0, dtype d = 
dtype()); template types::ndarray> eye(long N, types::none_type M = builtins::None, long k = 0, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, eye); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fabs.hpp000066400000000000000000000004251416264035500235110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FABS_HPP #define PYTHONIC_INCLUDE_NUMPY_FABS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/abs.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(fabs, numpy::functor::abs); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/000077500000000000000000000000001416264035500226435ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/c2c.hpp000066400000000000000000000010501416264035500240170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FFT_C2C_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_C2C_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray, types::array::value>> c2c(types::ndarray, pS> const &a, long n = -1, long axis = -1, types::str const &norm = {}, bool const forward = true); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/fft.hpp000066400000000000000000000116471416264035500241440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FFT_FFT_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_FFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN /** * **Noteable difference to numpy.fft.fft:** * In contrast to numpy.fft.fft this implementation preserves precision * of floating point and complex inputs, i.e. complex input yields * complex output. numpy.fft.fft always returns complex, even for * long double input. 
This follows the same reasoning as given by numpy compiled * with intel_mkl (see here: https://github.com/IntelPython/mkl_fft/issues/10). * Conversion to double precision causes code to be slower and hurts use cases * where single precision preservation is desired, e.g. when interacting with *GPUs * or instruments. Moreover for the case of long double inputs, this avoids * loss of precision. **/ namespace numpy { namespace fft { template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, 
std::complex>::type, types::array::value>> fft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(fft); DEFINE_FUNCTOR(pythonic::numpy::fft, fft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/hfft.hpp000066400000000000000000000073421416264035500243110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FFT_HFFT_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_HFFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" /** * **Noteable difference to numpy.fft.hfft:** * In contrast to numpy.fft.hfft this implementation preserves precision * of floating point and complex inputs, i.e. complex input yields * complex output. numpy.fft.fft always returns complex, even for * long double input. This follows the same reasoning as given by numpy compiled * with intel_mkl (see here: https://github.com/IntelPython/mkl_fft/issues/10). * Conversion to double precision causes code to be slower and hurts use cases * where single precision preservation is desired, e.g. when interacting with *GPUs * or instruments. Moreover for the case of long double inputs, this avoids * loss of precision. 
**/ PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value>> hfft(types::ndarray, pS> const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value>> hfft(types::ndarray, pS> const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value>> hfft(types::ndarray, pS> const &a, long n, long axis, types::none_type norm); template types::ndarray::value>> hfft(types::ndarray, pS> const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> hfft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> hfft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> hfft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> hfft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(hfft); DEFINE_FUNCTOR(pythonic::numpy::fft, hfft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/ifft.hpp000066400000000000000000000117041416264035500243070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FFT_IFFT_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_IFFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN /** * **Noteable difference to numpy.fft.ifft:** * In contrast to numpy.fft.fft this implementation preserves precision * of floating point and complex inputs, i.e. 
complex input yields * complex output. numpy.fft.fft always returns complex, even for * long double input. This follows the same reasoning as given by numpy compiled * with intel_mkl (see here: https://github.com/IntelPython/mkl_fft/issues/10). * Conversion to double precision causes code to be slower and hurts use cases * where single precision preservation is desired, e.g. when interacting with *GPUs * or instruments. Moreover for the case of long double inputs, this avoids * loss of precision. **/ namespace numpy { namespace fft { template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, std::complex>::type, types::array::value>> 
ifft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(ifft); DEFINE_FUNCTOR(pythonic::numpy::fft, ifft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/ihfft.hpp000066400000000000000000000075201416264035500244600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FFT_IHFFT_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_IHFFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" /** * **Noteable difference to numpy.fft.ihfft:** * In contrast to numpy.fft.ihfft this implementation preserves precision * of floating point and complex inputs, i.e. complex input yields * complex output. numpy.fft.fft always returns complex, even for * long double input. This follows the same reasoning as given by numpy compiled * with intel_mkl (see here: https://github.com/IntelPython/mkl_fft/issues/10). * Conversion to double precision causes code to be slower and hurts use cases * where single precision preservation is desired, e.g. when interacting with *GPUs * or instruments. Moreover for the case of long double inputs, this avoids * loss of precision. 
**/ PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(ihfft); DEFINE_FUNCTOR(pythonic::numpy::fft, ihfft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/irfft.hpp000066400000000000000000000073701416264035500244750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FFT_IRFFT_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_IRFFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" /** * **Noteable difference to numpy.fft.irfft:** * In contrast to numpy.fft.irfft this implementation preserves precision * of floating point and complex inputs, i.e. 
complex input yields * complex output. numpy.fft.fft always returns complex, even for * long double input. This follows the same reasoning as given by numpy compiled * with intel_mkl (see here: https://github.com/IntelPython/mkl_fft/issues/10). * Conversion to double precision causes code to be slower and hurts use cases * where single precision preservation is desired, e.g. when interacting with *GPUs * or instruments. Moreover for the case of long double inputs, this avoids * loss of precision. **/ PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value>> irfft(types::ndarray, pS> const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value>> irfft(types::ndarray, pS> const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value>> irfft(types::ndarray, pS> const &a, long n, long axis, types::none_type norm); template types::ndarray::value>> irfft(types::ndarray, pS> const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(irfft); DEFINE_FUNCTOR(pythonic::numpy::fft, irfft); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fft/rfft.hpp000066400000000000000000000074721416264035500243270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FFT_RFFT_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_RFFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" /** * **Noteable difference to numpy.fft.rfft:** * In contrast to numpy.fft.rfft this implementation preserves precision * of floating point and complex inputs, i.e. complex input yields * complex output. numpy.fft.fft always returns complex, even for * long double input. This follows the same reasoning as given by numpy compiled * with intel_mkl (see here: https://github.com/IntelPython/mkl_fft/issues/10). * Conversion to double precision causes code to be slower and hurts use cases * where single precision preservation is desired, e.g. when interacting with *GPUs * or instruments. Moreover for the case of long double inputs, this avoids * loss of precision. **/ PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, types::none_type n, long axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, long n = -1, long axis = -1, types::str const &norm = {}); template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, types::none_type n, long 
axis, types::str const &norm); template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, long n, long axis, types::none_type norm); template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &a, types::none_type n, long axis = -1, types::none_type norm = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(rfft); DEFINE_FUNCTOR(pythonic::numpy::fft, rfft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fill_diagonal.hpp000066400000000000000000000006241416264035500253630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FILL_DIAGONAL_HPP #define PYTHONIC_INCLUDE_NUMPY_FILL_DIAGONAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::none_type fill_diagonal(E &&, typename std::decay::type::dtype); DEFINE_FUNCTOR(pythonic::numpy, fill_diagonal) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/finfo.hpp000066400000000000000000000006601416264035500237000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FINFO_HPP #define PYTHONIC_INCLUDE_NUMPY_FINFO_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/types/finfo.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::finfo finfo(dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, finfo) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fix.hpp000066400000000000000000000006341416264035500233660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FIX_HPP #define PYTHONIC_INCLUDE_NUMPY_FIX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME fix #define NUMPY_NARY_FUNC_SYM std::trunc #include 
"pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/flatnonzero.hpp000066400000000000000000000005331416264035500251370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLATNONZERO_HPP #define PYTHONIC_INCLUDE_NUMPY_FLATNONZERO_HPP #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> flatnonzero(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, flatnonzero); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/flip.hpp000066400000000000000000000013411416264035500235260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLIP_HPP #define PYTHONIC_INCLUDE_NUMPY_FLIP_HPP #include "pythonic/include/types/numpy_gexpr.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template auto flip(E const &expr, S const &slices, utils::index_sequence) -> decltype(expr(slices[I]...)); } template auto flip(E const &expr, long axis) -> decltype(details::flip(expr, std::array{}, utils::make_index_sequence{})); DEFINE_FUNCTOR(pythonic::numpy, flip); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fliplr.hpp000066400000000000000000000007461416264035500240740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLIPLR_HPP #define PYTHONIC_INCLUDE_NUMPY_FLIPLR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto fliplr(E &&expr) -> decltype(std::forward(expr)( types::contiguous_slice{builtins::None, builtins::None}, types::slice{builtins::None, builtins::None, -1})); DEFINE_FUNCTOR(pythonic::numpy, fliplr); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/flipud.hpp000066400000000000000000000006471416264035500240670ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_FLIPUD_HPP #define PYTHONIC_INCLUDE_NUMPY_FLIPUD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto flipud(E &&expr) -> decltype( std::forward(expr)[types::slice{builtins::None, builtins::None, -1}]); DEFINE_FUNCTOR(pythonic::numpy, flipud); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/float128.hpp000066400000000000000000000011441416264035500241350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLOAT128_HPP #define PYTHONIC_INCLUDE_NUMPY_FLOAT128_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { long double float128(); template long double float128(V v); } #define NUMPY_NARY_FUNC_NAME float128 #define NUMPY_NARY_FUNC_SYM details::float128 #define NUMPY_NARY_EXTRA_METHOD using type = long double; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/float32.hpp000066400000000000000000000011701416264035500240460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLOAT32_HPP #define PYTHONIC_INCLUDE_NUMPY_FLOAT32_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { float float32(); template float float32(V v); } #define NUMPY_NARY_FUNC_NAME float32 #define NUMPY_NARY_FUNC_SYM details::float32 #define NUMPY_NARY_EXTRA_METHOD using type = float; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/float64.hpp000066400000000000000000000011171416264035500240540ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_FLOAT64_HPP #define PYTHONIC_INCLUDE_NUMPY_FLOAT64_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { double float64(); template double float64(V v); } #define NUMPY_NARY_FUNC_NAME float64 #define NUMPY_NARY_FUNC_SYM details::float64 #define NUMPY_NARY_EXTRA_METHOD using type = double; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/float_.hpp000066400000000000000000000005751416264035500240500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLOAT_HPP #define PYTHONIC_INCLUDE_NUMPY_FLOAT_HPP #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME float_ #define NUMPY_NARY_FUNC_SYM details::float64 #define NUMPY_NARY_EXTRA_METHOD using type = double; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/floor.hpp000066400000000000000000000007001416264035500237130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLOOR_HPP #define PYTHONIC_INCLUDE_NUMPY_FLOOR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME floor #define NUMPY_NARY_FUNC_SYM xsimd::floor #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/floor_divide.hpp000066400000000000000000000031261416264035500252440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLOORDIVIDE_HPP #define PYTHONIC_INCLUDE_NUMPY_FLOORDIVIDE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include 
"pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include//numpy/floor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template std::complex::type> divfloor(std::complex const &arg0, std::complex const &arg1) { return {functor::floor{}(std::real(arg0 / arg1)), 0}; } template auto divfloor(Arg0 const &arg0, Arg1 const &arg1) -> typename std::enable_if<(std::is_integral::value && std::is_integral::value), decltype(arg0 / arg1)>::type { bool opposite_sign = (arg0 >= 0 && arg1 < 0) || (arg0 < 0 && arg1 >= 0); return (arg0 + opposite_sign * (-arg1 + 1)) / arg1; } template auto divfloor(Arg0 const &arg0, Arg1 const &arg1) -> typename std::enable_if::value || !std::is_integral::value, decltype(functor::floor{}(arg0 / arg1))>::type { return functor::floor{}(arg0 / arg1); } } #define NUMPY_NARY_FUNC_NAME floor_divide #define NUMPY_NARY_FUNC_SYM wrapper::divfloor #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/floor_divide/000077500000000000000000000000001416264035500245315ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/floor_divide/accumulate.hpp000066400000000000000000000003261416264035500273660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FLOOR_DIVIDE_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_FLOOR_DIVIDE_ACCUMULATE_HPP #define UFUNC_NAME floor_divide #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmax.hpp000066400000000000000000000003371416264035500235330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMAX_HPP #define PYTHONIC_INCLUDE_NUMPY_FMAX_HPP #include "pythonic/include/numpy/maximum.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(fmax, maximum); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmax/000077500000000000000000000000001416264035500230175ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmax/accumulate.hpp000066400000000000000000000002761416264035500256600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMAX_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_FMAX_ACCUMULATE_HPP #define UFUNC_NAME fmax #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmax/reduce.hpp000066400000000000000000000003601416264035500247760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMAX_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_FMAX_REDUCE_HPP #define UFUNC_NAME fmax #define UFUNC_INAME imax #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmin.hpp000066400000000000000000000003371416264035500235310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMIN_HPP #define PYTHONIC_INCLUDE_NUMPY_FMIN_HPP #include "pythonic/include/numpy/minimum.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(fmin, minimum); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmin/000077500000000000000000000000001416264035500230155ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmin/accumulate.hpp000066400000000000000000000002761416264035500256560ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMIN_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_FMIN_ACCUMULATE_HPP #define UFUNC_NAME fmin #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmin/reduce.hpp000066400000000000000000000003601416264035500247740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMIN_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_FMIN_REDUCE_HPP #define UFUNC_NAME fmin #define UFUNC_INAME imin #include 
"pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmod.hpp000066400000000000000000000007621416264035500235270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMOD_HPP #define PYTHONIC_INCLUDE_NUMPY_FMOD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME fmod #define NUMPY_NARY_FUNC_SYM xsimd::fmod #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmod/000077500000000000000000000000001416264035500230115ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fmod/accumulate.hpp000066400000000000000000000002761416264035500256520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FMOD_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_FMOD_ACCUMULATE_HPP #define UFUNC_NAME fmod #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/frexp.hpp000066400000000000000000000013461416264035500237250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FREXP_HPP #define PYTHONIC_INCLUDE_NUMPY_FREXP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/traits.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, std::tuple>::type frexp(T val); template typename std::enable_if< !types::is_dtype::value, std::tuple, types::ndarray>>::type frexp(E const &arr); DEFINE_FUNCTOR(pythonic::numpy, frexp); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fromfile.hpp000066400000000000000000000012231416264035500243760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FROMFILE_HPP #define PYTHONIC_INCLUDE_NUMPY_FROMFILE_HPP #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> fromfile(types::str const &file_name, dtype d = dtype(), long count = -1, types::str const &sep = {}, long offset = 0); DEFINE_FUNCTOR(pythonic::numpy, fromfile); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fromfunction.hpp000066400000000000000000000030521416264035500253060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FROMFUNCTION_HPP #define PYTHONIC_INCLUDE_NUMPY_FROMFUNCTION_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/utils/tags.hpp" PYTHONIC_NS_BEGIN namespace numpy { template struct fromfunction_helper; template struct fromfunction_helper { template types::ndarray::type>::type>::type, pS> operator()(F &&f, pS const &shape, dtype d = dtype()); }; template struct fromfunction_helper { template types::ndarray< typename std::remove_cv::type>::type>::type, pS> operator()(F &&f, pS const &shape, dtype d = dtype()); }; template auto fromfunction(F &&f, pS const &shape, dtype d = dtype()) -> decltype(fromfunction_helper::value, dtype, typename pythonic::purity_of::type>()( std::forward(f), shape)); /* TODO: must specialize for higher order */ DEFINE_FUNCTOR(pythonic::numpy, fromfunction); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fromiter.hpp000066400000000000000000000011501416264035500244210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FROMITER_HPP 
#define PYTHONIC_INCLUDE_NUMPY_FROMITER_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::type>::type::value_type, 1> fromiter(Iterable &&iterable, dtype d = dtype(), long count = -1); DEFINE_FUNCTOR(pythonic::numpy, fromiter); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/fromstring.hpp000066400000000000000000000012571416264035500247740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FROMSTRING_HPP #define PYTHONIC_INCLUDE_NUMPY_FROMSTRING_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/str.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> fromstring(types::str const &string, dtype d = dtype(), long count = -1, types::str const &sep = {}); DEFINE_FUNCTOR(pythonic::numpy, fromstring); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/full.hpp000066400000000000000000000025061416264035500235420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FULL_HPP #define PYTHONIC_INCLUDE_NUMPY_FULL_HPP #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> full(pS const &shape, F fill_value, dtype d); template types::ndarray> full(long size, F fill_value, dtype d); template types::ndarray>> full(std::integral_constant, F fill_value, dtype d); template types::ndarray> full(pS const &shape, F fill_value, types::none_type _ = {}); template types::ndarray> full(long size, F fill_value, types::none_type _ = {}); template types::ndarray>> full(std::integral_constant, F fill_value, types::none_type _ = {}); 
DEFINE_FUNCTOR(pythonic::numpy, full); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/full_like.hpp000066400000000000000000000013111416264035500245370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_FULLLIKE_HPP #define PYTHONIC_INCLUDE_NUMPY_FULLLIKE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/full.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto full_like(E const &expr, F fill_value, dtype d = dtype()) -> decltype(full(sutils::getshape(expr), fill_value, d)); template auto full_like(E const &expr, F fill_value, types::none_type d = builtins::None) -> decltype(full(sutils::getshape(expr), fill_value, types::dtype_t())); DEFINE_FUNCTOR(pythonic::numpy, full_like) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/greater.hpp000066400000000000000000000010311416264035500242210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_GREATER_HPP #define PYTHONIC_INCLUDE_NUMPY_GREATER_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/gt.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME greater #define NUMPY_NARY_FUNC_SYM pythonic::operator_::gt #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/greater/000077500000000000000000000000001416264035500235155ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/greater/accumulate.hpp000066400000000000000000000003071416264035500263510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_GREATER_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_GREATER_ACCUMULATE_HPP #define UFUNC_NAME greater #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/greater_equal.hpp000066400000000000000000000010511416264035500254120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_GREATEREQUAL_HPP #define PYTHONIC_INCLUDE_NUMPY_GREATEREQUAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/operator_/ge.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME greater_equal #define NUMPY_NARY_FUNC_SYM pythonic::operator_::ge #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/greater_equal/000077500000000000000000000000001416264035500247045ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/greater_equal/accumulate.hpp000066400000000000000000000003311416264035500275350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_GREATER_EQUAL_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_GREATER_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME greater_equal #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/heaviside.hpp000066400000000000000000000010241416264035500245330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_HEAVISIDE_HPP #define PYTHONIC_INCLUDE_NUMPY_HEAVISIDE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template T1 heaviside(T0 x0, T1 x1); } #define NUMPY_NARY_FUNC_NAME heaviside #define NUMPY_NARY_FUNC_SYM details::heaviside #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/heaviside/000077500000000000000000000000001416264035500240255ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/heaviside/accumulate.hpp000066400000000000000000000003151416264035500266600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_HEAVISIDE_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_HEAVISIDE_ACCUMULATE_HPP #define UFUNC_NAME heaviside #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/hstack.hpp000066400000000000000000000005751416264035500240610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_HSTACK_HPP #define PYTHONIC_INCLUDE_NUMPY_HSTACK_HPP #include PYTHONIC_NS_BEGIN namespace numpy { template auto hstack(ArraySequence &&seq) -> decltype(concatenate(std::forward(seq), 1)); DEFINE_FUNCTOR(pythonic::numpy, hstack); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/hypot.hpp000066400000000000000000000007661416264035500237510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_HYPOT_HPP #define PYTHONIC_INCLUDE_NUMPY_HYPOT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME hypot #define NUMPY_NARY_FUNC_SYM xsimd::hypot #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/hypot/000077500000000000000000000000001416264035500232275ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/hypot/accumulate.hpp000066400000000000000000000003011416264035500260550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_HYPOT_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_HYPOT_ACCUMULATE_HPP #define UFUNC_NAME hypot #include 
"pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/identity.hpp000066400000000000000000000006251416264035500244310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_IDENTITY_HPP #define PYTHONIC_INCLUDE_NUMPY_IDENTITY_HPP #include "pythonic/include/numpy/eye.hpp" #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto identity(long n, dtype d = dtype()) -> decltype(eye(n, n, 0, d)); DEFINE_FUNCTOR(pythonic::numpy, identity); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/imag.hpp000066400000000000000000000012231416264035500235100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_IMAG_HPP #define PYTHONIC_INCLUDE_NUMPY_IMAG_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/list.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto imag(E &&expr) -> decltype(builtins::getattr(types::attr::IMAG{}, std::forward(expr))); template auto imag(types::list const &expr) -> decltype(imag(numpy::functor::asarray{}(expr))); DEFINE_FUNCTOR(pythonic::numpy, imag); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/indices.hpp000066400000000000000000000011021416264035500242050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INDICES_HPP #define PYTHONIC_INCLUDE_NUMPY_INDICES_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/int64.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray< typename dtype::type, sutils::push_front_t< pS, std::integral_constant::value>>> indices(pS const &shape, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, indices); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/inf.hpp000066400000000000000000000003371416264035500233540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INF_HPP #define PYTHONIC_INCLUDE_NUMPY_INF_HPP #include PYTHONIC_NS_BEGIN namespace numpy { double const inf = std::numeric_limits::infinity(); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/inner.hpp000066400000000000000000000003321416264035500237060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INNER_HPP #define PYTHONIC_INCLUDE_NUMPY_INNER_HPP #include "pythonic/include/numpy/dot.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(inner, dot); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/insert.hpp000066400000000000000000000035571416264035500241130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INSERT_HPP #define PYTHONIC_INCLUDE_NUMPY_INSERT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/traits.hpp" #include "pythonic/include/builtins/None.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value && types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis = builtins::None); template typename std::enable_if::value && !types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis = builtins::None); template typename std::enable_if::value && types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis = builtins::None); template typename std::enable_if::value && !types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis = builtins::None); template E insert(E, Args const &...); DEFINE_FUNCTOR(pythonic::numpy, insert); } 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/int16.hpp000066400000000000000000000011621416264035500235360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INT16_HPP #define PYTHONIC_INCLUDE_NUMPY_INT16_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int16_t int16(); template int16_t int16(V v); } #define NUMPY_NARY_FUNC_NAME int16 #define NUMPY_NARY_FUNC_SYM details::int16 #define NUMPY_NARY_EXTRA_METHOD using type = int16_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/int32.hpp000066400000000000000000000011621416264035500235340ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INT32_HPP #define PYTHONIC_INCLUDE_NUMPY_INT32_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int32_t int32(); template int32_t int32(V v); } #define NUMPY_NARY_FUNC_NAME int32 #define NUMPY_NARY_FUNC_SYM details::int32 #define NUMPY_NARY_EXTRA_METHOD using type = int32_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/int64.hpp000066400000000000000000000011621416264035500235410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INT64_HPP #define PYTHONIC_INCLUDE_NUMPY_INT64_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int64_t int64(); template int64_t int64(V v); 
} #define NUMPY_NARY_FUNC_NAME int64 #define NUMPY_NARY_FUNC_SYM details::int64 #define NUMPY_NARY_EXTRA_METHOD using type = int64_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/int8.hpp000066400000000000000000000011511416264035500234550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INT8_HPP #define PYTHONIC_INCLUDE_NUMPY_INT8_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int8_t int8(); template int8_t int8(V v); } #define NUMPY_NARY_FUNC_NAME int8 #define NUMPY_NARY_FUNC_SYM details::int8 #define NUMPY_NARY_EXTRA_METHOD using type = int8_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/int_.hpp000066400000000000000000000011421416264035500235240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INT__HPP #define PYTHONIC_INCLUDE_NUMPY_INT__HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { long int_(); template long int_(V v); } #define NUMPY_NARY_FUNC_NAME int_ #define NUMPY_NARY_FUNC_SYM details::int_ #define NUMPY_NARY_EXTRA_METHOD using type = long; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/intc.hpp000066400000000000000000000011401416264035500235260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INTC_HPP #define PYTHONIC_INCLUDE_NUMPY_INTC_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int intc(); template int intc(V v); } #define NUMPY_NARY_FUNC_NAME intc #define NUMPY_NARY_FUNC_SYM details::intc #define NUMPY_NARY_EXTRA_METHOD using type = int; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/interp.hpp000066400000000000000000000032301416264035500240740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INTERP_HPP #define PYTHONIC_INCLUDE_NUMPY_INTERP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { // None,None,None template types::ndarray> interp(T1 x, T2 xp, T3 fp, types::none_type left = types::none_type{}, types::none_type right = types::none_type{}, types::none_type period = types::none_type{}); // left None None template types::ndarray> interp(T1 x, T2 xp, T3 fp, t1 left, types::none_type right = types::none_type{}, types::none_type period = types::none_type{}); // None right None template types::ndarray> interp(T1 x, T2 xp, T3 fp, types::none_type left, t1 right, types::none_type period = types::none_type{}); // None None period template types::ndarray> interp(T1 x, T2 xp, T3 fp, types::none_type left, types::none_type right, t1 period); // left right None template types::ndarray> interp(T1 x, T2 xp, T3 fp, t1 left, t2 right, types::none_type period = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(interp); DEFINE_FUNCTOR(pythonic::numpy, interp); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/intersect1d.hpp000066400000000000000000000011301416264035500250150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INTERSECT1D_HPP #define PYTHONIC_INCLUDE_NUMPY_INTERSECT1D_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/combined.hpp" #include "pythonic/include/numpy/asarray.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray< typename __combined::type, types::pshape> intersect1d(E const &e, F const &f); DEFINE_FUNCTOR(pythonic::numpy, intersect1d); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/intp.hpp000066400000000000000000000011571416264035500235530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INTP_HPP #define PYTHONIC_INCLUDE_NUMPY_INTP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { intptr_t intp(); template intptr_t intp(V v); } #define NUMPY_NARY_FUNC_NAME intp #define NUMPY_NARY_FUNC_SYM details::intp #define NUMPY_NARY_EXTRA_METHOD using type = intptr_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/invert.hpp000066400000000000000000000007361416264035500241120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_INVERT_HPP #define PYTHONIC_INCLUDE_NUMPY_INVERT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/invert.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME invert #define NUMPY_NARY_FUNC_SYM operator_::invert #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isclose.hpp000066400000000000000000000012731416264035500242410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISCLOSE_HPP #define PYTHONIC_INCLUDE_NUMPY_ISCLOSE_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/abs.hpp" #include "pythonic/include/numpy/isfinite.hpp" #include "pythonic/include/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template bool isclose(T0 const &u, T1 const &v, double rtol = 1e-5, double atol = 1e-8, bool equal_nan = false); } #define NUMPY_NARY_FUNC_NAME isclose #define NUMPY_NARY_FUNC_SYM wrapper::isclose #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/iscomplex.hpp000066400000000000000000000013731416264035500246040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISCOMPLEX_HPP #define PYTHONIC_INCLUDE_NUMPY_ISCOMPLEX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template typename std::enable_if::value, bool>::type iscomplex(I const &a); template constexpr typename std::enable_if::value, bool>::type iscomplex(I const &a); } #define NUMPY_NARY_FUNC_NAME iscomplex #define NUMPY_NARY_FUNC_SYM wrapper::iscomplex #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isfinite.hpp000066400000000000000000000012771416264035500244160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISFINITE_HPP #define PYTHONIC_INCLUDE_NUMPY_ISFINITE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template bool isfinite(std::complex const &t) { return std::isfinite(t.real()) && std::isfinite(t.imag()); } template bool isfinite(T const &v) { return std::isfinite(v); } } #define NUMPY_NARY_FUNC_NAME isfinite 
#define NUMPY_NARY_FUNC_SYM wrapper::isfinite #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isinf.hpp000066400000000000000000000010671416264035500237110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISINF_HPP #define PYTHONIC_INCLUDE_NUMPY_ISINF_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template bool isinf(T const &v); template bool isinf(std::complex const &v); } #define NUMPY_NARY_FUNC_NAME isinf #define NUMPY_NARY_FUNC_SYM wrapper::isinf #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isnan.hpp000066400000000000000000000015251416264035500237100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISNAN_HPP #define PYTHONIC_INCLUDE_NUMPY_ISNAN_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template bool isnan(std::complex const &v); template auto isnan(T const &v) -> typename std::enable_if< std::is_floating_point::type>::value, bool>::type; template auto isnan(T const &v) -> typename std::enable_if< !std::is_floating_point::type>::value, bool>::type; } #define NUMPY_NARY_FUNC_NAME isnan #define NUMPY_NARY_FUNC_SYM wrapper::isnan #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isneginf.hpp000066400000000000000000000011361416264035500244000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISNEGINF_HPP #define PYTHONIC_INCLUDE_NUMPY_ISNEGINF_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" 
#include "pythonic/include/numpy/isinf.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto isneginf(T const &t) -> decltype(functor::isinf{}(t) && (t < 0)); } #define NUMPY_NARY_FUNC_NAME isneginf #define NUMPY_NARY_FUNC_SYM wrapper::isneginf #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isposinf.hpp000066400000000000000000000011331416264035500244250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISPOSINF_HPP #define PYTHONIC_INCLUDE_NUMPY_ISPOSINF_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/isinf.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto isposinf(T const &t) -> decltype(functor::isinf{}(t) && t >= 0); } #define NUMPY_NARY_FUNC_NAME isposinf #define NUMPY_NARY_FUNC_SYM wrapper::isposinf #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isreal.hpp000066400000000000000000000013371416264035500240600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISREAL_HPP #define PYTHONIC_INCLUDE_NUMPY_ISREAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template typename std::enable_if::value, bool>::type isreal(I const &a); template typename std::enable_if::value, bool>::type isreal(I const &a); } #define NUMPY_NARY_FUNC_NAME isreal #define NUMPY_NARY_FUNC_SYM wrapper::isreal #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isrealobj.hpp000066400000000000000000000006231416264035500245500ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_ISREALOBJ_HPP #define PYTHONIC_INCLUDE_NUMPY_ISREALOBJ_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { template constexpr bool isrealobj(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, isrealobj); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/isscalar.hpp000066400000000000000000000006401416264035500243760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISSCALAR_HPP #define PYTHONIC_INCLUDE_NUMPY_ISSCALAR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/traits.hpp" #include "pythonic/include/types/str.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template constexpr bool isscalar(E const &); DEFINE_FUNCTOR(pythonic::numpy, isscalar); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/issctype.hpp000066400000000000000000000013641416264035500244440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ISSCTYPE_HPP #define PYTHONIC_INCLUDE_NUMPY_ISSCTYPE_HPP #include "pythonic/include/numpy/isscalar.hpp" PYTHONIC_NS_BEGIN namespace types { class str; } namespace numpy { template constexpr auto issctype(E const &expr) -> typename std::enable_if::value && !std::is_same::value, bool>::type; template constexpr auto issctype(E const &expr) -> typename std::enable_if::value || std::is_same::value, bool>::type; DEFINE_FUNCTOR(pythonic::numpy, issctype); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ldexp.hpp000066400000000000000000000007311416264035500237120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LDEXP_HPP #define PYTHONIC_INCLUDE_NUMPY_LDEXP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { 
#define NUMPY_NARY_FUNC_NAME ldexp #define NUMPY_NARY_FUNC_SYM std::ldexp #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ldexp/000077500000000000000000000000001416264035500232005ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ldexp/accumulate.hpp000066400000000000000000000003011416264035500260260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LDEXP_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LDEXP_ACCUMULATE_HPP #define UFUNC_NAME ldexp #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/left_shift.hpp000066400000000000000000000010521416264035500247220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LEFT_SHIFT_HPP #define PYTHONIC_INCLUDE_NUMPY_LEFT_SHIFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/operator_/lshift.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME left_shift #define NUMPY_NARY_FUNC_SYM pythonic::operator_::lshift #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/left_shift/000077500000000000000000000000001416264035500242135ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/left_shift/accumulate.hpp000066400000000000000000000003201416264035500270420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LEFT_SHIFT_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LEFT_SHIFT_ACCUMULATE_HPP #define UFUNC_NAME left_shift #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/less.hpp000066400000000000000000000010201416264035500235340ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_LESS_HPP #define PYTHONIC_INCLUDE_NUMPY_LESS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/lt.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME less #define NUMPY_NARY_FUNC_SYM pythonic::operator_::lt #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/less/000077500000000000000000000000001416264035500230325ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/less/accumulate.hpp000066400000000000000000000002761416264035500256730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LESS_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LESS_ACCUMULATE_HPP #define UFUNC_NAME less #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/less_equal.hpp000066400000000000000000000010401416264035500247250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LESSEQUAL_HPP #define PYTHONIC_INCLUDE_NUMPY_LESSEQUAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/le.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME less_equal #define NUMPY_NARY_FUNC_SYM pythonic::operator_::le #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/less_equal/000077500000000000000000000000001416264035500242215ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/less_equal/accumulate.hpp000066400000000000000000000003201416264035500270500ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_LESS_EQUAL_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LESS_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME less_equal #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/lexsort.hpp000066400000000000000000000005731416264035500243020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LEXSORT_HPP #define PYTHONIC_INCLUDE_NUMPY_LEXSORT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> lexsort(pS const &keys); DEFINE_FUNCTOR(pythonic::numpy, lexsort) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/linalg/000077500000000000000000000000001416264035500233325ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/linalg/matrix_power.hpp000066400000000000000000000006511416264035500265650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LINALG_MATRIX_POWER_HPP #define PYTHONIC_INCLUDE_NUMPY_LINALG_MATRIX_POWER_HPP #include "pythonic/include/numpy/array.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace linalg { template auto matrix_power(E const &expr, long n) -> decltype(numpy::functor::array{}(expr)); DEFINE_FUNCTOR(pythonic::numpy::linalg, matrix_power); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/linalg/norm.hpp000066400000000000000000000033771416264035500250300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LINALG_NORM_HPP #define PYTHONIC_INCLUDE_NUMPY_LINALG_NORM_HPP #include "pythonic/include/numpy/sqrt.hpp" #include "pythonic/include/builtins/pythran/abssqr.hpp" #include "pythonic/include/numpy/sum.hpp" #include "pythonic/include/numpy/asfarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace linalg { template auto norm(Array &&array, types::none_type ord = {}, types::none_type axis = {}) -> decltype( pythonic::numpy::functor::sqrt{}(pythonic::numpy::functor::sum{}( 
pythonic::builtins::pythran::functor::abssqr{}( std::forward(array))))); template using norm_dtype_t = typename std::conditional< std::is_floating_point< typename std::decay::type::dtype()>::value, typename std::decay::type::dtype(), double>::type; template using norm_t = typename std::conditional< std::decay::type::value == 1, norm_dtype_t, types::ndarray< norm_dtype_t, types::array::type::value - 1>>>::type; template norm_t norm(Array &&array, double ord, types::none_type axis = {}); template norm_t norm(Array &&array, types::none_type ord, double axis); template norm_t norm(Array &&array, double ord, long axis); template norm_t norm(Array &&array, double ord, types::array axis); template norm_t norm(Array &&array, double ord, types::array axis); DEFINE_FUNCTOR(pythonic::numpy::linalg, norm); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/linspace.hpp000066400000000000000000000007431416264035500243770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LINSPACE_HPP #define PYTHONIC_INCLUDE_NUMPY_LINSPACE_HPP #include "pythonic/include/numpy/arange.hpp" PYTHONIC_NS_BEGIN namespace numpy { template > types::ndarray> linspace(double start, double stop, long num = 50, bool endpoint = true, bool retstep = false, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, linspace); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/log.hpp000066400000000000000000000006701416264035500233610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOG_HPP #define PYTHONIC_INCLUDE_NUMPY_LOG_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log #define NUMPY_NARY_FUNC_SYM xsimd::log #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/log10.hpp000066400000000000000000000007001416264035500235140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOG10_HPP #define PYTHONIC_INCLUDE_NUMPY_LOG10_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log10 #define NUMPY_NARY_FUNC_SYM xsimd::log10 #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/log1p.hpp000066400000000000000000000007011416264035500236150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOG1P_HPP #define PYTHONIC_INCLUDE_NUMPY_LOG1P_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log1p #define NUMPY_NARY_FUNC_SYM xsimd::log1p #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/log2.hpp000066400000000000000000000006741416264035500234470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOG2_HPP #define PYTHONIC_INCLUDE_NUMPY_LOG2_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log2 #define NUMPY_NARY_FUNC_SYM xsimd::log2 #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logaddexp.hpp000066400000000000000000000013071416264035500245450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGADDEXP_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGADDEXP_HPP #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/log.hpp" #include "pythonic/include/numpy/exp.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logaddexp(T0 const &t0, T1 const &t1) -> decltype(functor::log{}(functor::exp{}(t0) + functor::exp{}(t1))); } #define NUMPY_NARY_FUNC_NAME logaddexp #define NUMPY_NARY_FUNC_SYM wrapper::logaddexp #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logaddexp/000077500000000000000000000000001416264035500240335ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logaddexp/accumulate.hpp000066400000000000000000000003151416264035500266660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGADDEXP_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGADDEXP_ACCUMULATE_HPP #define UFUNC_NAME logaddexp #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logaddexp2.hpp000066400000000000000000000014741416264035500246340ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGADDEXP2_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGADDEXP2_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/log2.hpp" #include "pythonic/include/numpy/power.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logaddexp2(T0 const &t0, T1 const &t1) -> decltype(functor::log2{}(functor::power{}(T0(2), t0) + functor::power{}(T1(2), t1))); } #define NUMPY_NARY_FUNC_NAME logaddexp2 #define NUMPY_NARY_FUNC_SYM wrapper::logaddexp2 #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logaddexp2/000077500000000000000000000000001416264035500241155ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logaddexp2/accumulate.hpp000066400000000000000000000003201416264035500267440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGADDEXP2_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGADDEXP2_ACCUMULATE_HPP #define UFUNC_NAME logaddexp2 #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_and.hpp000066400000000000000000000011711416264035500250310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGICALAND_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGICALAND_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logical_and(T0 const &t0, T1 const &t1) -> decltype(t0 &&t1); } #define NUMPY_NARY_FUNC_NAME logical_and #define NUMPY_NARY_FUNC_SYM wrapper::logical_and #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_and/000077500000000000000000000000001416264035500243205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_and/accumulate.hpp000066400000000000000000000003231416264035500271520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGICAL_AND_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGICAL_AND_ACCUMULATE_HPP #define UFUNC_NAME logical_and #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_not.hpp000066400000000000000000000007611416264035500250730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGICALNOT_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGICALNOT_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/not_.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME logical_not #define NUMPY_NARY_FUNC_SYM pythonic::operator_::not_ #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_or.hpp000066400000000000000000000011641416264035500247110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGICALOR_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGICALOR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logical_or(T0 const &t0, T1 const &t1) -> decltype(t0 || t1); } #define NUMPY_NARY_FUNC_NAME logical_or #define NUMPY_NARY_FUNC_SYM wrapper::logical_or #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_or/000077500000000000000000000000001416264035500241765ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_or/accumulate.hpp000066400000000000000000000003201416264035500270250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGICAL_OR_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGICAL_OR_ACCUMULATE_HPP #define UFUNC_NAME logical_or #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_xor.hpp000066400000000000000000000014731416264035500251040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGICALXOR_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGICALXOR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include 
"pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logical_xor(T0 const &t0, T1 const &t1) -> decltype((t0 && !t1) || (t1 && !t0)); template bool logical_xor(std::complex const &t0, std::complex const &t1) { return (!!t0 && !t1) || (!!t1 && !t0); } } #define NUMPY_NARY_FUNC_NAME logical_xor #define NUMPY_NARY_FUNC_SYM wrapper::logical_xor #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_xor/000077500000000000000000000000001416264035500243665ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logical_xor/accumulate.hpp000066400000000000000000000003231416264035500272200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGICAL_XOR_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGICAL_XOR_ACCUMULATE_HPP #define UFUNC_NAME logical_xor #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/logspace.hpp000066400000000000000000000010571416264035500243750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LOGSPACE_HPP #define PYTHONIC_INCLUDE_NUMPY_LOGSPACE_HPP #include "pythonic/include/numpy/linspace.hpp" #include "pythonic/include/numpy/power.hpp" PYTHONIC_NS_BEGIN namespace numpy { auto logspace(double start, double stop, long num = 50, bool endpoint = true, double base = 10.0) -> decltype(functor::power()(base, functor::linspace()(start, stop, num, endpoint))); DEFINE_FUNCTOR(pythonic::numpy, logspace); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/longlong.hpp000066400000000000000000000012121416264035500244100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_LONGLONG_HPP #define PYTHONIC_INCLUDE_NUMPY_LONGLONG_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { long long longlong(); template long long longlong(V v); } #define NUMPY_NARY_FUNC_NAME longlong #define NUMPY_NARY_FUNC_SYM details::longlong #define NUMPY_NARY_EXTRA_METHOD using type = long long; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/max.hpp000066400000000000000000000007141416264035500233640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MAX_HPP #define PYTHONIC_INCLUDE_NUMPY_MAX_HPP #include "pythonic/include/numpy/reduce.hpp" #include "pythonic/include/operator_/imax.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto max(Args &&... args) -> decltype( reduce(std::forward(args)...)); DEFINE_FUNCTOR(pythonic::numpy, max); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/maximum.hpp000066400000000000000000000007721416264035500242600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MAXIMUM_HPP #define PYTHONIC_INCLUDE_NUMPY_MAXIMUM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME maximum #define NUMPY_NARY_FUNC_SYM xsimd::max #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/maximum/000077500000000000000000000000001416264035500235415ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/maximum/accumulate.hpp000066400000000000000000000003071416264035500263750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MAXIMUM_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_MAXIMUM_ACCUMULATE_HPP #define 
UFUNC_NAME maximum #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/maximum/reduce.hpp000066400000000000000000000003711416264035500255220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MAXIMUM_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_MAXIMUM_REDUCE_HPP #define UFUNC_NAME maximum #define UFUNC_INAME imax #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/mean.hpp000066400000000000000000000040151416264035500235150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MEAN_HPP #define PYTHONIC_INCLUDE_NUMPY_MEAN_HPP #include "pythonic/include/numpy/sum.hpp" #include "pythonic/include/numpy/expand_dims.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/types/immediate.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template struct make_scalar_pshape : sutils::concat>, typename make_scalar_pshape::type> { }; template <> struct make_scalar_pshape<1> { using type = types::pshape>; }; template struct dtype_or_double_helper { using type = typename dtype::type; }; template <> struct dtype_or_double_helper { using type = double; }; template using dtype_or_double = typename dtype_or_double_helper::type; } template auto mean(E const &expr, types::none_type axis = {}, dtype d = {}, types::none_type out = {}, types::false_immediate keep_dims = {}) -> decltype(sum(expr, axis, d) / details::dtype_or_double(expr.flat_size())); template auto mean(E const &expr, long axis, dtype d = {}, types::none_type out = {}, types::false_immediate keep_dims = {}) -> decltype(sum(expr, axis, d)); template types::ndarray, typename details::make_scalar_pshape::type> mean(E const &expr, types::none_type axis, dtype d, types::none_type out, types::true_immediate keep_dims); template auto mean(E const &expr, long axis, dtype d, types::none_type out, types::true_immediate keep_dims) -> 
decltype(expand_dims(mean(expr, axis, d), axis)); DEFINE_FUNCTOR(pythonic::numpy, mean); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/median.hpp000066400000000000000000000020511416264035500240300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MEDIAN_HPP #define PYTHONIC_INCLUDE_NUMPY_MEDIAN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/asarray.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template decltype(std::declval() + 1.) median(types::ndarray const &arr, types::none_type = {}); template typename std::enable_if< std::tuple_size::value != 1, types::ndarray() + 1.), types::array::value - 1>>>::type median(types::ndarray const &arr, long axis); template typename std::enable_if::value == 1, decltype(std::declval() + 1.)>::type median(types::ndarray const &arr, long axis); NUMPY_EXPR_TO_NDARRAY0_DECL(median); DEFINE_FUNCTOR(pythonic::numpy, median); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/min.hpp000066400000000000000000000007151416264035500233630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MIN_HPP #define PYTHONIC_INCLUDE_NUMPY_MIN_HPP #include "pythonic/include/numpy/reduce.hpp" #include "pythonic/include/operator_/imin.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto min(Args &&... 
args) -> decltype( reduce(std::forward(args)...)); DEFINE_FUNCTOR(pythonic::numpy, min); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/minimum.hpp000066400000000000000000000007721416264035500242560ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MINIMUM_HPP #define PYTHONIC_INCLUDE_NUMPY_MINIMUM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME minimum #define NUMPY_NARY_FUNC_SYM xsimd::min #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/minimum/000077500000000000000000000000001416264035500235375ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/minimum/accumulate.hpp000066400000000000000000000003071416264035500263730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MINIMUM_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_MINIMUM_ACCUMULATE_HPP #define UFUNC_NAME minimum #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/minimum/reduce.hpp000066400000000000000000000003711416264035500255200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MINIMUM_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_MINIMUM_REDUCE_HPP #define UFUNC_NAME minimum #define UFUNC_INAME imin #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/mod.hpp000066400000000000000000000007041416264035500233550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MOD_HPP #define PYTHONIC_INCLUDE_NUMPY_MOD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/operator_/mod.hpp" PYTHONIC_NS_BEGIN 
namespace numpy { /* this is still a numpy_expr, because operator::mod_ forwards to * operator% which is correctly overloaded */ USING_FUNCTOR(mod, operator_::functor::mod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/mod/000077500000000000000000000000001416264035500226435ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/mod/accumulate.hpp000066400000000000000000000002731416264035500255010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MOD_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_MOD_ACCUMULATE_HPP #define UFUNC_NAME mod #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/multiply.hpp000066400000000000000000000010361416264035500244540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MULTIPLY_HPP #define PYTHONIC_INCLUDE_NUMPY_MULTIPLY_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/mul.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME multiply #define NUMPY_NARY_FUNC_SYM pythonic::operator_::mul #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/multiply/000077500000000000000000000000001416264035500237435ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/multiply/accumulate.hpp000066400000000000000000000003121416264035500265730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_MULTIPLY_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_MULTIPLY_ACCUMULATE_HPP #define UFUNC_NAME multiply #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/multiply/reduce.hpp000066400000000000000000000003741416264035500257270ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_MULTIPLY_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_MULTIPLY_REDUCE_HPP #define UFUNC_NAME multiply #define UFUNC_INAME imul #include "pythonic/include/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nan.hpp000066400000000000000000000003401416264035500233460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NAN_HPP #define PYTHONIC_INCLUDE_NUMPY_NAN_HPP #include PYTHONIC_NS_BEGIN namespace numpy { double const nan = std::numeric_limits::quiet_NaN(); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nan_to_num.hpp000066400000000000000000000011101416264035500247230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NANTONUM_HPP #define PYTHONIC_INCLUDE_NUMPY_NANTONUM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/isnan.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template I nan_to_num(I const &a); } #define NUMPY_NARY_FUNC_NAME nan_to_num #define NUMPY_NARY_FUNC_SYM wrapper::nan_to_num #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nanargmax.hpp000066400000000000000000000006101416264035500245460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NANARGMAX_HPP #define PYTHONIC_INCLUDE_NUMPY_NANARGMAX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { template long nanargmax(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, nanargmax); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nanargmin.hpp000066400000000000000000000005341416264035500245510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NANARGMIN_HPP #define 
PYTHONIC_INCLUDE_NUMPY_NANARGMIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template long nanargmin(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, nanargmin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nanmax.hpp000066400000000000000000000006111416264035500240550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NANMAX_HPP #define PYTHONIC_INCLUDE_NUMPY_NANMAX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename E::dtype nanmax(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, nanmax); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nanmin.hpp000066400000000000000000000006111416264035500240530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NANMIN_HPP #define PYTHONIC_INCLUDE_NUMPY_NANMIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename E::dtype nanmin(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, nanmin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nansum.hpp000066400000000000000000000010251416264035500240740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NANSUM_HPP #define PYTHONIC_INCLUDE_NUMPY_NANSUM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template void _nansum(E begin, E end, F &sum, utils::int_<1>); template void _nansum(E begin, E end, F &sum, utils::int_); template typename E::dtype nansum(E const &expr); DEFINE_FUNCTOR(pythonic::numpy, nansum); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray.hpp000066400000000000000000000015471416264035500242440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/nested_container.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ndarray(pS const &shape, dtype d = dtype()); template types::ndarray> ndarray(long size, dtype d = dtype()); template types::ndarray>> ndarray(std::integral_constant, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, ndarray); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/000077500000000000000000000000001416264035500235245ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/astype.hpp000066400000000000000000000006301416264035500255410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_ASTYPE_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_ASTYPE_HPP #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template auto astype(E &&e, dtype d) -> decltype(asarray(std::forward(e), d)); DEFINE_FUNCTOR(pythonic::numpy::ndarray, astype); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/fill.hpp000066400000000000000000000010211416264035500251550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_FILL_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_FILL_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template types::none_type fill(E &&e, F f); template types::none_type fill(types::ndarray &e, F f); DEFINE_FUNCTOR(pythonic::numpy::ndarray, fill); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/flatten.hpp000066400000000000000000000007531416264035500256770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_FLATTEN_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_FLATTEN_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template types::ndarray> flatten(types::ndarray const &a); NUMPY_EXPR_TO_NDARRAY0_DECL(flatten); DEFINE_FUNCTOR(pythonic::numpy::ndarray, flatten); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/item.hpp000066400000000000000000000012251416264035500251730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_ITEM_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_ITEM_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template T item(types::ndarray const &expr, long i); template auto item(E &&expr, types::array const &i) -> decltype(expr[i]); // only for compatibility purpose, very bad impl template typename std::decay::type::dtype item(E &&expr, long i); DEFINE_FUNCTOR(pythonic::numpy::ndarray, item); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/reshape.hpp000066400000000000000000000022621416264035500256660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_RESHAPE_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_RESHAPE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template typename std::enable_if::value, types::ndarray>::type reshape(types::ndarray const &expr, NpS const &new_shape); template typename std::enable_if::value, types::ndarray>>::type reshape(types::ndarray const &expr, NpS const &new_shape); template auto reshape(types::ndarray const &expr, 
S0 i0, S1 i1, S const &... indices) -> decltype(reshape(expr, types::pshape{i0, i1, indices...})); NUMPY_EXPR_TO_NDARRAY0_DECL(reshape); DEFINE_FUNCTOR(pythonic::numpy::ndarray, reshape); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/sort.hpp000066400000000000000000000010521416264035500252220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_SORT_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_SORT_HPP #include "pythonic/include/numpy/sort.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template types::none_type sort(E &&expr, types::none_type); template types::none_type sort(E &&expr, long axis, types::none_type = {}); template types::none_type sort(E &&expr, long axis, types::str const &kind); DEFINE_FUNCTOR(pythonic::numpy::ndarray, sort); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/tofile.hpp000066400000000000000000000011741416264035500255220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_TOFILE_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_TOFILE_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template void tofile(types::ndarray const &expr, types::str const &file_name, types::str const &sep = "", types::str const &format = ""); NUMPY_EXPR_TO_NDARRAY0_DECL(tofile); DEFINE_FUNCTOR(pythonic::numpy::ndarray, tofile); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/tolist.hpp000066400000000000000000000020451416264035500255540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_TOLIST_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_TOLIST_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template struct 
tolist_type { using type = types::list::type>; }; template struct tolist_type { using type = types::list; }; template typename std::enable_if::value == 1, types::list>::type tolist(types::ndarray const &expr); template typename std::enable_if< std::tuple_size::value != 1, typename tolist_type::value>::type>::type tolist(types::ndarray const &expr); NUMPY_EXPR_TO_NDARRAY0_DECL(tolist); DEFINE_FUNCTOR(pythonic::numpy::ndarray, tolist); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndarray/tostring.hpp000066400000000000000000000010431416264035500261040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDARRAY_TOSTRING_HPP #define PYTHONIC_INCLUDE_NUMPY_NDARRAY_TOSTRING_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template types::str tostring(types::ndarray const &expr); NUMPY_EXPR_TO_NDARRAY0_DECL(tostring); DEFINE_FUNCTOR(pythonic::numpy::ndarray, tostring); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndenumerate.hpp000066400000000000000000000027101416264035500251040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDENUMERATE_HPP #define PYTHONIC_INCLUDE_NUMPY_NDENUMERATE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template struct ndenumerate_iterator : std::iterator< std::random_access_iterator_tag, std::tuple, typename E::dtype>> { long index; E const &expr; typename E::dtype *iter; ndenumerate_iterator(); ndenumerate_iterator(E const &expr, long first); std::tuple, typename E::dtype> operator*() const; ndenumerate_iterator &operator++(); ndenumerate_iterator &operator+=(long n); bool operator!=(ndenumerate_iterator const &other) const; bool operator<(ndenumerate_iterator const &other) const; long operator-(ndenumerate_iterator const &other) const; }; template struct 
_ndenumerate : ndenumerate_iterator { using iterator = ndenumerate_iterator; E expr; // we need to keep one ref over the enumerated sequence alive iterator end_iter; _ndenumerate(); _ndenumerate(E const &expr); iterator &begin(); iterator const &begin() const; iterator end() const; }; template _ndenumerate> ndenumerate(types::ndarray const &expr); NUMPY_EXPR_TO_NDARRAY0_DECL(ndenumerate); DEFINE_FUNCTOR(pythonic::numpy, ndenumerate); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndim.hpp000066400000000000000000000005741416264035500235320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDIM_HPP #define PYTHONIC_INCLUDE_NUMPY_NDIM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto ndim(E const &e) -> decltype(builtins::getattr(types::attr::NDIM{}, e)); DEFINE_FUNCTOR(pythonic::numpy, ndim) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ndindex.hpp000066400000000000000000000031341416264035500242270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NDINDEX_HPP #define PYTHONIC_INCLUDE_NUMPY_NDINDEX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/tuple.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template struct ndindex_iterator : std::iterator< std::random_access_iterator_tag, types::array, ptrdiff_t, types::array *, types::array /* reference_type, but no reference is possible*/> { long index; types::array shape; ndindex_iterator(); ndindex_iterator(types::array const &shape, long first); types::array operator*() const; ndindex_iterator &operator++(); ndindex_iterator &operator+=(long n); bool operator!=(ndindex_iterator const &other) const; bool operator<(ndindex_iterator const &other) const; long operator-(ndindex_iterator const &other) const; }; template struct _ndindex : ndindex_iterator { using iterator = ndindex_iterator; types::array shape; iterator 
end_iter; _ndindex(); _ndindex(types::array const &shape); iterator &begin(); iterator const &begin() const; iterator end() const; }; template _ndindex ndindex(Types... args); template _ndindex ndindex(types::array const &args); template _ndindex ndindex(types::pshape const &args); DEFINE_FUNCTOR(pythonic::numpy, ndindex); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/negative.hpp000066400000000000000000000007501416264035500244010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NEGATIVE_HPP #define PYTHONIC_INCLUDE_NUMPY_NEGATIVE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/neg.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME negative #define NUMPY_NARY_FUNC_SYM pythonic::operator_::neg #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/negative/000077500000000000000000000000001416264035500236665ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/negative/accumulate.hpp000066400000000000000000000003121416264035500265160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NEGATIVE_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_NEGATIVE_ACCUMULATE_HPP #define UFUNC_NAME negative #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/newaxis.hpp000066400000000000000000000003571416264035500242600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NEWAXIS_HPP #define PYTHONIC_INCLUDE_NUMPY_NEWAXIS_HPP #include "pythonic/include/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace numpy { auto const &newaxis = builtins::None; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nextafter.hpp000066400000000000000000000007501416264035500245770ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_NEXTAFTER_HPP #define PYTHONIC_INCLUDE_NUMPY_NEXTAFTER_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME nextafter #define NUMPY_NARY_FUNC_SYM std::nextafter #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nextafter/000077500000000000000000000000001416264035500240645ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nextafter/accumulate.hpp000066400000000000000000000003151416264035500267170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NEXTAFTER_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_NEXTAFTER_ACCUMULATE_HPP #define UFUNC_NAME nextafter #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/nonzero.hpp000066400000000000000000000006411416264035500242700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NONZERO_HPP #define PYTHONIC_INCLUDE_NUMPY_NONZERO_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto nonzero(E const &expr) -> types::array>, E::value>; DEFINE_FUNCTOR(pythonic::numpy, nonzero) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/not_equal.hpp000066400000000000000000000007461416264035500245730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NOTEQUAL_HPP #define PYTHONIC_INCLUDE_NUMPY_NOTEQUAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/ne.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME not_equal #define NUMPY_NARY_FUNC_SYM 
pythonic::operator_::ne #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/not_equal/000077500000000000000000000000001416264035500240535ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/not_equal/accumulate.hpp000066400000000000000000000003151416264035500267060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_NOT_EQUAL_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_NOT_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME not_equal #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ones.hpp000066400000000000000000000015141416264035500235420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ONES_HPP #define PYTHONIC_INCLUDE_NUMPY_ONES_HPP #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ones(pS const &shape, dtype d = dtype()); template types::ndarray> ones(long size, dtype d = dtype()); template types::ndarray>> ones(std::integral_constant, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, ones); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ones_like.hpp000066400000000000000000000011621416264035500245450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ONESLIKE_HPP #define PYTHONIC_INCLUDE_NUMPY_ONESLIKE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/ones.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto ones_like(E const &expr, dtype d = dtype()) -> decltype(ones(sutils::getshape(expr), d)); template auto ones_like(E const &expr, types::none_type d = builtins::None) -> decltype(ones(sutils::getshape(expr), types::dtype_t())); DEFINE_FUNCTOR(pythonic::numpy, ones_like) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/outer.hpp000066400000000000000000000020051416264035500237300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_OUTER_HPP #define PYTHONIC_INCLUDE_NUMPY_OUTER_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray() + std::declval()), types::pshape> outer(types::ndarray const &a, types::ndarray const &b); template auto outer(types::ndarray const &a, E1 const &b) -> decltype(outer(a, asarray(b))); template auto outer(E0 const &a, types::ndarray const &b) -> decltype(outer(asarray(a), b)); template auto outer(E0 const &a, E1 const &b) -> decltype(outer(asarray(a), asarray(b))); DEFINE_FUNCTOR(pythonic::numpy, outer); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/partial_sum.hpp000066400000000000000000000026241416264035500251210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PARTIAL_SUM_HPP #define PYTHONIC_INCLUDE_NUMPY_PARTIAL_SUM_HPP #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template using result_dtype = types::dtype_t()( std::declval::type::dtype>(), std::declval::type::dtype>()))>; template > types::ndarray> partial_sum(E const &expr, dtype d = dtype()); template > auto partial_sum(E const &expr, long axis, dtype d = dtype()) -> typename std::enable_if(expr))>::type; template > using partial_sum_type = types::ndarray>; template > using partial_sum_type2 = types::ndarray>; template > typename std::enable_if>::type partial_sum(E const &expr, long axis, dtype d = dtype()); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/pi.hpp000066400000000000000000000003271416264035500232070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PI_HPP #define PYTHONIC_INCLUDE_NUMPY_PI_HPP #include 
"pythonic/include/math/pi.hpp" PYTHONIC_NS_BEGIN namespace numpy { double constexpr pi = math::pi; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/place.hpp000066400000000000000000000014701416264035500236630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PLACE_HPP #define PYTHONIC_INCLUDE_NUMPY_PLACE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::none_type place(types::ndarray &expr, types::ndarray const &mask, F const &values); template types::none_type place(types::ndarray &expr, M const &mask, F const &values); template types::none_type place(E &, M const &, F const &); DEFINE_FUNCTOR(pythonic::numpy, place); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/power.hpp000066400000000000000000000011351416264035500237310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_POWER_HPP #define PYTHONIC_INCLUDE_NUMPY_POWER_HPP #include "pythonic/include/types/numpy_op_helper.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME power #define NUMPY_NARY_FUNC_SYM xsimd::pow // no need to adapt_type here, as it may turn a**2 into a**2.f #define NUMPY_NARY_RESHAPE_MODE reshape_type #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/power/000077500000000000000000000000001416264035500232205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/power/accumulate.hpp000066400000000000000000000003011416264035500260460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_POWER_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_POWER_ACCUMULATE_HPP #define 
UFUNC_NAME power #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/prod.hpp000066400000000000000000000007211416264035500235410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PROD_HPP #define PYTHONIC_INCLUDE_NUMPY_PROD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/reduce.hpp" #include "pythonic/include/operator_/imul.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto prod(Args &&... args) -> decltype( reduce(std::forward(args)...)); DEFINE_FUNCTOR(pythonic::numpy, prod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/product.hpp000066400000000000000000000003411416264035500242530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PRODUCT_HPP #define PYTHONIC_INCLUDE_NUMPY_PRODUCT_HPP #include "pythonic/include/numpy/prod.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(product, prod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ptp.hpp000066400000000000000000000007201416264035500233770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PTP_HPP #define PYTHONIC_INCLUDE_NUMPY_PTP_HPP #include "pythonic/include/numpy/min.hpp" #include "pythonic/include/numpy/max.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto ptp(E const &expr, long axis) -> decltype(max(expr, axis) - min(expr, axis)); template auto ptp(E const &expr) -> decltype(max(expr) - min(expr)); DEFINE_FUNCTOR(pythonic::numpy, ptp); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/put.hpp000066400000000000000000000013611416264035500234060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PUT_HPP #define PYTHONIC_INCLUDE_NUMPY_PUT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, types::none_type>::type 
put(types::ndarray &expr, F const &ind, E const &v); template types::none_type put(types::ndarray &expr, long int ind, T const &v); template types::none_type put(E &, M const &, V const &); DEFINE_FUNCTOR(pythonic::numpy, put); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/putmask.hpp000066400000000000000000000012001416264035500242520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_PUTMASK_HPP #define PYTHONIC_INCLUDE_NUMPY_PUTMASK_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::none_type putmask(types::ndarray &expr, E const &mask, F const &values); template types::none_type putmask(E &, M const &, F const &); DEFINE_FUNCTOR(pythonic::numpy, putmask); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/rad2deg.hpp000066400000000000000000000011561416264035500241100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RAD2DEG_HPP #define PYTHONIC_INCLUDE_NUMPY_RAD2DEG_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/pi.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto rad2deg(T const &val) -> decltype(val * 180 / pi) { return val * 180 / pi; } } #define NUMPY_NARY_FUNC_NAME rad2deg #define NUMPY_NARY_FUNC_SYM wrapper::rad2deg #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/radians.hpp000066400000000000000000000005671416264035500242260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RADIANS_HPP #define PYTHONIC_INCLUDE_NUMPY_RADIANS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/numpy/deg2rad.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(radians, deg2rad); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/000077500000000000000000000000001416264035500233445ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/binomial.hpp000066400000000000000000000012631416264035500256510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_BINOMIAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_BINOMIAL_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/types/numpy_expr.hpp" #include "pythonic/include/numpy/random/generator.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray binomial(double n, double p, pS const &shape); auto binomial(double n, double p, long size) -> decltype(binomial(n, p, types::array{{size}})); long binomial(double n, double p, types::none_type d = types::none_type()); DEFINE_FUNCTOR(pythonic::numpy::random, binomial); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/bytes.hpp000066400000000000000000000005531416264035500252060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_BYTES_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_BYTES_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { types::str bytes(long length); DEFINE_FUNCTOR(pythonic::numpy::random, bytes); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/chisquare.hpp000066400000000000000000000012741416264035500260450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_CHISQUARE_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_CHISQUARE_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include 
"pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray chisquare(double df, pS const &shape); auto chisquare(double df, long size) -> decltype(chisquare(df, types::array{{size}})); double chisquare(double df, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, chisquare); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/choice.hpp000066400000000000000000000031071416264035500253100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_CHOICE_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_CHOICE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/random/randint.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray choice(long max, pS const &shape, bool replace, P const &p); template types::ndarray> choice(long max, long size, bool replace, P &&p); template auto choice(long max, T &&size) -> decltype(randint(0, max, std::forward(size))); long choice(long max); template typename T::dtype choice(T const &a); template types::ndarray choice(T const &a, pS const &shape); template types::ndarray> choice(T &&a, long size); template types::ndarray choice(T const &a, pS const &shape, bool replace, P const &p); template types::ndarray> choice(T &&a, long size, bool replace, P &&p); DEFINE_FUNCTOR(pythonic::numpy::random, choice); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/dirichlet.hpp000066400000000000000000000013101416264035500260170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_DIRICHLET_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_DIRICHLET_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace 
numpy { namespace random { template types::ndarray dirichlet(double alpha, pS const &shape); auto dirichlet(double alpha, long size) -> decltype(dirichlet(alpha, types::array{{size}})); double dirichlet(double alpha, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, dirichlet); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/exponential.hpp000066400000000000000000000013341416264035500264040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_EXPONENTIAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_EXPONENTIAL_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray exponential(double scale, pS const &shape); auto exponential(double scale, long size) -> decltype(exponential(scale, types::array{{size}})); double exponential(double scale = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, exponential); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/f.hpp000066400000000000000000000013011416264035500242750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_F_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_F_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray f(double dfnum, double dfden, pS const &shape); auto f(double dfnum, double dfden, long size) -> decltype(f(dfnum, dfden, types::array{{size}})); double f(double dfnum, double dfden, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, f); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/gamma.hpp000066400000000000000000000014451416264035500251430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_GAMMA_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_GAMMA_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray gamma(double shape, double scale, pS const &array_shape); auto gamma(double shape, double scale, long size) -> decltype(gamma(shape, scale, types::array{{size}})); double gamma(double shape = 0.0, double scale = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, gamma); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/generator.hpp000066400000000000000000000052161416264035500260470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_GENERATOR_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_GENERATOR_HPP #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { namespace details { /* * PCG Random Number Generation for C. * * Copyright 2014 Melissa O'Neill * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or *implied. * See the License for the specific language governing permissions and * limitations under the License. 
* * For additional information about the PCG random number generation *scheme, * including its license and other licensing options, visit * * http://www.pcg-random.org */ class pcg { uint64_t state; static constexpr uint64_t inc = 0xda3e39cb94b95bdbULL; public: using result_type = uint32_t; static constexpr result_type min() { return 0; } static constexpr result_type max() { return std::numeric_limits::max(); } friend bool operator==(pcg const &self, pcg const &other) { return self.state == other.state; } friend bool operator!=(pcg const &self, pcg const &other) { return self.state != other.state; } pcg() : state(0) { } explicit pcg(std::random_device &rd) { seed(rd()); } void seed(uint64_t value = 0) { state = value; (void)operator()(); } result_type operator()() { uint64_t oldstate = state; state = oldstate * 6364136223846793005ULL + inc; uint32_t xorshifted = uint32_t(((oldstate >> 18u) ^ oldstate) >> 27u); int rot = oldstate >> 59u; return (xorshifted >> rot) | (xorshifted << ((-rot) & 31)); } void discard(std::size_t n) { for (std::size_t i = 0; i < n; ++i) operator()(); } private: }; std::random_device rd; pcg generator(rd); } // namespace details } // namespace random } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/geometric.hpp000066400000000000000000000012661416264035500260400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_GEOMETRIC_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_GEOMETRIC_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray geometric(double p, pS const &shape); auto geometric(double p, long size) -> decltype(geometric(p, types::array{{size}})); double geometric(double, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, geometric); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/gumbel.hpp000066400000000000000000000014401416264035500253270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_GUMBEL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_GUMBEL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray gumbel(double loc, double scale, pS const &shape); auto gumbel(double loc, double scale, long size) -> decltype(gumbel(loc, scale, types::array{{size}})); double gumbel(double loc = 0.0, double scale = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, gumbel); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/laplace.hpp000066400000000000000000000014511416264035500254570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_LAPLACE_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_LAPLACE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray laplace(double loc, double scale, pS const &shape); auto laplace(double loc, double scale, long size) -> decltype(laplace(loc, scale, types::array{{size}})); double laplace(double loc = 0.0, double scale = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, laplace); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/logistic.hpp000066400000000000000000000014621416264035500256750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_LOGISTIC_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_LOGISTIC_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include 
"pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray logistic(double loc, double scale, pS const &shape); auto logistic(double loc, double scale, long size) -> decltype(logistic(loc, scale, types::array{{size}})); double logistic(double loc = 0.0, double scale = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, logistic); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/lognormal.hpp000066400000000000000000000014771416264035500260600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_LOGNORMAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_LOGNORMAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray lognormal(double mean, double sigma, pS const &shape); auto lognormal(double mean, double sigma, long size) -> decltype(lognormal(mean, sigma, types::array{{size}})); double lognormal(double mean = 0.0, double sigma = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, lognormal); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/logseries.hpp000066400000000000000000000013001416264035500260430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_LOGSERIES_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_LOGSERIES_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray logseries(double loc, pS const &shape); auto logseries(double loc, long size) -> decltype(logseries(loc, types::array{{size}})); double logseries(double 
loc, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, logseries); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/negative_binomial.hpp000066400000000000000000000015011416264035500275260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_NEGATIVE_BINOMIAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_NEGATIVE_BINOMIAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray negative_binomial(double n, double p, pS const &shape); auto negative_binomial(double n, double p, long size) -> decltype(negative_binomial(n, p, types::array{{size}})); double negative_binomial(double n, double p, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, negative_binomial); } } PYTHONIC_NS_END #endifpythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/normal.hpp000066400000000000000000000014401416264035500253440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_NORMAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_NORMAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray normal(double loc, double scale, pS const &shape); auto normal(double loc, double scale, long size) -> decltype(normal(loc, scale, types::array{{size}})); double normal(double loc = 0.0, double scale = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, normal); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/pareto.hpp000066400000000000000000000012651416264035500253530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_PARETO_HPP 
#define PYTHONIC_INCLUDE_NUMPY_RANDOM_PARETO_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray pareto(double a, pS const &shape); auto pareto(double a, long size) -> decltype(pareto(a, types::array{{size}})); double pareto(double a, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, pareto); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/poisson.hpp000066400000000000000000000012701416264035500255470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_POISSON_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_POISSON_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray poisson(double lam, pS const &shape); auto poisson(double lam, long size) -> decltype(poisson(lam, types::array{{size}})); double poisson(double lam = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, poisson); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/power.hpp000066400000000000000000000012551416264035500252140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_POWER_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_POWER_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray power(double a, pS const &shape); auto power(double a, long size) -> decltype(power(a, types::array{{size}})); double power(double a, types::none_type 
size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, power); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/rand.hpp000066400000000000000000000007061416264035500250040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RAND_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_RAND_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray> rand(T... shape); double rand(); DEFINE_FUNCTOR(pythonic::numpy::random, rand); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/randint.hpp000066400000000000000000000021461416264035500255170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDINT_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDINT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template typename std::enable_if::value, types::ndarray>::type randint(long min, long max, pS const &shape); template typename std::enable_if::value, types::ndarray>>::type randint(long min, long max, pS const &shape); template auto randint(long max, types::none_type, pS const &shape) -> decltype(randint(0, max, shape)); long randint(long min, long max); long randint(long max, types::none_type = {}); auto randint(long min, long max, long size) -> decltype(randint(min, max, types::array{{size}})); DEFINE_FUNCTOR(pythonic::numpy::random, randint); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/randn.hpp000066400000000000000000000010021416264035500251500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDN_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDN_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/types/ndarray.hpp" #include 
"pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray> randn(T... shape); double randn(); DEFINE_FUNCTOR(pythonic::numpy::random, randn); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/random.hpp000066400000000000000000000015641416264035500253430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDOM_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDOM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray random(pS const &shape); auto random(long size) -> decltype(random(types::array{{size}})); template auto random(std::integral_constant) -> decltype(random(types::array, 1>{})) { return random(types::array, 1>{}); } double random(types::none_type d = types::none_type()); DEFINE_FUNCTOR(pythonic::numpy::random, random); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/random_integers.hpp000066400000000000000000000011461416264035500272370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDOM_INTEGERS_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDOM_INTEGERS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/random/randint.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template auto random_integers(long min, long max, T &&size) -> decltype(randint(min, max, std::forward(size))); long random_integers(long max); long random_integers(long min, long max); DEFINE_FUNCTOR(pythonic::numpy::random, random_integers); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/random_sample.hpp000066400000000000000000000005271416264035500267020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDOM_SAMPLE_HPP 
#define PYTHONIC_INCLUDE_NUMPY_RANDOM_RANDOM_SAMPLE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/random/random.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { USING_FUNCTOR(random_sample, random); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/ranf.hpp000066400000000000000000000004741416264035500250100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RANF_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_RANF_HPP #include "pythonic/include/numpy/random/random.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { USING_FUNCTOR(ranf, random); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/rayleigh.hpp000066400000000000000000000013371416264035500256650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_RAYLEIGH_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_RAYLEIGH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray rayleigh(double scale, pS const &array_shape); auto rayleigh(double scale, long size) -> decltype(rayleigh(scale, types::array{{size}})); double rayleigh(double scale = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, rayleigh); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/sample.hpp000066400000000000000000000005021416264035500253330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_SAMPLE_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_SAMPLE_HPP #include "pythonic/include/numpy/random/random.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { USING_FUNCTOR(sample, random); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/seed.hpp000066400000000000000000000005751416264035500250040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_SEED_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_SEED_HPP #include "pythonic/include/numpy/random/generator.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { types::none_type seed(long s); types::none_type seed(types::none_type _ = {}); DEFINE_FUNCTOR(pythonic::numpy::random, seed); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/shuffle.hpp000066400000000000000000000007111416264035500255100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_SHUFFLE_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_SHUFFLE_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/numpy/random/generator.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::none_type shuffle(T &seq); DEFINE_FUNCTOR(pythonic::numpy::random, shuffle); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/standard_exponential.hpp000066400000000000000000000013411416264035500302620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_STANDARD_EXPONENTIAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_STANDARD_EXPONENTIAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray standard_exponential(pS const &shape); auto standard_exponential(long size) -> decltype(standard_exponential(types::array{{size}})); double standard_exponential(types::none_type d = {}); DEFINE_FUNCTOR(pythonic::numpy::random, standard_exponential); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/standard_gamma.hpp000066400000000000000000000013301416264035500270140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_STANDARD_GAMMA_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_STANDARD_GAMMA_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray standard_gamma(double s, pS const &shape); auto standard_gamma(double s, long size) -> decltype(standard_gamma(s, types::array{{size}})); double standard_gamma(double s, types::none_type d = {}); DEFINE_FUNCTOR(pythonic::numpy::random, standard_gamma); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/standard_normal.hpp000066400000000000000000000012761416264035500272330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_STANDARD_NORMAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_STANDARD_NORMAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray standard_normal(pS const &shape); auto standard_normal(long size) -> decltype(standard_normal(types::array{{size}})); double standard_normal(types::none_type d = {}); DEFINE_FUNCTOR(pythonic::numpy::random, standard_normal); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/uniform.hpp000066400000000000000000000015441416264035500255400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_UNIFORM_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_UNIFORM_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include 
"pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray uniform(double low, double high, pS const &array_shape); auto uniform(double low, double high, long size) -> decltype(uniform(low, high, types::array{{size}})); double uniform(double low = 0.0, double high = 1.0, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, uniform); } // namespace random } // namespace numpy PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/random/weibull.hpp000066400000000000000000000012521416264035500255200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RANDOM_WEIBULL_HPP #define PYTHONIC_INCLUDE_NUMPY_RANDOM_WEIBULL_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray weibull(double a, pS const &shape); auto weibull(double a, long size) -> decltype(weibull(a, types::array{{size}})); double weibull(double a, types::none_type size = {}); DEFINE_FUNCTOR(pythonic::numpy::random, weibull); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ravel.hpp000066400000000000000000000007051416264035500237100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RAVEL_HPP #define PYTHONIC_INCLUDE_NUMPY_RAVEL_HPP #include "pythonic/include/numpy/ndarray/reshape.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ravel(types::ndarray const &expr); NUMPY_EXPR_TO_NDARRAY0_DECL(ravel); DEFINE_FUNCTOR(pythonic::numpy, ravel); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/real.hpp000066400000000000000000000012221416264035500235150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_REAL_HPP #define PYTHONIC_INCLUDE_NUMPY_REAL_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/list.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto real(E &&expr) -> decltype(builtins::getattr(types::attr::REAL{}, std::forward(expr))); template auto real(types::list const &expr) -> decltype(real(numpy::functor::asarray{}(expr))); DEFINE_FUNCTOR(pythonic::numpy, real); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/reciprocal.hpp000066400000000000000000000011501416264035500247150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RECIPROCAL_HPP #define PYTHONIC_INCLUDE_NUMPY_RECIPROCAL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto reciprocal(T const &val) -> decltype(static_cast(1.) / val) { return static_cast(1.) 
/ val; } } #define NUMPY_NARY_FUNC_NAME reciprocal #define NUMPY_NARY_FUNC_SYM wrapper::reciprocal #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/reduce.hpp000066400000000000000000000071641416264035500240540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_REDUCE_HPP #define PYTHONIC_INCLUDE_NUMPY_REDUCE_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { namespace functor { struct imax; struct imin; } } namespace numpy { namespace { template struct reduce_result_type_helper { using type = typename T::type; }; template struct reduce_result_type_helper { using type = typename std::conditional< std::is_integral::type>::value && (sizeof(typename types::dtype_of::type) < sizeof(long)) && !std::is_same::value && !std::is_same::value, typename std::conditional< std::is_same::type, bool>::value, long, typename std::conditional< std::is_signed::type>::value, long, unsigned long>::type>::type, typename types::dtype_of::type>::type; }; template using reduce_result_type = typename reduce_result_type_helper::type; } template typename std::enable_if< std::is_scalar::value || types::is_complex::value, E>::type reduce(E const &expr, types::none_type _ = types::none_type()); template typename std::enable_if< std::is_scalar::value || types::is_complex::value, E>::type reduce(E const &array, long axis); template typename std::enable_if::value, reduce_result_type>::type reduce(E const &expr, types::none_type axis = {}, dtype d = {}); template reduce_result_type reduce(types::numpy_texpr const &expr, types::none_type axis = {}, dtype d = {}) { return reduce(expr.arg, axis, d); } template typename std::enable_if>::type reduce(E const &array, long axis, dtype d = {}, types::none_type out = {}); template typename std::enable_if>::type reduce(E const &array, long axis, types::none_type dtype, Out &&out); 
namespace { template using reduced_type = types::ndarray, types::array>; } template typename std::enable_if>::type reduce(E const &array, long axis, dtype d = {}, types::none_type out = types::none_type()); template reduced_type reduce(types::numpy_texpr const &array, long axis, types::none_type dtype = types::none_type(), types::none_type out = types::none_type()) { return reduce(array.arg, (axis + 1) % 2); } template typename std::enable_if>::type reduce(E const &array, long axis, types::none_type dtype, Out &&out); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/remainder.hpp000066400000000000000000000013331416264035500245430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_REMAINDER_HPP #define PYTHONIC_INCLUDE_NUMPY_REMAINDER_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto remainder(T0 const &x, T1 const &y) -> decltype(x - y * xsimd::floor(x / y)) { return x - y * xsimd::floor(x / y); } } #define NUMPY_NARY_FUNC_NAME remainder #define NUMPY_NARY_FUNC_SYM wrapper::remainder #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/remainder/000077500000000000000000000000001416264035500240325ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/remainder/accumulate.hpp000066400000000000000000000003151416264035500266650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_REMAINDER_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_REMAINDER_ACCUMULATE_HPP #define UFUNC_NAME remainder #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/repeat.hpp000066400000000000000000000014131416264035500240540ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_REPEAT_HPP #define PYTHONIC_INCLUDE_NUMPY_REPEAT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::value>> repeat(types::ndarray const &expr, long repeats, long axis); template types::ndarray> repeat(types::ndarray const &expr, long repeats, types::none_type axis = types::none_type{}); NUMPY_EXPR_TO_NDARRAY0_DECL(repeat); DEFINE_FUNCTOR(pythonic::numpy, repeat); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/resize.hpp000066400000000000000000000004751416264035500241040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RESIZE_HPP #define PYTHONIC_INCLUDE_NUMPY_RESIZE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/ndarray/reshape.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(resize, pythonic::numpy::ndarray::functor::reshape); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/right_shift.hpp000066400000000000000000000010411416264035500251030ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RIGHTSHIFT_HPP #define PYTHONIC_INCLUDE_NUMPY_RIGHTSHIFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/rshift.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME right_shift #define NUMPY_NARY_FUNC_SYM operator_::rshift #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/right_shift/000077500000000000000000000000001416264035500243765ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/right_shift/accumulate.hpp000066400000000000000000000003231416264035500272300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RIGHT_SHIFT_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_RIGHT_SHIFT_ACCUMULATE_HPP #define UFUNC_NAME right_shift #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/rint.hpp000066400000000000000000000010701416264035500235470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_RINT_HPP #define PYTHONIC_INCLUDE_NUMPY_RINT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template T rint(T const &v); template std::complex rint(std::complex const &v); } #define NUMPY_NARY_FUNC_NAME rint #define NUMPY_NARY_FUNC_SYM wrapper::rint #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/roll.hpp000066400000000000000000000011641416264035500235470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ROLL_HPP #define PYTHONIC_INCLUDE_NUMPY_ROLL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray roll(types::ndarray const &expr, long shift); template types::ndarray roll(types::ndarray const &expr, long shift, long axis); NUMPY_EXPR_TO_NDARRAY0_DECL(roll); DEFINE_FUNCTOR(pythonic::numpy, roll); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/rollaxis.hpp000066400000000000000000000007651416264035500244420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ROLLAXIS_HPP 
#define PYTHONIC_INCLUDE_NUMPY_ROLLAXIS_HPP #include "pythonic/include/numpy/transpose.hpp" #include "pythonic/include/numpy/copy.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::value>> rollaxis(types::ndarray const &a, long axis, long start = 0); NUMPY_EXPR_TO_NDARRAY0_DECL(rollaxis); DEFINE_FUNCTOR(pythonic::numpy, rollaxis); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/rot90.hpp000066400000000000000000000010731416264035500235530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ROT90_HPP #define PYTHONIC_INCLUDE_NUMPY_ROT90_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/copy.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::value>> rot90(types::ndarray const &expr, int k = 1); NUMPY_EXPR_TO_NDARRAY0_DECL(rot90) DEFINE_FUNCTOR(pythonic::numpy, rot90); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/round.hpp000066400000000000000000000003401416264035500237210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ROUND_HPP #define PYTHONIC_INCLUDE_NUMPY_ROUND_HPP #include "pythonic/include/numpy/around.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(round, around); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/round_.hpp000066400000000000000000000003431416264035500240630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ROUND__HPP #define PYTHONIC_INCLUDE_NUMPY_ROUND__HPP #include "pythonic/include/numpy/around.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(round_, around); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/searchsorted.hpp000066400000000000000000000015531416264035500252670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SEARCHSORTED_HPP #define PYTHONIC_INCLUDE_NUMPY_SEARCHSORTED_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/str.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, long>::type searchsorted(U const &a, T const &v, types::str const &side = "left"); template typename std::enable_if< types::is_numexpr_arg::value, types::ndarray>>::type searchsorted(T const &a, E const &v, types::str const &side = "left"); DEFINE_FUNCTOR(pythonic::numpy, searchsorted); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/select.hpp000066400000000000000000000037631416264035500240650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SELECT_HPP #define PYTHONIC_INCLUDE_NUMPY_SELECT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/int_.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> select(C const &condlist, L const &choicelist, typename L::dtype _default = 0); template typename std::enable_if< std::tuple_size::value == std::tuple_size::value, types::ndarray::value>>>::type select(types::list> const &condlist, types::list> const &choicelist, T _default = 0); template typename std::enable_if::value == std::tuple_size::value, types::ndarray>::type select(types::static_list, M> const &condlist, types::static_list, M> const &choicelist, T _default = 0); template typename std::enable_if::value == std::tuple_size::value, types::ndarray>::type select(types::static_list, M> const &condlist, types::list> const &choicelist, T _default = 0); template typename std::enable_if::value == std::tuple_size::value, types::ndarray>::type select(types::list> const &condlist, types::static_list, M> const &choicelist, T _default = 0); DEFINE_FUNCTOR(pythonic::numpy, select); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/setdiff1d.hpp000066400000000000000000000011071416264035500244450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SETDIFF1D_HPP #define PYTHONIC_INCLUDE_NUMPY_SETDIFF1D_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::type, typename types::dtype_of::type>::type, types::pshape> setdiff1d(T const &ar1, U const &ar2, bool assume_unique = false); DEFINE_FUNCTOR(pythonic::numpy, setdiff1d); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/shape.hpp000066400000000000000000000007161416264035500237010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SHAPE_HPP #define PYTHONIC_INCLUDE_NUMPY_SHAPE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto shape(types::ndarray const &e) -> decltype(e._shape); template auto shape(E const &e) -> decltype(sutils::getshape(e)); DEFINE_FUNCTOR(pythonic::numpy, shape) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/short_.hpp000066400000000000000000000011621416264035500240730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SHORT__HPP #define PYTHONIC_INCLUDE_NUMPY_SHORT__HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { short short_(); template short short_(V v); } #define NUMPY_NARY_FUNC_NAME short_ #define NUMPY_NARY_FUNC_SYM details::short_ #define NUMPY_NARY_EXTRA_METHOD using type = short; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sign.hpp000066400000000000000000000006741416264035500235440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SIGN_HPP #define PYTHONIC_INCLUDE_NUMPY_SIGN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME sign #define NUMPY_NARY_FUNC_SYM xsimd::sign #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/signbit.hpp000066400000000000000000000007101416264035500242320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SIGNBIT_HPP #define PYTHONIC_INCLUDE_NUMPY_SIGNBIT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME signbit #define NUMPY_NARY_FUNC_SYM xsimd::signbit #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sin.hpp000066400000000000000000000006701416264035500233710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SIN_HPP #define PYTHONIC_INCLUDE_NUMPY_SIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME sin #define NUMPY_NARY_FUNC_SYM xsimd::sin #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sinh.hpp000066400000000000000000000006741416264035500235450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SINH_HPP #define PYTHONIC_INCLUDE_NUMPY_SINH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" 
#include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME sinh #define NUMPY_NARY_FUNC_SYM xsimd::sinh #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/size.hpp000066400000000000000000000005401416264035500235460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SIZE_HPP #define PYTHONIC_INCLUDE_NUMPY_SIZE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto size(E const &e) -> decltype(e.flat_size()); DEFINE_FUNCTOR(pythonic::numpy, size) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sometrue.hpp000066400000000000000000000003431416264035500244400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SOMETRUE_HPP #define PYTHONIC_INCLUDE_NUMPY_SOMETRUE_HPP #include "pythonic/include/numpy/any.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(sometrue, any); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sort.hpp000066400000000000000000000014051416264035500235640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SORT_HPP #define PYTHONIC_INCLUDE_NUMPY_SORT_HPP #include #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> sort(E const &expr, types::none_type); template types::ndarray> sort(E const &expr, long axis = -1); template types::ndarray> sort(E const &expr, long axis, types::str const &kind); NUMPY_EXPR_TO_NDARRAY0_DECL(sort); DEFINE_FUNCTOR(pythonic::numpy, sort); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sort_complex.hpp000066400000000000000000000004341416264035500253140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SORTCOMPLEX_HPP #define 
PYTHONIC_INCLUDE_NUMPY_SORTCOMPLEX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/sort.hpp" PYTHONIC_NS_BEGIN namespace numpy { USING_FUNCTOR(sort_complex, sort) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/spacing.hpp000066400000000000000000000011271416264035500242220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SPACING_HPP #define PYTHONIC_INCLUDE_NUMPY_SPACING_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto spacing(T const &v) -> decltype(std::nextafter(v, 1) - v) { return std::nextafter(v, 1) - v; } } #define NUMPY_NARY_FUNC_NAME spacing #define NUMPY_NARY_FUNC_SYM wrapper::spacing #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/split.hpp000066400000000000000000000014631416264035500237340ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SPLIT_HPP #define PYTHONIC_INCLUDE_NUMPY_SPLIT_HPP #include "pythonic/include/numpy/array_split.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::list::value>>> split(types::ndarray const &a, long nb_split); template typename std::enable_if< types::is_iterable::value, types::list::value>>>>::type split(types::ndarray const &a, I const &split_mask); template types::list>> split(E const &a, I const &); DEFINE_FUNCTOR(pythonic::numpy, split); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sqrt.hpp000066400000000000000000000006751416264035500235760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SQRT_HPP #define PYTHONIC_INCLUDE_NUMPY_SQRT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define 
NUMPY_NARY_FUNC_NAME sqrt #define NUMPY_NARY_FUNC_SYM xsimd::sqrt #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/square.hpp000066400000000000000000000015171416264035500241010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SQUARE_HPP #define PYTHONIC_INCLUDE_NUMPY_SQUARE_HPP #include "pythonic/include/types/numpy_op_helper.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/utils/functor.hpp" #include namespace wrapper { } PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto square(T const &arg) -> decltype(arg *arg) { return arg * arg; } template std::complex square(std::complex const &arg) { T r = arg.real(), i = arg.imag(); auto t = r * i; auto r2 = r * r; auto i2 = i * i; return {r2 - i2, t + t}; } } #define NUMPY_NARY_FUNC_NAME square #define NUMPY_NARY_FUNC_SYM wrapper::square #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/stack.hpp000066400000000000000000000015071416264035500237050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_STACK_HPP #define PYTHONIC_INCLUDE_NUMPY_STACK_HPP #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> stack(ArraySequence const &args, long axis = 0); namespace details { template using stack_helper_t = typename __combined::type...>::type; } template types::ndarray::dtype, types::array::value + 1>> stack(std::tuple const &args, long axis = 0); DEFINE_FUNCTOR(pythonic::numpy, stack); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/std_.hpp000066400000000000000000000006151416264035500235300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_STD_HPP #define PYTHONIC_INCLUDE_NUMPY_STD_HPP #include "pythonic/include/numpy/var.hpp" #include "pythonic/include/numpy/sqrt.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto std_(Args &&... 
args) -> decltype(functor::sqrt{}(var(std::forward(args)...))); DEFINE_FUNCTOR(pythonic::numpy, std_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/subtract.hpp000066400000000000000000000010351416264035500244230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SUBTRACT_HPP #define PYTHONIC_INCLUDE_NUMPY_SUBTRACT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/sub.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME subtract #define NUMPY_NARY_FUNC_SYM pythonic::operator_::sub #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/subtract/000077500000000000000000000000001416264035500237135ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/subtract/accumulate.hpp000066400000000000000000000003121416264035500265430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SUBTRACT_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_SUBTRACT_ACCUMULATE_HPP #define UFUNC_NAME subtract #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/sum.hpp000066400000000000000000000010351416264035500234000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SUM_HPP #define PYTHONIC_INCLUDE_NUMPY_SUM_HPP #include "pythonic/include/numpy/reduce.hpp" #include "pythonic/include/operator_/iadd.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto sum(Args &&... 
args) -> decltype(reduce(std::forward(args)...)) { return reduce(std::forward(args)...); } DEFINE_FUNCTOR(pythonic::numpy, sum); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/swapaxes.hpp000066400000000000000000000006751416264035500244400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_SWAPAXES_HPP #define PYTHONIC_INCLUDE_NUMPY_SWAPAXES_HPP #include "pythonic/include/numpy/transpose.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto swapaxes(T &&a, int axis1, int axis2) -> decltype(functor::transpose{}( std::forward(a), std::declval::type::value>>())); DEFINE_FUNCTOR(pythonic::numpy, swapaxes); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/take.hpp000066400000000000000000000004771416264035500235310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TAKE_HPP #define PYTHONIC_INCLUDE_NUMPY_TAKE_HPP PYTHONIC_NS_BEGIN namespace numpy { template auto take(T &&expr, F &&indices) -> decltype(std::forward(expr)[std::forward(indices)]); DEFINE_FUNCTOR(pythonic::numpy, take); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/tan.hpp000066400000000000000000000006701416264035500233620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TAN_HPP #define PYTHONIC_INCLUDE_NUMPY_TAN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME tan #define NUMPY_NARY_FUNC_SYM xsimd::tan #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/tanh.hpp000066400000000000000000000006741416264035500235360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TANH_HPP #define PYTHONIC_INCLUDE_NUMPY_TANH_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME tanh #define NUMPY_NARY_FUNC_SYM xsimd::tanh #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/tile.hpp000066400000000000000000000010451416264035500235320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TILE_HPP #define PYTHONIC_INCLUDE_NUMPY_TILE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> tile(E const &expr, long reps); template types::ndarray> tile(E const &expr, types::array const &reps); DEFINE_FUNCTOR(pythonic::numpy, tile); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/trace.hpp000066400000000000000000000004731416264035500236770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRACE_HPP #define PYTHONIC_INCLUDE_NUMPY_TRACE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename T::dtype trace(T const &expr, int offset = 0); DEFINE_FUNCTOR(pythonic::numpy, trace) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/transpose.hpp000066400000000000000000000045621416264035500246220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRANSPOSE_HPP #define PYTHONIC_INCLUDE_NUMPY_TRANSPOSE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/utils/nested_container.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_expr.hpp" PYTHONIC_NS_BEGIN namespace numpy { template E transpose(types::numpy_texpr const &arr) { return arr.arg; } template typename std::enable_if>::type transpose(E const &arr) { return {arr}; } template typename std::enable_if::type transpose(E const &arr) { return arr; } template typename std::enable_if< 
(std::tuple_size::value > 2), types::ndarray::value>>>::type transpose(types::ndarray const &a); template types::ndarray::value>> transpose(types::ndarray const &a, types::array const &t); template types::ndarray> transpose(types::ndarray const &a, long index, Args const &... indices) { return transpose( a, types::array{{index, (long)indices...}}); } template auto _transpose(types::numpy_expr const &expr, utils::index_sequence) -> decltype(Op{}(transpose(std::get(expr.args)...))) { return Op{}(transpose(std::get(expr.args)...)); } template auto transpose(types::numpy_expr const &expr) -> decltype(_transpose(expr, utils::make_index_sequence())) { return _transpose(expr, utils::make_index_sequence()); } template auto transpose(E const &expr) -> typename std::enable_if< (E::value > 2), decltype(transpose(types::ndarray{ expr}))>::type { return transpose( types::ndarray{expr}); } DEFINE_FUNCTOR(pythonic::numpy, transpose); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/tri.hpp000066400000000000000000000007521416264035500233770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRI_HPP #define PYTHONIC_INCLUDE_NUMPY_TRI_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> tri(long N, long M = -1, long k = 0, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, tri) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/tril.hpp000066400000000000000000000007341416264035500235530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRIL_HPP #define PYTHONIC_INCLUDE_NUMPY_TRIL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray tril(types::ndarray const &expr, int k = 0); NUMPY_EXPR_TO_NDARRAY0_DECL(tril) 
DEFINE_FUNCTOR(pythonic::numpy, tril) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/trim_zeros.hpp000066400000000000000000000006701416264035500247750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRIMZEROS_HPP #define PYTHONIC_INCLUDE_NUMPY_TRIMZEROS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/numpy_gexpr.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::numpy_gexpr trim_zeros(T const &expr, types::str const &trim = "fb"); DEFINE_FUNCTOR(pythonic::numpy, trim_zeros) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/triu.hpp000066400000000000000000000007341416264035500235640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRIU_HPP #define PYTHONIC_INCLUDE_NUMPY_TRIU_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray triu(types::ndarray const &expr, int k = 0); NUMPY_EXPR_TO_NDARRAY0_DECL(triu) DEFINE_FUNCTOR(pythonic::numpy, triu) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/true_divide.hpp000066400000000000000000000011211416264035500250730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRUEDIVIDE_HPP #define PYTHONIC_INCLUDE_NUMPY_TRUEDIVIDE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/operator_/div.hpp" PYTHONIC_NS_BEGIN namespace numpy { // FIXME: this is ! always a true_divide... 
#define NUMPY_NARY_FUNC_NAME true_divide #define NUMPY_NARY_FUNC_SYM pythonic::operator_::div #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/true_divide/000077500000000000000000000000001416264035500243675ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/numpy/true_divide/accumulate.hpp000066400000000000000000000003231416264035500272210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRUE_DIVIDE_ACCUMULATE_HPP #define PYTHONIC_INCLUDE_NUMPY_TRUE_DIVIDE_ACCUMULATE_HPP #define UFUNC_NAME true_divide #include "pythonic/include/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/trunc.hpp000066400000000000000000000007001416264035500237250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_TRUNC_HPP #define PYTHONIC_INCLUDE_NUMPY_TRUNC_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME trunc #define NUMPY_NARY_FUNC_SYM xsimd::trunc #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ubyte.hpp000066400000000000000000000012041416264035500237220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UBYTE_HPP #define PYTHONIC_INCLUDE_NUMPY_UBYTE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned char ubyte(); template unsigned char ubyte(V v); } #define NUMPY_NARY_FUNC_NAME ubyte #define NUMPY_NARY_FUNC_SYM details::ubyte #define NUMPY_NARY_EXTRA_METHOD using type = unsigned char; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ufunc_accumulate.hpp000066400000000000000000000013271416264035500261230ustar00rootroot00000000000000#ifndef UFUNC_NAME #error missing UFUNC_NAME #endif // clang-format off #include INCLUDE_FILE(pythonic/include/numpy,UFUNC_NAME) // clang-format on #include "pythonic/include/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace UFUNC_NAME { template > auto accumulate(T &&a, long axis = 0, dtype d = dtype()) -> decltype(partial_sum(std::forward(a), axis, d)); DEFINE_FUNCTOR(pythonic::numpy::UFUNC_NAME, accumulate); } } PYTHONIC_NS_END pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ufunc_reduce.hpp000066400000000000000000000020511416264035500252420ustar00rootroot00000000000000#ifndef UFUNC_NAME #error missing UFUNC_NAME #endif #ifndef UFUNC_INAME #error missing UFUNC_INAME #endif // clang-format off #include INCLUDE_FILE(pythonic/include/operator_,UFUNC_INAME) // clang-format on #include "pythonic/include/numpy/reduce.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace UFUNC_NAME { template auto reduce(Arg &&arg) -> decltype(numpy::reduce( std::forward(arg), 0L)) { return numpy::reduce( std::forward(arg), 0L); } template auto reduce(Args &&... 
args) -> typename std::enable_if< sizeof...(Args) != 1, decltype(numpy::reduce( std::forward(args)...))>::type { return numpy::reduce( std::forward(args)...); } DEFINE_FUNCTOR(pythonic::numpy::UFUNC_NAME, reduce); } } PYTHONIC_NS_END pythran-0.10.0+ds2/pythran/pythonic/include/numpy/uint.hpp000066400000000000000000000011751416264035500235600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UINT_HPP #define PYTHONIC_INCLUDE_NUMPY_UINT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned long uint(); template unsigned long uint(V v); } #define NUMPY_NARY_FUNC_NAME uint #define NUMPY_NARY_FUNC_SYM details::uint #define NUMPY_NARY_EXTRA_METHOD using type = unsigned long; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/uint16.hpp000066400000000000000000000011731416264035500237250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UINT16_HPP #define PYTHONIC_INCLUDE_NUMPY_UINT16_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint16_t uint16(); template uint16_t uint16(V v); } #define NUMPY_NARY_FUNC_NAME uint16 #define NUMPY_NARY_FUNC_SYM details::uint16 #define NUMPY_NARY_EXTRA_METHOD using type = uint16_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/uint32.hpp000066400000000000000000000011201416264035500237130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UINT32_HPP #define PYTHONIC_INCLUDE_NUMPY_UINT32_HPP #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint32_t uint32(); template uint32_t uint32(V v); } #define NUMPY_NARY_FUNC_NAME uint32 #define NUMPY_NARY_FUNC_SYM details::uint32 #define NUMPY_NARY_EXTRA_METHOD using type = uint32_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/uint64.hpp000066400000000000000000000011201416264035500237200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UINT64_HPP #define PYTHONIC_INCLUDE_NUMPY_UINT64_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint64_t uint64(); template uint64_t uint64(V v); } #define NUMPY_NARY_FUNC_NAME uint64 #define NUMPY_NARY_FUNC_SYM details::uint64 #define NUMPY_NARY_EXTRA_METHOD using type = uint64_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/uint8.hpp000066400000000000000000000011621416264035500236440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UINT8_HPP #define PYTHONIC_INCLUDE_NUMPY_UINT8_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint8_t uint8(); template uint8_t uint8(V v); } #define NUMPY_NARY_FUNC_NAME uint8 #define NUMPY_NARY_FUNC_SYM details::uint8 #define NUMPY_NARY_EXTRA_METHOD using type = uint8_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/uintc.hpp000066400000000000000000000011651416264035500237220ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_NUMPY_UINTC_HPP #define PYTHONIC_INCLUDE_NUMPY_UINTC_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned uintc(); template unsigned uintc(V v); } #define NUMPY_NARY_FUNC_NAME uintc #define NUMPY_NARY_FUNC_SYM details::uintc #define NUMPY_NARY_EXTRA_METHOD using type = unsigned; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/uintp.hpp000066400000000000000000000011701416264035500237330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UINTP_HPP #define PYTHONIC_INCLUDE_NUMPY_UINTP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uintptr_t uintp(); template uintptr_t uintp(V v); } #define NUMPY_NARY_FUNC_NAME uintp #define NUMPY_NARY_FUNC_SYM details::uintp #define NUMPY_NARY_EXTRA_METHOD using type = uintptr_t; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ulonglong.hpp000066400000000000000000000012531416264035500246020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ULONGLONG_HPP #define PYTHONIC_INCLUDE_NUMPY_ULONGLONG_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned long long ulonglong(); template unsigned long long ulonglong(V v); } #define NUMPY_NARY_FUNC_NAME ulonglong #define NUMPY_NARY_FUNC_SYM details::ulonglong #define 
NUMPY_NARY_EXTRA_METHOD using type = unsigned long long; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/union1d.hpp000066400000000000000000000007241416264035500241550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UNION1D_HPP #define PYTHONIC_INCLUDE_NUMPY_UNION1D_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray< typename __combined::type, types::pshape> union1d(E const &e, F const &f); DEFINE_FUNCTOR(pythonic::numpy, union1d) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/unique.hpp000066400000000000000000000105661416264035500241130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UNIQUE_HPP #define PYTHONIC_INCLUDE_NUMPY_UNIQUE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/types/immediate.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> unique(E const &expr); template std::tuple>, types::ndarray>> unique(E const &expr, types::true_immediate return_index); template types::ndarray> unique(E const &expr, types::false_immediate return_index); template std::tuple>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::true_immediate return_inverse); template types::ndarray> unique(E const &expr, types::false_immediate return_index, types::false_immediate return_inverse); template std::tuple>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::false_immediate return_inverse); template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::true_immediate return_inverse); template std::tuple>, types::ndarray>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate 
return_index, types::true_immediate return_inverse, types::true_immediate return_counts); template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::true_immediate return_inverse, types::false_immediate return_counts); template std::tuple>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::false_immediate return_inverse, types::false_immediate return_counts); template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::false_immediate return_inverse, types::true_immediate return_counts); template std::tuple>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::true_immediate return_inverse, types::false_immediate return_counts); template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::true_immediate return_inverse, types::true_immediate return_counts); template types::ndarray> unique(E const &expr, types::false_immediate return_index, types::false_immediate return_inverse, types::false_immediate return_counts); template std::tuple>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::false_immediate return_inverse, types::true_immediate return_counts); DEFINE_FUNCTOR(pythonic::numpy, unique) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/unravel_index.hpp000066400000000000000000000010361416264035500254400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UNRAVEL_INDEX_HPP #define PYTHONIC_INCLUDE_NUMPY_UNRAVEL_INDEX_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, types::array::value>>::type unravel_index(E const &expr, S const &shape, types::str const &order = "C"); DEFINE_FUNCTOR(pythonic::numpy, unravel_index); } PYTHONIC_NS_END 
#endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/unwrap.hpp000066400000000000000000000010241416264035500241060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_UNWRAP_HPP #define PYTHONIC_INCLUDE_NUMPY_UNWRAP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/pi.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray unwrap(E const &expr, double discont = pi); DEFINE_FUNCTOR(pythonic::numpy, unwrap) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/ushort.hpp000066400000000000000000000012151416264035500241200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_USHORT_HPP #define PYTHONIC_INCLUDE_NUMPY_USHORT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned short ushort(); template unsigned short ushort(V v); } #define NUMPY_NARY_FUNC_NAME ushort #define NUMPY_NARY_FUNC_SYM details::ushort #define NUMPY_NARY_EXTRA_METHOD using type = unsigned short; #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/var.hpp000066400000000000000000000022221416264035500233630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_VAR_HPP #define PYTHONIC_INCLUDE_NUMPY_VAR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/numpy/add.hpp" #include "pythonic/include/numpy/mean.hpp" #include "pythonic/include/numpy/sum.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template using var_type = typename std::conditional< std::is_integral::value, double, decltype(std::real(std::declval()))>::type; template 
auto var(E const &expr, types::none_type axis = builtins::None, types::none_type dtype = builtins::None, types::none_type out = builtins::None, long ddof = 0) -> decltype(var_type(std::real(mean(expr)))); template auto var(E const &expr, long axis, types::none_type dtype = builtins::None, types::none_type out = builtins::None, long ddof = 0) -> typename assignable() * mean(expr, axis))>::type; DEFINE_FUNCTOR(pythonic::numpy, var); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/vdot.hpp000066400000000000000000000010611416264035500235470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_VDOT_HPP #define PYTHONIC_INCLUDE_NUMPY_VDOT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/numpy/dot.hpp" #include "pythonic/include/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto vdot(U const &u, V const &v) -> decltype(functor::dot{}(functor::asarray{}(u).flat(), functor::asarray{}(v).flat())); DEFINE_FUNCTOR(pythonic::numpy, vdot); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/vstack.hpp000066400000000000000000000021061416264035500240670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_VSTACK_HPP #define PYTHONIC_INCLUDE_NUMPY_VSTACK_HPP #include PYTHONIC_NS_BEGIN namespace numpy { namespace impl { template using vstack_helper = decltype(concatenate(std::declval(), 0)); } template auto vstack(ArraySequence &&seq) -> typename std::enable_if<(impl::vstack_helper::value > 1), impl::vstack_helper>::type; // according to the numpy.vstack doc: // Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays // that // are at least 2-dimensional. 
// // the enable if is there to match this behavior template auto vstack(ArraySequence &&seq) -> typename std::enable_if< (impl::vstack_helper::value == 1), decltype(std::declval>().reshape( std::declval>()))>::type; DEFINE_FUNCTOR(pythonic::numpy, vstack); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/where.hpp000066400000000000000000000020521416264035500237060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_WHERE_HPP #define PYTHONIC_INCLUDE_NUMPY_WHERE_HPP #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/include/numpy/nonzero.hpp" #include "pythonic/include/numpy/copy.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace impl { template typename __combined::type where(E const &cond, F const &true_, G const &false_); } #define NUMPY_NARY_EXTRA_METHOD \ template \ auto operator()(E && expr)->decltype(nonzero{}(std::forward(expr))) \ { \ return nonzero{}(std::forward(expr)); \ } #define NUMPY_NARY_FUNC_NAME where #define NUMPY_NARY_FUNC_SYM impl::where #define NUMPY_NARY_RESHAPE_MODE reshape_type #include "pythonic/include/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/zeros.hpp000066400000000000000000000015211416264035500237360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ZEROS_HPP #define PYTHONIC_INCLUDE_NUMPY_ZEROS_HPP #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> zeros(pS const &shape, dtype d = dtype()); template types::ndarray> zeros(long size, dtype d = dtype()); template types::ndarray>> zeros(std::integral_constant, dtype d = dtype()); DEFINE_FUNCTOR(pythonic::numpy, zeros); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/numpy/zeros_like.hpp000066400000000000000000000011731416264035500247450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_NUMPY_ZEROSLIKE_HPP 
#define PYTHONIC_INCLUDE_NUMPY_ZEROSLIKE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/zeros.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto zeros_like(E const &expr, dtype d = dtype()) -> decltype(zeros(sutils::getshape(expr), d)); template auto zeros_like(E const &expr, types::none_type d = builtins::None) -> decltype(zeros(sutils::getshape(expr), types::dtype_t())); DEFINE_FUNCTOR(pythonic::numpy, zeros_like) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/omp/000077500000000000000000000000001416264035500215075ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/omp/get_num_threads.hpp000066400000000000000000000004571416264035500253760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OMP_GET_NUM_THREADS_HPP #define PYTHONIC_INCLUDE_OMP_GET_NUM_THREADS_HPP #include #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_num_threads(); DEFINE_FUNCTOR(pythonic::omp, get_num_threads); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/omp/get_thread_num.hpp000066400000000000000000000004521416264035500252060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OMP_GET_THREAD_NUM_HPP #define PYTHONIC_INCLUDE_OMP_GET_THREAD_NUM_HPP #include #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_thread_num(); DEFINE_FUNCTOR(pythonic::omp, get_thread_num); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/omp/get_wtick.hpp000066400000000000000000000004251416264035500242010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OMP_GET_WTICK_HPP #define PYTHONIC_INCLUDE_OMP_GET_WTICK_HPP #include #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_wtick(); DEFINE_FUNCTOR(pythonic::omp, get_wtick); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/omp/get_wtime.hpp000066400000000000000000000004261416264035500242060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OMP_GET_WTIME_HPP #define PYTHONIC_INCLUDE_OMP_GET_WTIME_HPP #include #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_wtime(); DEFINE_FUNCTOR(pythonic::omp, get_wtime); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/omp/in_parallel.hpp000066400000000000000000000004351416264035500245040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OMP_IN_PARALLEL_HPP #define PYTHONIC_INCLUDE_OMP_IN_PARALLEL_HPP #include #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { bool in_parallel(); DEFINE_FUNCTOR(pythonic::omp, in_parallel); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/omp/set_nested.hpp000066400000000000000000000004421416264035500243550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OMP_SET_NESTED_HPP #define PYTHONIC_INCLUDE_OMP_SET_NESTED_HPP #include #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { void set_nested(long val); DEFINE_FUNCTOR(pythonic::omp, set_nested); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/000077500000000000000000000000001416264035500227065ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__abs__.hpp000066400000000000000000000004501416264035500247570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ABS__HPP #define PYTHONIC_INCLUDE_OPERATOR_ABS__HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/abs.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__abs__, builtins::functor::abs); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__add__.hpp000066400000000000000000000004261416264035500247450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ADD__HPP #define 
PYTHONIC_INCLUDE_OPERATOR_ADD__HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/add.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__add__, add); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__and__.hpp000066400000000000000000000003531416264035500247560ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_AND__HPP #define PYTHONIC_INCLUDE_OPERATOR_AND__HPP #include "pythonic/include/operator_/and_.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__and__, and_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__concat__.hpp000066400000000000000000000003701416264035500254620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_CONCAT__HPP #define PYTHONIC_INCLUDE_OPERATOR_CONCAT__HPP #include "pythonic/include/operator_/concat.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__concat__, concat); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__contains__.hpp000066400000000000000000000004011416264035500260240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_CONTAINS__HPP #define PYTHONIC_INCLUDE_OPERATOR_CONTAINS__HPP #include "pythonic/include/operator_/contains.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__contains__, contains); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__delitem__.hpp000066400000000000000000000003751416264035500256430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_DELITEM__HPP #define PYTHONIC_INCLUDE_OPERATOR_DELITEM__HPP #include "pythonic/include/operator_/delitem.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__delitem__, delitem); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__div__.hpp000066400000000000000000000003511416264035500247740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_DIV__HPP #define 
PYTHONIC_INCLUDE_OPERATOR_DIV__HPP #include "pythonic/include/operator_/div.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__div__, div); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__eq__.hpp000066400000000000000000000003441416264035500246210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_EQ__HPP #define PYTHONIC_INCLUDE_OPERATOR_EQ__HPP #include "pythonic/include/operator_/eq.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__eq__, eq); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__floordiv__.hpp000066400000000000000000000004011416264035500260320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_FLOORDIV__HPP #define PYTHONIC_INCLUDE_OPERATOR_FLOORDIV__HPP #include "pythonic/include/operator_/floordiv.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__floordiv__, floordiv); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__ge__.hpp000066400000000000000000000003431416264035500246060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_GE__HPP #define PYTHONIC_INCLUDE_OPERATOR_GE__HPP #include "pythonic/include/operator_/ge.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__ge__, ge); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__getitem__.hpp000066400000000000000000000003741416264035500256550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_GETITEM__HPP #define PYTHONIC_INCLUDE_OPERATOR_GETITEM__HPP #include "pythonic/include/operator_/getitem.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__getitem__, getitem); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__gt__.hpp000066400000000000000000000003431416264035500246250ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_GT__HPP #define PYTHONIC_INCLUDE_OPERATOR_GT__HPP #include "pythonic/include/operator_/gt.hpp" PYTHONIC_NS_BEGIN 
namespace operator_ { USING_FUNCTOR(__gt__, gt); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__iadd__.hpp000066400000000000000000000003551416264035500251170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IADD__HPP #define PYTHONIC_INCLUDE_OPERATOR_IADD__HPP #include "pythonic/include/operator_/iadd.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__iadd__, iadd); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__iand__.hpp000066400000000000000000000003561416264035500251320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IAND__HPP #define PYTHONIC_INCLUDE_OPERATOR_IAND__HPP #include "pythonic/include/operator_/iand.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__iand__, iand); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__iconcat__.hpp000066400000000000000000000003751416264035500256400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ICONCAT__HPP #define PYTHONIC_INCLUDE_OPERATOR_ICONCAT__HPP #include "pythonic/include/operator_/iconcat.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__iconcat__, iconcat); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__idiv__.hpp000066400000000000000000000003561416264035500251520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IDIV__HPP #define PYTHONIC_INCLUDE_OPERATOR_IDIV__HPP #include "pythonic/include/operator_/idiv.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__idiv__, idiv); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__ifloordiv__.hpp000066400000000000000000000004061416264035500262100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IFLOORDIV__HPP #define PYTHONIC_INCLUDE_OPERATOR_IFLOORDIV__HPP #include "pythonic/include/operator_/ifloordiv.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__ifloordiv__, ifloordiv); } PYTHONIC_NS_END 
#endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__ilshift__.hpp000066400000000000000000000003751416264035500256620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ILSHIFT__HPP #define PYTHONIC_INCLUDE_OPERATOR_ILSHIFT__HPP #include "pythonic/include/operator_/ilshift.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__ilshift__, ilshift); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__imod__.hpp000066400000000000000000000003561416264035500251470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IMOD__HPP #define PYTHONIC_INCLUDE_OPERATOR_IMOD__HPP #include "pythonic/include/operator_/imod.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__imod__, imod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__imul__.hpp000066400000000000000000000003551416264035500251640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IMUL__HPP #define PYTHONIC_INCLUDE_OPERATOR_IMUL__HPP #include "pythonic/include/operator_/imul.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__imul__, imul); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__inv__.hpp000066400000000000000000000003561416264035500250130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_INV__HPP #define PYTHONIC_INCLUDE_OPERATOR_INV__HPP #include "pythonic/include/operator_/invert.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__inv__, invert); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__invert__.hpp000066400000000000000000000003671416264035500255300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_INVERT__HPP #define PYTHONIC_INCLUDE_OPERATOR_INVERT__HPP #include "pythonic/include/operator_/invert.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__invert__, invert); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__ior__.hpp000066400000000000000000000003501416264035500250020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IOR__HPP #define PYTHONIC_INCLUDE_OPERATOR_IOR__HPP #include "pythonic/include/operator_/ior.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__ior__, ior); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__ipow__.hpp000066400000000000000000000003551416264035500251740ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IPOW__HPP #define PYTHONIC_INCLUDE_OPERATOR_IPOW__HPP #include "pythonic/include/operator_/ipow.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__ipow__, ipow); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__irshift__.hpp000066400000000000000000000003741416264035500256670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IRSHIFT__HPP #define PYTHONIC_INCLUDE_OPERATOR_IRSHIFT__HPP #include "pythonic/include/operator_/irshift.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__irshift__, irshift); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__isub__.hpp000066400000000000000000000003551416264035500251600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ISUB__HPP #define PYTHONIC_INCLUDE_OPERATOR_ISUB__HPP #include "pythonic/include/operator_/isub.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__isub__, isub); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__itruediv__.hpp000066400000000000000000000004011416264035500260410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ITRUEDIV__HPP #define PYTHONIC_INCLUDE_OPERATOR_ITRUEDIV__HPP #include "pythonic/include/operator_/itruediv.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__itruediv__, itruediv); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__ixor__.hpp000066400000000000000000000003551416264035500251770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IXOR__HPP #define PYTHONIC_INCLUDE_OPERATOR_IXOR__HPP #include "pythonic/include/operator_/ixor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__ixor__, ixor); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__le__.hpp000066400000000000000000000003431416264035500246130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_LE__HPP #define PYTHONIC_INCLUDE_OPERATOR_LE__HPP #include "pythonic/include/operator_/le.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__le__, le); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__lshift__.hpp000066400000000000000000000003671416264035500255120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_LSHIFT__HPP #define PYTHONIC_INCLUDE_OPERATOR_LSHIFT__HPP #include "pythonic/include/operator_/lshift.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__lshift__, lshift); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__lt__.hpp000066400000000000000000000003431416264035500246320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_LT__HPP #define PYTHONIC_INCLUDE_OPERATOR_LT__HPP #include "pythonic/include/operator_/lt.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__lt__, lt); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__matmul__.hpp000066400000000000000000000003641416264035500255150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_MATMUL__HPP #define PYTHONIC_INCLUDE_OPERATOR_MATMUL__HPP #include "pythonic/include/operator_/matmul.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__mul__, matmul); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__mod__.hpp000066400000000000000000000003501416264035500247700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_MOD__HPP #define PYTHONIC_INCLUDE_OPERATOR_MOD__HPP #include "pythonic/include/operator_/mod.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__mod__, mod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__mul__.hpp000066400000000000000000000003501416264035500250060ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_MUL__HPP #define PYTHONIC_INCLUDE_OPERATOR_MUL__HPP #include "pythonic/include/operator_/mul.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__mul__, mul); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__ne__.hpp000066400000000000000000000003431416264035500246150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_NE__HPP #define PYTHONIC_INCLUDE_OPERATOR_NE__HPP #include "pythonic/include/operator_/ne.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__ne__, ne); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__neg__.hpp000066400000000000000000000003501416264035500247620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_NEG__HPP #define PYTHONIC_INCLUDE_OPERATOR_NEG__HPP #include "pythonic/include/operator_/neg.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__neg__, neg); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__not__.hpp000066400000000000000000000003521416264035500250130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_NOT__HPP #define PYTHONIC_INCLUDE_OPERATOR_NOT__HPP #include "pythonic/include/operator_/not_.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__not__, not_); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__or__.hpp000066400000000000000000000003451416264035500246350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_OR__HPP #define PYTHONIC_INCLUDE_OPERATOR_OR__HPP #include "pythonic/include/operator_/or_.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__or__, or_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__pos__.hpp000066400000000000000000000003501416264035500250120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_POS__HPP #define PYTHONIC_INCLUDE_OPERATOR_POS__HPP #include "pythonic/include/operator_/pos.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__pos__, pos); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__rshift__.hpp000066400000000000000000000003671416264035500255200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_RSHIFT__HPP #define PYTHONIC_INCLUDE_OPERATOR_RSHIFT__HPP #include "pythonic/include/operator_/rshift.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__rshift__, rshift); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__sub__.hpp000066400000000000000000000003501416264035500250020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_SUB__HPP #define PYTHONIC_INCLUDE_OPERATOR_SUB__HPP #include "pythonic/include/operator_/sub.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__sub__, sub); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__truediv__.hpp000066400000000000000000000004511416264035500256750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_TRUEDIV__HPP #define PYTHONIC_INCLUDE_OPERATOR_TRUEDIV__HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/truediv.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__truediv__, truediv) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/__xor__.hpp000066400000000000000000000004271416264035500250260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_XOR__HPP #define PYTHONIC_INCLUDE_OPERATOR_XOR__HPP #include "pythonic/include/operator_/xor_.hpp" #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(__xor__, xor_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/abs.hpp000066400000000000000000000004421416264035500241640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ABS_HPP #define PYTHONIC_INCLUDE_OPERATOR_ABS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/abs.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(abs, builtins::functor::abs); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/add.hpp000066400000000000000000000007031416264035500241470ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ADD_HPP #define PYTHONIC_INCLUDE_OPERATOR_ADD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto add(A &&a, B &&b) -> decltype(std::forward(a) + std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(add, +) DEFINE_FUNCTOR(pythonic::operator_, add); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/and_.hpp000066400000000000000000000007061416264035500243230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_AND_HPP #define PYTHONIC_INCLUDE_OPERATOR_AND_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto and_(A &&a, B &&b) -> decltype(std::forward(a) & std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(and_, &) DEFINE_FUNCTOR(pythonic::operator_, and_); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/concat.hpp000066400000000000000000000005641416264035500246730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_CONCAT_HPP #define PYTHONIC_INCLUDE_OPERATOR_CONCAT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto concat(A &&a, B &&b) -> decltype(std::forward(a) + std::forward(b)); DEFINE_FUNCTOR(pythonic::operator_, concat); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/contains.hpp000066400000000000000000000006521416264035500252400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_CONTAINS_HPP #define PYTHONIC_INCLUDE_OPERATOR_CONTAINS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/in.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto contains(A &&a, B &&b) -> decltype(in(std::forward(a), std::forward(b))); DEFINE_FUNCTOR(pythonic::operator_, contains); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/countOf.hpp000066400000000000000000000005211416264035500250320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_COUNTOF_HPP #define PYTHONIC_INCLUDE_OPERATOR_COUNTOF_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { template long countOf(A &&a, B &&b); DEFINE_FUNCTOR(pythonic::operator_, countOf); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/delitem.hpp000066400000000000000000000005661416264035500250510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_DELITEM_HPP #define PYTHONIC_INCLUDE_OPERATOR_DELITEM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template types::none_type delitem(A &&a, B &&b); DEFINE_FUNCTOR(pythonic::operator_, delitem); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/div.hpp000066400000000000000000000012101416264035500241730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_DIV_HPP #define PYTHONIC_INCLUDE_OPERATOR_DIV_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto div(A &&a, B &&b) // for ndarrays -> typename std::enable_if< !std::is_fundamental::type>::value || !std::is_fundamental::type>::value, decltype(std::forward(a) / std::forward(b))>::type; double div(double a, double b); DEFINE_FUNCTOR(pythonic::operator_, div); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/eq.hpp000066400000000000000000000006101416264035500240210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_EQ_HPP #define PYTHONIC_INCLUDE_OPERATOR_EQ_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto eq(A &&a, B &&b) -> decltype(std::forward(a) == std::forward(b)); bool eq(char const *a, char const *b); DEFINE_FUNCTOR(pythonic::operator_, eq); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/floordiv.hpp000066400000000000000000000004751416264035500252510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_FLOORDIV_HPP #define PYTHONIC_INCLUDE_OPERATOR_FLOORDIV_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/floor_divide.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(floordiv, numpy::functor::floor_divide); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/ge.hpp000066400000000000000000000006061416264035500240140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_GE_HPP #define PYTHONIC_INCLUDE_OPERATOR_GE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto ge(A &&a, B &&b) -> decltype(std::forward(a) >= 
std::forward(b)); bool ge(char const *, char const *); DEFINE_FUNCTOR(pythonic::operator_, ge); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/getitem.hpp000066400000000000000000000005661416264035500250640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_GETITEM_HPP #define PYTHONIC_INCLUDE_OPERATOR_GETITEM_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto getitem(A &&a, B &&b) -> decltype(std::forward(a)[std::forward(b)]); DEFINE_FUNCTOR(pythonic::operator_, getitem); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/gt.hpp000066400000000000000000000006051416264035500240320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_GT_HPP #define PYTHONIC_INCLUDE_OPERATOR_GT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto gt(A &&a, B &&b) -> decltype(std::forward(a) > std::forward(b)); bool gt(char const *, char const *); DEFINE_FUNCTOR(pythonic::operator_, gt); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/iadd.hpp000066400000000000000000000013601416264035500243200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IADD_HPP #define PYTHONIC_INCLUDE_OPERATOR_IADD_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/set.hpp" #include "pythonic/include/types/dict.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto iadd(types::empty_list, types::list const &b) -> decltype(b); template auto iadd(types::empty_dict, types::dict const &b) -> decltype(b); template auto iadd(types::empty_set, types::set const &b) -> decltype(b); } PYTHONIC_NS_END #define OPERATOR_NAME iadd #define OPERATOR_SYMBOL + #define OPERATOR_ISYMBOL += #include "pythonic/include/operator_/icommon.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/iand.hpp000066400000000000000000000003421416264035500243310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IAND_HPP #define PYTHONIC_INCLUDE_OPERATOR_IAND_HPP #define OPERATOR_NAME iand #define OPERATOR_SYMBOL & #define OPERATOR_ISYMBOL &= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/icommon.hpp000066400000000000000000000020401416264035500250540ustar00rootroot00000000000000#ifndef OPERATOR_NAME #error OPERATOR_NAME ! defined #endif #ifndef OPERATOR_SYMBOL #error OPERATOR_SYMBOL ! defined #endif #ifndef OPERATOR_ISYMBOL #error OPERATOR_ISYMBOL ! defined #endif #include "pythonic/utils/functor.hpp" #ifdef USE_XSIMD #include #endif PYTHONIC_NS_BEGIN namespace operator_ { template auto OPERATOR_NAME(bool, A &&a, B &&b, ...) -> decltype(std::forward(a) OPERATOR_SYMBOL std::forward(b)); template auto OPERATOR_NAME(bool, A &&a, B &&b, std::nullptr_t) -> decltype(std::forward(a) OPERATOR_ISYMBOL std::forward(b)); template auto OPERATOR_NAME(A &&a, B &&b) -> decltype(OPERATOR_NAME(true, std::forward(a), std::forward(b), nullptr)) { return OPERATOR_NAME(true, std::forward(a), std::forward(b), nullptr); } DEFINE_FUNCTOR(pythonic::operator_, OPERATOR_NAME); } PYTHONIC_NS_END #undef OPERATOR_NAME #undef OPERATOR_SYMBOL #undef OPERATOR_ISYMBOL pythran-0.10.0+ds2/pythran/pythonic/include/operator_/iconcat.hpp000066400000000000000000000013321416264035500250360ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ICONCAT_HPP #define PYTHONIC_INCLUDE_OPERATOR_ICONCAT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/set.hpp" #include "pythonic/include/types/dict.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A iconcat(A a, B const &b); template auto iconcat(types::empty_list a, types::list b) -> decltype(b); template auto iconcat(types::empty_dict a, 
types::dict b) -> decltype(b); template auto iconcat(types::empty_set a, types::set b) -> decltype(b); DEFINE_FUNCTOR(pythonic::operator_, iconcat); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/idiv.hpp000066400000000000000000000003421416264035500243510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IDIV_HPP #define PYTHONIC_INCLUDE_OPERATOR_IDIV_HPP #define OPERATOR_NAME idiv #define OPERATOR_SYMBOL / #define OPERATOR_ISYMBOL /= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/ifloordiv.hpp000066400000000000000000000007241416264035500254170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IFLOORDIV_HPP #define PYTHONIC_INCLUDE_OPERATOR_IFLOORDIV_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/mod.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A ifloordiv(A &&a, B &&b); template auto ifloordiv(A const &a, B &&b) -> decltype((a - mod(a, b)) / b); DEFINE_FUNCTOR(pythonic::operator_, ifloordiv); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/ilshift.hpp000066400000000000000000000003551416264035500250640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ILSHIFT_HPP #define PYTHONIC_INCLUDE_OPERATOR_ILSHIFT_HPP #define OPERATOR_NAME ilshift #define OPERATOR_SYMBOL << #define OPERATOR_ISYMBOL <<= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/imatmul.hpp000066400000000000000000000006411416264035500250700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IMATMUL_HPP #define PYTHONIC_INCLUDE_OPERATOR_IMATMUL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/dot.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A imatmul(A const &a, B &&b); template A imatmul(A &a, B &&b); DEFINE_FUNCTOR(pythonic::operator_, imatmul); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/imax.hpp000066400000000000000000000015421416264035500243570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IMAX_HPP #define PYTHONIC_INCLUDE_OPERATOR_IMAX_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/maximum.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto imax(A &&a, B &&b) -> typename std::enable_if< std::is_const::value || !std::is_assignable::value, decltype(numpy::functor::maximum{}(std::forward(a), std::forward(b)))>::type; template auto imax(A &&a, B &&b) -> typename std::enable_if< !std::is_const::value && std::is_assignable::value, decltype(a = numpy::functor::maximum{}(std::forward(a), std::forward(b)))>::type; DEFINE_FUNCTOR(pythonic::operator_, imax); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/imin.hpp000066400000000000000000000015421416264035500243550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IMIN_HPP #define PYTHONIC_INCLUDE_OPERATOR_IMIN_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/minimum.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto imin(A &&a, B &&b) -> typename std::enable_if< std::is_const::value || !std::is_assignable::value, decltype(numpy::functor::minimum{}(std::forward(a), std::forward(b)))>::type; template auto imin(A &&a, B &&b) -> typename std::enable_if< !std::is_const::value && std::is_assignable::value, decltype(a = numpy::functor::minimum{}(std::forward(a), std::forward(b)))>::type; DEFINE_FUNCTOR(pythonic::operator_, imin); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/imod.hpp000066400000000000000000000005511416264035500243500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IMOD_HPP #define PYTHONIC_INCLUDE_OPERATOR_IMOD_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A &imod(A &a, B &&b); template A imod(A const 
&a, B &&b); DEFINE_FUNCTOR(pythonic::operator_, imod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/imul.hpp000066400000000000000000000003421416264035500243640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IMUL_HPP #define PYTHONIC_INCLUDE_OPERATOR_IMUL_HPP #define OPERATOR_NAME imul #define OPERATOR_SYMBOL * #define OPERATOR_ISYMBOL *= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/indexOf.hpp000066400000000000000000000004751416264035500250210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_INDEXOF_HPP #define PYTHONIC_INCLUDE_OPERATOR_INDEXOF_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template long indexOf(A &&a, B &&b); DEFINE_FUNCTOR(pythonic::operator_, indexOf); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/inv.hpp000066400000000000000000000004261416264035500242150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_INV_HPP #define PYTHONIC_INCLUDE_OPERATOR_INV_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/invert.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(inv, invert); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/invert.hpp000066400000000000000000000005121416264035500247240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_INVERT_HPP #define PYTHONIC_INCLUDE_OPERATOR_INVERT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto invert(A &&a) -> decltype(~std::forward(a)); DEFINE_FUNCTOR(pythonic::operator_, invert); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/ior.hpp000066400000000000000000000003371416264035500242130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IOR_HPP #define PYTHONIC_INCLUDE_OPERATOR_IOR_HPP #define OPERATOR_NAME ior #define 
OPERATOR_SYMBOL | #define OPERATOR_ISYMBOL |= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/ipow.hpp000066400000000000000000000006261416264035500244010ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IPOW_HPP #define PYTHONIC_INCLUDE_OPERATOR_IPOW_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/pow.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A ipow(A const &a, B &&b); template A &ipow(A &a, B &&b); DEFINE_FUNCTOR(pythonic::operator_, ipow); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/irshift.hpp000066400000000000000000000003551416264035500250720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IRSHIFT_HPP #define PYTHONIC_INCLUDE_OPERATOR_IRSHIFT_HPP #define OPERATOR_NAME irshift #define OPERATOR_SYMBOL >> #define OPERATOR_ISYMBOL >>= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/is_.hpp000066400000000000000000000007161416264035500241750ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IS_HPP #define PYTHONIC_INCLUDE_OPERATOR_IS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/id.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto is_(A &&a, B &&b) -> decltype(builtins::id(std::forward(a)) == builtins::id(std::forward(b))); DEFINE_FUNCTOR(pythonic::operator_, is_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/is_not.hpp000066400000000000000000000006611416264035500247150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ISNOT_HPP #define PYTHONIC_INCLUDE_OPERATOR_ISNOT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto is_not(A &&a, B &&b) -> decltype(builtins::id(std::forward(a)) != builtins::id(std::forward(b))); DEFINE_FUNCTOR(pythonic::operator_, is_not); } PYTHONIC_NS_END 
#endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/isub.hpp000066400000000000000000000003421416264035500243600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ISUB_HPP #define PYTHONIC_INCLUDE_OPERATOR_ISUB_HPP #define OPERATOR_NAME isub #define OPERATOR_SYMBOL - #define OPERATOR_ISYMBOL -= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/itemgetter.hpp000066400000000000000000000024001416264035500255640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ITEMGETTER_HPP #define PYTHONIC_INCLUDE_OPERATOR_ITEMGETTER_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/int_.hpp" PYTHONIC_NS_BEGIN namespace operator_ { struct itemgetter_return { long i; itemgetter_return(long const &item = -1); template auto operator()(A const &a) const -> decltype(a[i]); }; itemgetter_return itemgetter(long item); template struct itemgetter_tuple_return { std::tuple items; itemgetter_tuple_return(Types... items); itemgetter_tuple_return(); template void helper(T &t, A const &a, utils::int_) const; template void helper(T &t, A const &a, utils::int_<0>) const; template auto operator()(A const &a) const -> std::tuple()])>::type>::type...>; }; template itemgetter_tuple_return itemgetter(long const &item1, long const &item2, L... 
items); DEFINE_FUNCTOR(pythonic::operator_, itemgetter); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/itruediv.hpp000066400000000000000000000014551416264035500252570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_ITRUEDIV_HPP #define PYTHONIC_INCLUDE_OPERATOR_ITRUEDIV_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/truediv.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto itruediv(A const &a, B &&b) -> decltype(truediv(a, std::forward(b))); template auto itruediv(A &a, B &&b) -> typename std::enable_if< std::is_same(b)))>::value, A &>::type; template auto itruediv(A &a, B &&b) -> typename std::enable_if< !std::is_same(b)))>::value, decltype(truediv(a, std::forward(b)))>::type; DEFINE_FUNCTOR(pythonic::operator_, itruediv); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/ixor.hpp000066400000000000000000000003421416264035500243770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_IXOR_HPP #define PYTHONIC_INCLUDE_OPERATOR_IXOR_HPP #define OPERATOR_NAME ixor #define OPERATOR_SYMBOL ^ #define OPERATOR_ISYMBOL ^= #include "pythonic/include/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/le.hpp000066400000000000000000000006161416264035500240220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_LE_HPP #define PYTHONIC_INCLUDE_OPERATOR_LE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto le(A &&a, B &&b) -> decltype(std::forward(a) <= std::forward(b)); bool le(char const *self, char const *other); DEFINE_FUNCTOR(pythonic::operator_, le); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/lshift.hpp000066400000000000000000000007341416264035500247140ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_LSHIFT_HPP #define PYTHONIC_INCLUDE_OPERATOR_LSHIFT_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto lshift(A &&a, B &&b) -> decltype(std::forward(a) << std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(lshift, << ) DEFINE_FUNCTOR(pythonic::operator_, lshift); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/lt.hpp000066400000000000000000000006161416264035500240410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_LT_HPP #define PYTHONIC_INCLUDE_OPERATOR_LT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto lt(A &&a, B &&b) -> decltype(std::forward(a) < std::forward(b)); bool lt(char const *self, char const *other); DEFINE_FUNCTOR(pythonic::operator_, lt); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/matmul.hpp000066400000000000000000000007341416264035500247220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_MATMUL_HPP #define PYTHONIC_INCLUDE_OPERATOR_MATMUL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/numpy/dot.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto matmul(A &&a, B &&b) -> decltype(numpy::functor::dot{}(std::forward(a), std::forward(b))); DEFINE_FUNCTOR(pythonic::operator_, matmul); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/mod.hpp000066400000000000000000000016361416264035500242040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_MOD_HPP #define PYTHONIC_INCLUDE_OPERATOR_MOD_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto mod(A &&a, B &&b) -> typename std::enable_if< std::is_fundamental::type>::value && std::is_fundamental::type>::value, decltype(std::forward(a) % std::forward(b))>::type; inline double mod(double a, long b); inline double mod(double a, double b); template auto mod(A &&a, B &&b) // for ndarrays -> typename 
std::enable_if< !std::is_fundamental::type>::value || !std::is_fundamental::type>::value, decltype(std::forward(a) % std::forward(b))>::type; DEFINE_FUNCTOR(pythonic::operator_, mod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/mul.hpp000066400000000000000000000007041416264035500242150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_MUL_HPP #define PYTHONIC_INCLUDE_OPERATOR_MUL_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto mul(A &&a, B &&b) -> decltype(std::forward(a) * std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(mul, *) DEFINE_FUNCTOR(pythonic::operator_, mul); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/ne.hpp000066400000000000000000000006111416264035500240170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_NE_HPP #define PYTHONIC_INCLUDE_OPERATOR_NE_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto ne(A &&a, B &&b) -> decltype(std::forward(a) != std::forward(b)); bool ne(char const *a, char const *b); DEFINE_FUNCTOR(pythonic::operator_, ne); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/neg.hpp000066400000000000000000000004761416264035500241770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_NEG_HPP #define PYTHONIC_INCLUDE_OPERATOR_NEG_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto neg(A &&a) -> decltype(-std::forward(a)); DEFINE_FUNCTOR(pythonic::operator_, neg); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/not_.hpp000066400000000000000000000005741416264035500243640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_NOT_HPP #define PYTHONIC_INCLUDE_OPERATOR_NOT_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ 
{ template auto not_(T &&a) -> decltype(!std::forward(a)); template bool not_(std::complex const &a); DEFINE_FUNCTOR(pythonic::operator_, not_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/or_.hpp000066400000000000000000000007031416264035500241760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_OR_HPP #define PYTHONIC_INCLUDE_OPERATOR_OR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto or_(A &&a, B &&b) -> decltype(std::forward(a) | std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(or_, | ) DEFINE_FUNCTOR(pythonic::operator_, or_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/overloads.hpp000066400000000000000000000025601416264035500254200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_OVERLOADS_HPP #define PYTHONIC_INCLUDE_OPERATOR_OVERLOADS_HPP #define PYTHONIC_OPERATOR_OVERLOAD_DECL(type, opname, op) \ type opname(type a, type b); // workaround the fact that char and short computations are done using int in C, // while they are done at their respective type in numpy #define DEFINE_ALL_OPERATOR_OVERLOADS_DECL(opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(bool, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(unsigned char, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(char, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(signed char, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(unsigned short, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(signed short, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(unsigned int, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(signed int, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(unsigned long, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(signed long, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(unsigned long long, opname, op) \ PYTHONIC_OPERATOR_OVERLOAD_DECL(signed long long, opname, op) #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/pos.hpp000066400000000000000000000004371416264035500242240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_POS_HPP #define PYTHONIC_INCLUDE_OPERATOR_POS_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A pos(A const &a); DEFINE_FUNCTOR(pythonic::operator_, pos); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/pow.hpp000066400000000000000000000003771416264035500242330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_POW_HPP #define PYTHONIC_INCLUDE_OPERATOR_POW_HPP #include "pythonic/include/builtins/pow.hpp" PYTHONIC_NS_BEGIN namespace operator_ { USING_FUNCTOR(pow, pythonic::builtins::functor::pow); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/rshift.hpp000066400000000000000000000007331416264035500247210ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_RSHIFT_HPP #define PYTHONIC_INCLUDE_OPERATOR_RSHIFT_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto rshift(A &&a, B &&b) -> decltype(std::forward(a) >> std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(rshift, >> ) DEFINE_FUNCTOR(pythonic::operator_, rshift); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/sub.hpp000066400000000000000000000007041416264035500242110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_SUB_HPP #define PYTHONIC_INCLUDE_OPERATOR_SUB_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto sub(A &&a, B &&b) -> decltype(std::forward(a) - std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(sub, -) DEFINE_FUNCTOR(pythonic::operator_, sub); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/operator_/truediv.hpp000066400000000000000000000005771416264035500251120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_TRUEDIV_HPP #define PYTHONIC_INCLUDE_OPERATOR_TRUEDIV_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto truediv(A &&a, B &&b) -> decltype(std::forward(a) / (double)std::forward(b)); DEFINE_FUNCTOR(pythonic::operator_, truediv); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/truth.hpp000066400000000000000000000004271416264035500245700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_TRUTH_HPP #define PYTHONIC_INCLUDE_OPERATOR_TRUTH_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { bool truth(bool const &a); DEFINE_FUNCTOR(pythonic::operator_, truth); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/operator_/xor_.hpp000066400000000000000000000007061416264035500243710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OPERATOR_XOR_HPP #define PYTHONIC_INCLUDE_OPERATOR_XOR_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto xor_(A &&a, B &&b) -> decltype(std::forward(a) ^ std::forward(b)); DEFINE_ALL_OPERATOR_OVERLOADS_DECL(xor_, ^) DEFINE_FUNCTOR(pythonic::operator_, xor_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/os/000077500000000000000000000000001416264035500213355ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/os/path/000077500000000000000000000000001416264035500222715ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/os/path/join.hpp000066400000000000000000000006021416264035500237370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_OS_PATH_JOIN_HPP #define PYTHONIC_INCLUDE_OS_PATH_JOIN_HPP #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN 
namespace os { namespace path { template T join(T &&head); template types::str join(T &&head, Types &&... tail); DEFINE_FUNCTOR(pythonic::os::path, join); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/000077500000000000000000000000001416264035500221745ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/random/choice.hpp000066400000000000000000000005541416264035500241430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_CHOICE_HPP #define PYTHONIC_INCLUDE_RANDOM_CHOICE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { template typename Seq::value_type choice(Seq const &seq); DEFINE_FUNCTOR(pythonic::random, choice); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/expovariate.hpp000066400000000000000000000005201416264035500252310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_EXPOVARIATE_HPP #define PYTHONIC_INCLUDE_RANDOM_EXPOVARIATE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { double expovariate(double l); DEFINE_FUNCTOR(pythonic::random, expovariate); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/gauss.hpp000066400000000000000000000005101416264035500240230ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_GAUSS_HPP #define PYTHONIC_INCLUDE_RANDOM_GAUSS_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { double gauss(double mu, double sigma); DEFINE_FUNCTOR(pythonic::random, gauss); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/randint.hpp000066400000000000000000000005101416264035500243400ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_RANDINT_HPP #define PYTHONIC_INCLUDE_RANDOM_RANDINT_HPP #include 
"pythonic/include/utils/functor.hpp" #include "pythonic/include/random/randrange.hpp" PYTHONIC_NS_BEGIN namespace random { long randint(long a, long b); DEFINE_FUNCTOR(pythonic::random, randint); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/random.hpp000066400000000000000000000005041416264035500241640ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_RANDOM_HPP #define PYTHONIC_INCLUDE_RANDOM_RANDOM_HPP #include "pythonic/include/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace random { static std::mt19937 __random_generator; double random(); DEFINE_FUNCTOR(pythonic::random, random); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/randrange.hpp000066400000000000000000000006651416264035500246550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_RANDRANGE_HPP #define PYTHONIC_INCLUDE_RANDOM_RANDRANGE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/random/random.hpp" #include PYTHONIC_NS_BEGIN namespace random { long randrange(long stop); long randrange(long start, long stop); long randrange(long start, long stop, long step); DEFINE_FUNCTOR(pythonic::random, randrange) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/sample.hpp000066400000000000000000000010571416264035500241710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_SAMPLE_HPP #define PYTHONIC_INCLUDE_RANDOM_SAMPLE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/random/random.hpp" #include "pythonic/include/types/list.hpp" PYTHONIC_NS_BEGIN namespace random { template types::list::type>:: type::iterator>::value_type> sample(Iterable &&s, size_t k); DEFINE_FUNCTOR(pythonic::random, sample); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/seed.hpp000066400000000000000000000006061416264035500236270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_SEED_HPP #define 
PYTHONIC_INCLUDE_RANDOM_SEED_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { types::none_type seed(long s); types::none_type seed(); DEFINE_FUNCTOR(pythonic::random, seed); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/shuffle.hpp000066400000000000000000000007461416264035500243500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_SHUFFLE_HPP #define PYTHONIC_INCLUDE_RANDOM_SHUFFLE_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/random/random.hpp" #include "pythonic/include/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace random { template types::none_type shuffle(T &seq); template types::none_type shuffle(T &seq, function &&randf); DEFINE_FUNCTOR(pythonic::random, shuffle) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/random/uniform.hpp000066400000000000000000000005121416264035500243620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_RANDOM_UNIFORM_HPP #define PYTHONIC_INCLUDE_RANDOM_UNIFORM_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { double uniform(double a, double b); DEFINE_FUNCTOR(pythonic::random, uniform); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/000077500000000000000000000000001416264035500220435ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/000077500000000000000000000000001416264035500234635ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/binom.hpp000066400000000000000000000010721416264035500253000ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_BINOM_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_BINOM_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double binom(T0 n, T1 k); } #define NUMPY_NARY_FUNC_NAME binom #define NUMPY_NARY_FUNC_SYM details::binom #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/gamma.hpp000066400000000000000000000007561416264035500252660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_GAMMA_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_GAMMA_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { #define NUMPY_NARY_FUNC_NAME gamma #define NUMPY_NARY_FUNC_SYM xsimd::tgamma #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/gammaln.hpp000066400000000000000000000007641416264035500256170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_GAMMALN_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_GAMMALN_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { #define NUMPY_NARY_FUNC_NAME gammaln #define NUMPY_NARY_FUNC_SYM xsimd::lgamma #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/hankel1.hpp000066400000000000000000000012001416264035500255100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_HANKEL1_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_HANKEL1_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/complex.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" 
PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template std::complex hankel1(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME hankel1 #define NUMPY_NARY_FUNC_SYM details::hankel1 #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/hankel2.hpp000066400000000000000000000012001416264035500255110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_HANKEL2_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_HANKEL2_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/types/complex.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template std::complex hankel2(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME hankel2 #define NUMPY_NARY_FUNC_SYM details::hankel2 #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/iv.hpp000066400000000000000000000010531416264035500246110ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_IV_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_IV_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double iv(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME iv #define NUMPY_NARY_FUNC_SYM details::iv #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/ivp.hpp000066400000000000000000000010601416264035500247670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_IVP_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_IVP_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double ivp(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME ivp #define NUMPY_NARY_FUNC_SYM details::ivp #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/jv.hpp000066400000000000000000000010531416264035500246120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_JV_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_JV_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double jv(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME jv #define NUMPY_NARY_FUNC_SYM details::jv #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/jvp.hpp000066400000000000000000000010601416264035500247700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_JVP_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_JVP_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double jvp(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME jvp #define NUMPY_NARY_FUNC_SYM details::jvp #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/kv.hpp000066400000000000000000000010531416264035500246130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_KV_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_KV_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN 
namespace scipy { namespace special { namespace details { template double kv(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME kv #define NUMPY_NARY_FUNC_SYM details::kv #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/kvp.hpp000066400000000000000000000010601416264035500247710ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_KVP_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_KVP_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double kvp(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME kvp #define NUMPY_NARY_FUNC_SYM details::kvp #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/spherical_jn.hpp000066400000000000000000000011661416264035500266410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_SPHERICAL_JN_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_SPHERICAL_JN_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double spherical_jn(T0 v, T1 x, bool derivative = false); } #define NUMPY_NARY_FUNC_NAME spherical_jn #define NUMPY_NARY_FUNC_SYM details::spherical_jn #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/spherical_yn.hpp000066400000000000000000000011661416264035500266600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_SPHERICAL_YN_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_SPHERICAL_YN_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include 
"pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double spherical_yn(T0 v, T1 x, bool derivative = false); } #define NUMPY_NARY_FUNC_NAME spherical_yn #define NUMPY_NARY_FUNC_SYM details::spherical_yn #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/yv.hpp000066400000000000000000000010531416264035500246310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_YV_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_YV_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double yv(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME yv #define NUMPY_NARY_FUNC_SYM details::yv #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/scipy/special/yvp.hpp000066400000000000000000000010601416264035500250070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_SCIPY_SPECIAL_YVP_HPP #define PYTHONIC_INCLUDE_SCIPY_SPECIAL_YVP_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double yvp(T0 x, T1 y); } #define NUMPY_NARY_FUNC_NAME yvp #define NUMPY_NARY_FUNC_SYM details::yvp #include "pythonic/include/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/string/000077500000000000000000000000001416264035500222225ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/string/ascii_letters.hpp000066400000000000000000000004561416264035500255720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_STRING_ASCII_LETTERS_HPP #define 
PYTHONIC_INCLUDE_STRING_ASCII_LETTERS_HPP #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { types::str constexpr ascii_letters( "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/string/ascii_lowercase.hpp000066400000000000000000000004221416264035500260650ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_STRING_ASCII_LOWERCASE_HPP #define PYTHONIC_INCLUDE_STRING_ASCII_LOWERCASE_HPP #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { types::str constexpr ascii_lowercase("abcdefghijklmnopqrstuvwxyz"); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/string/ascii_uppercase.hpp000066400000000000000000000004221416264035500260700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_STRING_ASCII_UPPERCASE_HPP #define PYTHONIC_INCLUDE_STRING_ASCII_UPPERCASE_HPP #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { types::str constexpr ascii_uppercase("ABCDEFGHIJKLMNOPQRSTUVWXYZ"); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/string/digits.hpp000066400000000000000000000003471416264035500242220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_STRING_DIGITS_HPP #define PYTHONIC_INCLUDE_STRING_DIGITS_HPP #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { types::str constexpr digits("0123456789"); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/string/find.hpp000066400000000000000000000005301416264035500236510ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_STRING_FIND_HPP #define PYTHONIC_INCLUDE_STRING_FIND_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { template long find(types::str const &s, T &&val); DEFINE_FUNCTOR(pythonic::string, find); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/string/hexdigits.hpp000066400000000000000000000003741416264035500247270ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_STRING_HEXDIGITS_HPP #define PYTHONIC_INCLUDE_STRING_HEXDIGITS_HPP #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { types::str constexpr hexdigits("0123456789abcdefABCDEF"); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/string/octdigits.hpp000066400000000000000000000003561416264035500247300ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_STRING_OCTDIGITS_HPP #define PYTHONIC_INCLUDE_STRING_OCTDIGITS_HPP #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { types::str constexpr octdigits("01234567"); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/time/000077500000000000000000000000001416264035500216525ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/time/sleep.hpp000066400000000000000000000005041416264035500234720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TIME_SLEEP_HPP #define PYTHONIC_INCLUDE_TIME_SLEEP_HPP #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace time { types::none_type sleep(double const value); DEFINE_FUNCTOR(pythonic::time, sleep) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/time/time.hpp000066400000000000000000000003661416264035500233260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TIME_TIME_HPP #define PYTHONIC_INCLUDE_TIME_TIME_HPP #include "pythonic/include/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace time { double time(); DEFINE_FUNCTOR(pythonic::time, time) } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/types/000077500000000000000000000000001416264035500220605ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/types/NoneType.hpp000066400000000000000000000257341416264035500243450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NONE_HPP #define PYTHONIC_INCLUDE_TYPES_NONE_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/operator_/mod.hpp" #include PYTHONIC_NS_BEGIN namespace types { static const intptr_t NONE_ID = 0x1331; struct none_type { none_type(); intptr_t id() const; }; std::ostream &operator<<(std::ostream &os, none_type const &) { return os << "None"; } template ::value> struct none; /* Type adapator to simulate an option type * * see http://en.wikipedia.org/wiki/Option_type */ template struct none : T { bool is_none; // set to true if the type is none none(none_type const &); none() : T(), is_none{true} { } none(none const &other) = default; none(T const &arg) : T(arg), is_none(false) { } template none(OT const &arg) : none(T(arg)) { } bool operator==(none_type const &) const; template bool operator==(O const &t) const; bool operator!=(none_type const &) const; template bool operator!=(O const &t) const; explicit operator bool() const; intptr_t id() const; template friend std::ostream &operator<<(std::ostream &os, none const &); }; /* specialization of none for integral types we cannot derive from */ template struct none_data { explicit operator bool() const { return !static_cast

(this)->is_none && static_cast

(this)->data; } operator T() const { return static_cast

(this)->data; } }; template struct none_data { operator bool() const { return !static_cast

(this)->is_none && static_cast

(this)->data; } }; template struct none : none_data, T> { T data; template friend std::ostream &operator<<(std::ostream &, none const &); template friend T1 operator+(none const &t0, T1 const &t1); template friend T1 operator+(T1 const &t0, none const &t1); template friend none operator+(none const &t0, none const &t1); template friend bool operator>(none const &t0, T1 const &t1); template friend bool operator>(T1 const &t0, none const &t1); template friend none operator>(none const &t0, none const &t1); template friend bool operator>=(none const &t0, T1 const &t1); template friend bool operator>=(T1 const &t0, none const &t1); template friend none operator>=(none const &t0, none const &t1); template friend bool operator<(none const &t0, T1 const &t1); template friend bool operator<(T1 const &t0, none const &t1); template friend none operator<(none const &t0, none const &t1); template friend bool operator<=(none const &t0, T1 const &t1); template friend bool operator<=(T1 const &t0, none const &t1); template friend none operator<=(none const &t0, none const &t1); template friend T1 operator-(none const &t0, T1 const &t1); template friend T1 operator-(T1 const &t0, none const &t1); template friend none operator-(none const &t0, none const &t1); template friend T1 operator*(none const &t0, T1 const &t1); template friend T1 operator*(T1 const &t0, none const &t1); template friend none operator*(none const &t0, none const &t1); template friend T1 operator/(none const &t0, T1 const &t1); template friend T1 operator/(T1 const &t0, none const &t1); template friend none operator/(none const &t0, none const &t1); template none &operator+=(T1 other); template none &operator-=(T1 other); template none &operator*=(T1 other); template none &operator/=(T1 other); public: bool is_none; none(); none(none_type const &); none(T const &data); bool operator==(none_type const &) const; template bool operator==(O const &t) const; bool operator!=(none_type const &) const; template bool 
operator!=(O const &t) const; T &operator=(T const &t); intptr_t id() const; template operator none() { if (is_none) return {none_type{}}; else return {static_cast(data)}; } }; template T operator+(none const &t0, T const &t1); template T operator+(T const &t0, none const &t1); template none operator+(none const &t0, none const &t1); template bool operator>(none const &t0, T const &t1); template bool operator>(T const &t0, none const &t1); template none operator>(none const &t0, none const &t1); template bool operator>=(none const &t0, T const &t1); template bool operator>=(T const &t0, none const &t1); template none operator>=(none const &t0, none const &t1); template bool operator<(none const &t0, T const &t1); template bool operator<(T const &t0, none const &t1); template none operator<(none const &t0, none const &t1); template bool operator<=(none const &t0, T const &t1); template bool operator<=(T const &t0, none const &t1); template none operator<=(none const &t0, none const &t1); template T operator-(none const &t0, T const &t1); template T operator-(T const &t0, none const &t1); template none operator-(none const &t0, none const &t1); template T operator*(none const &t0, T const &t1); template T operator*(T const &t0, none const &t1); template none operator*(none const &t0, none const &t1); template T operator/(none const &t0, T const &t1); template T operator/(T const &t0, none const &t1); template none operator/(none const &t0, none const &t1); template decltype(operator_::mod(std::declval(), std::declval())) operator%(none const &t0, T1 const &t1); template decltype(operator_::mod(std::declval(), std::declval())) operator%(T0 const &t0, none const &t1); template none(), std::declval())), true> operator%(none const &t0, none const &t1); template std::ostream &operator<<(std::ostream &os, none const &v); template struct is_none { static const bool value = false; }; template struct is_none> { static const bool value = true; }; } template struct assignable> 
{ using type = types::none::type>; }; PYTHONIC_NS_END namespace std { /* std::get overload */ template auto get(pythonic::types::none const &t) -> decltype(std::get((T0 const &)t)); template struct tuple_element> { using type = typename std::tuple_element::type; }; } /* type inference stuff { */ #include "pythonic/include/types/combined.hpp" template struct __combined, T1> { static_assert(!pythonic::types::is_none::value, "none of none should'nt exist"); using type = pythonic::types::none::type>; }; template struct __combined> { static_assert(!pythonic::types::is_none::value, "none of none should'nt exist"); using type = pythonic::types::none::type>; }; template struct __combined, pythonic::types::none> { static_assert(!pythonic::types::is_none::value, "none of none shouldn't exist"); static_assert(!pythonic::types::is_none::value, "none of none shouldn't exist"); using type = pythonic::types::none::type>; }; template struct __combined { static_assert(!pythonic::types::is_none::value, "none of none shouldn't exist"); using type = pythonic::types::none; }; template struct __combined> { static_assert(!pythonic::types::is_none::value, "none of none shouldn't exist"); using type = pythonic::types::none; }; template struct __combined { static_assert(!pythonic::types::is_none::value, "none of none shouldn't exist"); using type = pythonic::types::none; }; template struct __combined, pythonic::types::none_type> { static_assert(!pythonic::types::is_none::value, "none of none shouldn't exist"); using type = pythonic::types::none; }; template <> struct __combined { using type = pythonic::types::none_type; }; /* } */ #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template <> struct to_python { static PyObject *convert(types::none_type); }; template struct to_python> { static PyObject *convert(types::none const &n); }; template <> struct from_python { static bool is_convertible(PyObject *obj); static types::none_type convert(PyObject *obj); }; 
PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/assignable.hpp000066400000000000000000000024561416264035500247100ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_ASSIGNABLE_HPP #define PYTHONIC_INCLUDE_TYPES_ASSIGNABLE_HPP #include PYTHONIC_NS_BEGIN struct dummy { }; template struct assignable { using type = T; }; template struct assignable : assignable { }; template struct assignable : assignable { }; template struct assignable : assignable { }; template struct assignable : assignable { }; template struct lazy : assignable { }; // very conservative template struct assignable_noescape : assignable { }; template struct assignable_noescape : assignable_noescape { }; template struct assignable_noescape : assignable_noescape { }; template struct assignable_noescape : assignable_noescape { }; template struct assignable_noescape : assignable_noescape { }; template struct returnable : assignable { }; template struct returnable : assignable::type> { }; template struct returnable : assignable::type> { }; template struct returnable : assignable::type> { }; PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/attr.hpp000066400000000000000000000016661416264035500235540ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_ATTR_HPP #define PYTHONIC_INCLUDE_TYPES_ATTR_HPP PYTHONIC_NS_BEGIN namespace types { namespace attr { /* exception attributes */ struct ARGS { }; struct ERRNO { }; struct STRERROR { }; struct FILENAME { }; /* complex attributes */ struct REAL { }; struct IMAG { }; /* file attributes */ struct CLOSED { }; struct MODE { }; struct NAME { }; struct NEWLINES { }; /* fileinfo attributes */ struct EPS { }; /* ndarray attributes */ struct SHAPE { }; struct NDIM { }; struct STRIDES { }; struct SIZE { }; struct ITEMSIZE { }; struct NBYTES { }; struct FLAT { }; struct DTYPE { }; struct T { }; /* slice attributes */ struct START { }; struct STOP { }; struct STEP { }; /* */ } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/types/bool.hpp000066400000000000000000000006201416264035500235220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_BOOL_HPP #define PYTHONIC_INCLUDE_TYPES_BOOL_HPP #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template <> struct to_python { static PyObject *convert(bool b); }; template <> struct from_python { static bool is_convertible(PyObject *obj); static bool convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/cfun.hpp000066400000000000000000000015541416264035500235310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_CFUN_HPP #define PYTHONIC_INCLUDE_TYPES_CFUN_HPP PYTHONIC_NS_BEGIN namespace types { template struct cfun; template struct cfun { using callable = void; cfun(ReturnType (*fun)(ArgsType...)); ReturnType (*ptr)(ArgsType...); ReturnType operator()(ArgsType... args) const; }; } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(types::cfun const &v); }; template struct from_python> { static bool is_convertible(PyObject *obj); static types::cfun convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/combined.hpp000066400000000000000000000210711416264035500243520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_COMBINED_HPP #define PYTHONIC_INCLUDE_TYPES_COMBINED_HPP #include "pythonic/include/types/traits.hpp" PYTHONIC_NS_BEGIN namespace types { template struct variant_functor; } PYTHONIC_NS_END /* type inference stuff */ template struct __combined; template struct __combined { using type = T; }; template struct __combined { // This is less efficient that doing a binary split, but it's not equivalent // as the lhs dominates the rhs (a.k.a __combined is neither commutative nor // associative) using type = typename __combined::type, T2, 
Types...>::type; }; template struct __combined { // callable -> functor template static pythonic::types::variant_functor get(std::integral_constant); // operator+ exists -> deduce type template static decltype(std::declval() + std::declval()) get(std::integral_constant); // operator+ does not exists -> pick first one, better than error // note that this is needed because broadcasting is too complex to be modeled // by our clumsy type inference scheme // so we sometime endup with __combined, int> which // only makes sense when broadcasting // fortunately, broadcasting is only supported by ndarray, && we already // ignore __combined for ndarray // so the only thing to do in such situations is « ! throw an error » template static F0 get(...); using type = typename std::conditional< std::is_same::value, T0, decltype(get(std::integral_constant< bool, pythonic::types::is_callable::value && pythonic::types::is_callable::value>()))>::type; }; template struct __combined { using type = typename std::add_const::type>::type; }; template struct __combined { using type = typename std::add_const::type>::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct 
__combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename __combined::type; }; template struct __combined { using type = typename std::add_lvalue_reference< typename __combined::type>::type; }; template struct __combined { using type = typename std::add_rvalue_reference< typename __combined::type>::type; }; template struct __combined { using type = typename std::add_const::type>::type; }; template struct __combined { using type = typename std::add_lvalue_reference< typename std::add_const::type>::type>::type; }; template class container { public: using value_type = typename std::remove_cv::type>::type; private: container(); }; template class indexable_container { public: using key_type = typename std::remove_cv::type>::type; using value_type = typename std::remove_cv::type>::type; private: indexable_container(); }; template class dict_container { public: using value_type = typename std::remove_cv::type>::type; private: dict_container(); }; template class indexable { public: using type = typename std::remove_cv::type>::type; private: indexable(); }; template class indexable_dict { public: using type = typename std::remove_cv::type>::type; private: indexable_dict(); }; template struct __combined, indexable_container> { using type = indexable_container::type, typename __combined::type>; }; template struct __combined, indexable> { using type = indexable::type>; }; template struct __combined, container> { using type = indexable_container; }; template struct __combined, indexable> { using type = indexable_container; }; template struct __combined, container> { using type = indexable_container::type>; }; template struct __combined, indexable_container> { using type = indexable_container::type>; }; template struct __combined, indexable> { using type = indexable_container::type, V1>; 
}; template struct __combined, indexable_container> { using type = indexable_container::type, V1>; }; template struct __combined, container> { using type = container::type>; }; /* special handling for functors * as it's based on a trait, template specialization cannot be used * so we rely on operator+ specialization * { */ template struct __combined> { using type = pythonic::types::variant_functor; }; template struct __combined, T> { using type = pythonic::types::variant_functor; }; template struct __combined, pythonic::types::variant_functor> { using type = pythonic::types::variant_functor; }; /* } */ /* mimic numpy behavior { */ #define SCALAR_COMBINER(Type) \ template <> \ struct __combined { \ using type = Type; \ }; SCALAR_COMBINER(bool) SCALAR_COMBINER(uint8_t) SCALAR_COMBINER(int8_t) SCALAR_COMBINER(uint16_t) SCALAR_COMBINER(int16_t) SCALAR_COMBINER(uint32_t) SCALAR_COMBINER(int32_t) SCALAR_COMBINER(uint64_t) SCALAR_COMBINER(int64_t) #undef SCALAR_COMBINER #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/complex.hpp000066400000000000000000000131531416264035500242430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_COMPLEX_HPP #define PYTHONIC_INCLUDE_TYPES_COMPLEX_HPP #include #if defined(_OPENMP) #pragma omp declare reduction(+ : std::complex < float > : omp_out += omp_in) #pragma omp declare reduction(* : std::complex < float > : omp_out *= omp_in) #pragma omp declare reduction(+ : std::complex < double > : omp_out += omp_in) #pragma omp declare reduction(* : std::complex < double > : omp_out *= omp_in) #pragma omp declare reduction(+ : std::complex < long double > : omp_out += \ omp_in) #pragma omp declare reduction(* : std::complex < long double > : omp_out *= \ omp_in) #endif PYTHONIC_NS_BEGIN namespace numpy { namespace functor { struct complex64; struct complex128; struct complex256; } } PYTHONIC_NS_END namespace std { template using complex_broadcast_t = typename std::enable_if< std::is_scalar::value && !std::is_same::value, 
std::complex::type>>::type; template using complex_bool_t = typename std::enable_if< std::is_scalar::value && !std::is_same::value, bool>::type; template complex_broadcast_t operator+(std::complex self, S other); template complex_broadcast_t operator+(S self, std::complex other); template complex_broadcast_t operator-(std::complex self, S other); template complex_broadcast_t operator-(S self, std::complex other); template complex_broadcast_t operator*(std::complex self, S other); template complex_broadcast_t operator*(S self, std::complex other); template complex_broadcast_t operator/(std::complex self, S other); template complex_broadcast_t operator/(S self, std::complex other); template complex_bool_t operator==(std::complex self, S other); template complex_bool_t operator==(S self, std::complex other); template complex_bool_t operator!=(std::complex self, S other); template complex_bool_t operator!=(S self, std::complex other); template bool operator<(std::complex self, std::complex other); template bool operator<=(std::complex self, std::complex other); template bool operator>(std::complex self, std::complex other); template bool operator>=(std::complex self, std::complex other); template bool operator&&(std::complex self, std::complex other); template bool operator||(std::complex self, std::complex other); template bool operator!(std::complex self); template struct hash> { size_t operator()(std::complex const &x) const; }; } PYTHONIC_NS_BEGIN namespace builtins { template T getattr(types::attr::REAL, std::complex const &self); template T getattr(types::attr::IMAG, std::complex const &self); numpy::functor::complex64 getattr(types::attr::DTYPE, std::complex const &self); numpy::functor::complex128 getattr(types::attr::DTYPE, std::complex const &self); numpy::functor::complex256 getattr(types::attr::DTYPE, std::complex const &self); } PYTHONIC_NS_END /* for type inference { */ #include "pythonic/include/types/combined.hpp" template struct __combined, 
std::complex> { using type = std::complex; }; template struct __combined, indexable> { using type = std::complex; }; template struct __combined, std::complex> { using type = std::complex::type>; }; /* } */ #define STD_COMPLEX_IMPLICT_OPERATOR_CAST(op) \ template \ auto operator op(std::complex const &lhs, std::complex const &rhs) \ ->std::complex::type> \ { \ using ctype = std::complex::type>; \ return ctype \ { \ lhs \ } \ op ctype{rhs}; \ } STD_COMPLEX_IMPLICT_OPERATOR_CAST(+) STD_COMPLEX_IMPLICT_OPERATOR_CAST(-) STD_COMPLEX_IMPLICT_OPERATOR_CAST(*) STD_COMPLEX_IMPLICT_OPERATOR_CAST(/ ) STD_COMPLEX_IMPLICT_OPERATOR_CAST(== ) STD_COMPLEX_IMPLICT_OPERATOR_CAST(!= ) #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(std::complex const &c); }; template struct from_python> { static bool is_convertible(PyObject *obj); static std::complex convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/complex128.hpp000066400000000000000000000002231416264035500244700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_COMPLEX128_HPP #define PYTHONIC_INCLUDE_TYPES_COMPLEX128_HPP #include "pythonic/include/types/complex.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/complex256.hpp000066400000000000000000000002231416264035500244720ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_COMPLEX256_HPP #define PYTHONIC_INCLUDE_TYPES_COMPLEX256_HPP #include "pythonic/include/types/complex.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/complex64.hpp000066400000000000000000000002211416264035500244050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_COMPLEX64_HPP #define PYTHONIC_INCLUDE_TYPES_COMPLEX64_HPP #include "pythonic/include/types/complex.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/dict.hpp000066400000000000000000000372051416264035500235230ustar00rootroot00000000000000#ifndef 
PYTHONIC_INCLUDE_TYPES_DICT_HPP #define PYTHONIC_INCLUDE_TYPES_DICT_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/types/empty_iterator.hpp" #include "pythonic/include/utils/shared_ref.hpp" #include "pythonic/include/utils/iterator.hpp" #include "pythonic/include/utils/reserve.hpp" #include "pythonic/include/builtins/None.hpp" #include #include #include #include #include #include PYTHONIC_NS_BEGIN namespace types { static const size_t DEFAULT_DICT_CAPACITY = 64; struct empty_dict; template struct item_iterator_adaptator : public I { using value_type = make_tuple_t< typename std::remove_cv::type, typename I::value_type::second_type>; using pointer = value_type *; using reference = value_type &; item_iterator_adaptator() = default; item_iterator_adaptator(I const &i); value_type operator*() const; }; template struct key_iterator_adaptator : public I { using value_type = typename I::value_type::first_type; using pointer = typename I::value_type::first_type *; using reference = typename I::value_type::first_type &; key_iterator_adaptator(); key_iterator_adaptator(I const &i); value_type operator*() const; }; template struct value_iterator_adaptator : public I { using value_type = typename I::value_type::second_type; using pointer = typename I::value_type::second_type *; using reference = typename I::value_type::second_type &; value_iterator_adaptator(); value_iterator_adaptator(I const &i); value_type operator*() const; }; template struct dict_items { using iterator = typename D::item_const_iterator; using value_type = typename iterator::value_type; D data; dict_items(); dict_items(D const &d); iterator begin() const; iterator end() const; long size() const; }; template struct dict_keys { using iterator = typename D::key_const_iterator; using value_type = typename iterator::value_type; D data; dict_keys(); dict_keys(D const &d); iterator begin() const; iterator end() const; long size() const; 
}; template struct dict_values { using iterator = typename D::value_const_iterator; using value_type = typename iterator::value_type; D data; dict_values(); dict_values(D const &d); iterator begin() const; iterator end() const; long size() const; }; template class dict { // data holder using _key_type = typename std::remove_cv::type>::type; using _value_type = typename std::remove_cv::type>::type; using container_type = std::unordered_map<_key_type, _value_type>; utils::shared_ref data; template friend class dict; public: // types using reference = typename container_type::reference; using const_reference = typename container_type::const_reference; using iterator = utils::comparable_iterator< key_iterator_adaptator>; using const_iterator = utils::comparable_iterator< key_iterator_adaptator>; using item_iterator = utils::comparable_iterator< item_iterator_adaptator>; using item_const_iterator = utils::comparable_iterator< item_iterator_adaptator>; using key_iterator = utils::comparable_iterator< key_iterator_adaptator>; using key_const_iterator = utils::comparable_iterator< key_iterator_adaptator>; using value_iterator = utils::comparable_iterator< value_iterator_adaptator>; using value_const_iterator = utils::comparable_iterator< value_iterator_adaptator>; using size_type = typename container_type::size_type; using difference_type = typename container_type::difference_type; using value_type = typename container_type::value_type; using allocator_type = typename container_type::allocator_type; using pointer = typename container_type::pointer; using const_pointer = typename container_type::const_pointer; // constructors dict(); dict(empty_dict const &); dict(std::initializer_list l); dict(dict const &other); template dict(dict const &other); template dict(B begin, E end); // iterators iterator begin(); const_iterator begin() const; iterator end(); const_iterator end() const; item_iterator item_begin(); item_const_iterator item_begin() const; item_iterator item_end(); 
item_const_iterator item_end() const; key_iterator key_begin(); key_const_iterator key_begin() const; key_iterator key_end(); key_const_iterator key_end() const; value_iterator value_begin(); value_const_iterator value_begin() const; value_iterator value_end(); value_const_iterator value_end() const; // dict interface operator bool(); V &operator[](K const &key); template V &operator[](OtherKey const &key) { return (*this)[K(key)]; } V const &operator[](K const &key) const; template V const &operator[](OtherKey const &key) const { return (*this)[K(key)]; } V &fast(K const &key); V const &fast(K const &key) const; item_const_iterator find(K const &key) const; void clear(); dict copy() const; template typename __combined::type get(K const &key, W d) const; none get(K const &key) const; template V &setdefault(K const &key, W d); none &setdefault(K const &key); template void update(dict const &d); template void update(Iterable const &d); template typename __combined::type pop(K const &key, W d); V pop(K const &key); make_tuple_t popitem(); long size() const; dict_items> items() const; dict_keys> keys() const; dict_values> values() const; // type inference stuff template dict::type, typename __combined::type> operator+(dict const &); // id interface intptr_t id() const; template bool contains(T const &key) const; }; struct empty_dict { using value_type = void; using iterator = empty_iterator; using const_iterator = empty_iterator; template dict operator+(dict const &s); empty_dict operator+(empty_dict const &); operator bool() const; iterator begin() const; iterator end() const; template bool contains(V const &) const; }; template dict operator+(dict const &d, empty_dict); } template struct assignable> { using type = types::dict::type, typename assignable::type>; }; std::ostream &operator<<(std::ostream &os, types::empty_dict const &); template std::ostream &operator<<(std::ostream &os, std::pair const &p); template std::ostream &operator<<(std::ostream &os, types::dict 
const &v); PYTHONIC_NS_END /* overload std::get */ namespace std { template auto get(pythonic::types::dict &d) -> decltype(d[I]); template auto get(pythonic::types::dict const &d) -> decltype(d[I]); template struct tuple_element> { using type = V; }; } /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" #include "pythonic/include/types/list.hpp" template struct __combined, pythonic::types::empty_dict> { using type = dict_container; }; template struct __combined> { using type = dict_container; }; template struct __combined, pythonic::types::dict> { using type = pythonic::types::dict::type>; }; template struct __combined, container> { using type = pythonic::types::dict::type>; }; template struct __combined> { using type = pythonic::types::dict::type, typename std::tuple_element<1, T>::type>; }; template struct __combined> { using type = pythonic::types::dict::type, typename std::tuple_element<1, T>::type>; }; template struct __combined, pythonic::types::empty_dict> { using type = pythonic::types::dict::type, typename std::tuple_element<1, T>::type>; }; template struct __combined, pythonic::types::empty_dict> { using type = pythonic::types::dict::type, typename std::tuple_element<1, T>::type>; }; template struct __combined, pythonic::types::list> { using type = pythonic::types::dict< typename __combined::type>::type, typename __combined::type>::type>; }; template struct __combined, pythonic::types::static_list> { using type = pythonic::types::dict< typename __combined::type>::type, typename __combined::type>::type>; }; template struct __combined, pythonic::types::dict> { using type = pythonic::types::dict< typename __combined::type>::type, typename __combined::type>::type>; }; template struct __combined, pythonic::types::dict> { using type = pythonic::types::dict< typename __combined::type>::type, typename __combined::type>::type>; }; template struct __combined, pythonic::types::empty_dict> { using type = indexable_dict; }; template struct 
__combined> { using type = indexable_dict; }; template struct __combined, pythonic::types::empty_dict> { using type = indexable_dict; }; template struct __combined, indexable_dict> { using type = pythonic::types::dict::type, V1>; }; template struct __combined, pythonic::types::dict> { using type = pythonic::types::dict::type, V1>; }; template struct __combined> { using type = indexable_dict; }; template struct __combined, indexable> { using type = pythonic::types::dict::type, V>; }; template struct __combined, pythonic::types::dict> { using type = pythonic::types::dict::type, V>; }; template struct __combined> { using type = pythonic::types::dict; }; template struct __combined, indexable_container> { using type = pythonic::types::dict::type, typename __combined::type>; }; template struct __combined, pythonic::types::dict> { using type = pythonic::types::dict::type, typename __combined::type>; }; template struct __combined, pythonic::types::empty_dict> { using type = pythonic::types::dict; }; template struct __combined, dict_container> { using type = pythonic::types::dict; }; template struct __combined, indexable> { using type = pythonic::types::dict; }; template struct __combined, indexable_container> { using type = pythonic::types::dict::type>; }; template struct __combined, dict_container> { using type = pythonic::types::dict::type>; }; template struct __combined, indexable_container> { using type = pythonic::types::dict::type, W>; }; template struct __combined, indexable_dict> { using type = pythonic::types::dict::type, W>; }; template struct __combined, dict_container> { using type = pythonic::types::dict::type>; }; template struct __combined, pythonic::types::dict> { using type = pythonic::types::dict::type>; }; template struct __combined, container> { using type = pythonic::types::dict; }; template struct __combined, indexable> { using type = indexable_dict::type>; }; template struct __combined, indexable_dict> { using type = indexable_dict::type>; }; 
template struct __combined, indexable_dict> { using type = pythonic::types::dict; }; /* } */ #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(types::dict const &v); }; template <> struct to_python { static PyObject *convert(types::empty_dict); }; template struct from_python> { static bool is_convertible(PyObject *obj); static types::dict convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/dynamic_tuple.hpp000066400000000000000000000145661416264035500254420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_DYNAMIC_TUPLE_HPP #define PYTHONIC_INCLUDE_TYPES_DYNAMIC_TUPLE_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/types/traits.hpp" #include "pythonic/include/types/nditerator.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/utils/seq.hpp" #include "pythonic/include/utils/shared_ref.hpp" #include "pythonic/include/utils/nested_container.hpp" #include PYTHONIC_NS_BEGIN namespace types { template struct dynamic_tuple { using container_type = std::vector; utils::shared_ref data; using value_type = T; using pointer = value_type *; using const_pointer = const value_type *; using reference = value_type &; using const_reference = const value_type &; using iterator = typename container_type::const_iterator; using const_iterator = typename container_type::const_iterator; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using reverse_iterator = typename container_type::reverse_iterator; using const_reverse_iterator = typename container_type::const_reverse_iterator; // minimal ndarray interface using dtype = typename utils::nested_container_value_type::type; static const size_t value = utils::nested_container_depth::value; static const bool is_vectorizable = true; static const bool is_strided = false; // 
flat_size implementation template long _flat_size(E const &e, utils::int_<1>) const; template long _flat_size(E const &e, utils::int_) const; long flat_size() const; dynamic_tuple() = default; dynamic_tuple(dynamic_tuple const &) = default; dynamic_tuple(dynamic_tuple &&) = default; dynamic_tuple &operator=(dynamic_tuple &&other) = default; dynamic_tuple &operator=(dynamic_tuple const &other) = default; template dynamic_tuple(Iter start, Iter end) : data(start, end) { } dynamic_tuple(std::initializer_list values) : data(values) { } // Iterators. const_iterator begin() const noexcept { return data->begin(); } const_iterator end() const noexcept { return data->end(); } const_reverse_iterator rbegin() const noexcept { return data->rbegin(); } const_reverse_iterator rend() const noexcept { return data->rend(); } // Capacity. size_type size() const noexcept { return data->size(); } constexpr bool empty() const noexcept { return data->empty(); } intptr_t id() const; // Element access. const_reference fast(long n) const { return (*data)[n]; } #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif const_reference operator[](size_type __n) const { return (*data)[__n < 0 ? __n + size() : __n]; } reference operator[](size_type __n) { return (*data)[__n < 0 ? 
__n + size() : __n]; } // operator bool operator==(dynamic_tuple const &other) const; bool operator!=(dynamic_tuple const &other) const; bool operator<(dynamic_tuple const &other) const; bool operator<=(dynamic_tuple const &other) const; bool operator>(dynamic_tuple const &other) const; bool operator>=(dynamic_tuple const &other) const; dynamic_tuple operator+(dynamic_tuple const &other) const; dynamic_tuple operator[](slice const &s) const { auto ns = s.normalize(size()); dynamic_tuple res; res.data->reserve(ns.size()); for (auto i = ns.lower, step = ns.step, n = ns.upper; i != n; i += step) { res.data->emplace_back(fast(i)); } return res; } dynamic_tuple operator[](contiguous_slice const &s) const { auto ns = s.normalize(size()); return {begin() + ns.lower, begin() + ns.upper}; } dynamic_tuple operator[](fast_contiguous_slice const &s) const { auto ns = s.normalize(size()); return {begin() + ns.lower, begin() + ns.upper}; } using shape_t = typename shape_builder::type; template auto shape() const -> decltype(details::extract_shape(*this, utils::int_{})) { return details::extract_shape(*this, utils::int_{}); } template operator array_base() const { assert(N == size() && "compatible sizes"); array_base out; std::copy(begin(), end(), out.begin()); return out; } }; template std::ostream &operator<<(std::ostream &os, types::dynamic_tuple const &v) { os << '('; size_t n = v.size(); if (n) { os << v.fast(0); for (size_t i = 1; i < n; ++i) os << ", " << v.fast(i); } return os << ')'; } } PYTHONIC_NS_END namespace std { template typename pythonic::types::dynamic_tuple::const_reference get(pythonic::types::dynamic_tuple const &t) { return t[I]; } template typename pythonic::types::dynamic_tuple::reference get(pythonic::types::dynamic_tuple &t) { return t[I]; } template typename pythonic::types::dynamic_tuple::reference get(pythonic::types::dynamic_tuple &&t) { return t[I]; } template struct tuple_element> { using type = typename pythonic::types::dynamic_tuple::value_type; 
}; } /* specialize std::hash */ namespace std { template struct hash> { size_t operator()(pythonic::types::dynamic_tuple const &l) const; }; } #ifdef ENABLE_PYTHON_MODULE #include "pythonic/include/utils/seq.hpp" #include "pythonic/include/utils/fwd.hpp" #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(types::dynamic_tuple const &t); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/empty_iterator.hpp000066400000000000000000000012421416264035500256370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_EMPTY_ITERATOR_HPP #define PYTHONIC_INCLUDE_TYPES_EMPTY_ITERATOR_HPP #include PYTHONIC_NS_BEGIN namespace types { struct empty_iterator : std::iterator { // Empty iterator used, among other things, by empty_set empty_iterator(); empty_iterator(empty_iterator const &); bool operator==(empty_iterator const &) const; bool operator!=(empty_iterator const &) const; bool operator<(empty_iterator const &) const; empty_iterator &operator++(); empty_iterator &operator++(int); double operator*() const; void operator->() const; }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/exceptions.hpp000066400000000000000000000211341416264035500247530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_EXCEPTIONS_HPP #define PYTHONIC_INCLUDE_TYPES_EXCEPTIONS_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/include/types/dynamic_tuple.hpp" #include "pythonic/include/types/attr.hpp" #include "pythonic/include/builtins/str.hpp" #include PYTHONIC_NS_BEGIN namespace types { class BaseException : public std::exception { public: BaseException(const BaseException &e) = default; template BaseException(Types const &... 
types); virtual ~BaseException() noexcept = default; dynamic_tuple args; }; // Use this to create a python exception class #define CLASS_EXCEPTION_DECL(name, parent) \ class name : public parent \ { \ public: \ name() = default; \ name(const name &e) = default; \ template \ name(Types const &... types) \ : parent(types...) \ { \ } \ virtual ~name() noexcept = default; \ }; CLASS_EXCEPTION_DECL(SystemExit, BaseException); CLASS_EXCEPTION_DECL(KeyboardInterrupt, BaseException); CLASS_EXCEPTION_DECL(GeneratorExit, BaseException); CLASS_EXCEPTION_DECL(Exception, BaseException); CLASS_EXCEPTION_DECL(StopIteration, Exception); CLASS_EXCEPTION_DECL(StandardError, Exception); CLASS_EXCEPTION_DECL(Warning, Exception); CLASS_EXCEPTION_DECL(BytesWarning, Warning); CLASS_EXCEPTION_DECL(UnicodeWarning, Warning); CLASS_EXCEPTION_DECL(ImportWarning, Warning); CLASS_EXCEPTION_DECL(FutureWarning, Warning); CLASS_EXCEPTION_DECL(UserWarning, Warning); CLASS_EXCEPTION_DECL(SyntaxWarning, Warning); CLASS_EXCEPTION_DECL(RuntimeWarning, Warning); CLASS_EXCEPTION_DECL(PendingDeprecationWarning, Warning); CLASS_EXCEPTION_DECL(DeprecationWarning, Warning); CLASS_EXCEPTION_DECL(BufferError, StandardError); CLASS_EXCEPTION_DECL(FileNotFoundError, StandardError); CLASS_EXCEPTION_DECL(ArithmeticError, StandardError); CLASS_EXCEPTION_DECL(AssertionError, StandardError); CLASS_EXCEPTION_DECL(AttributeError, StandardError); CLASS_EXCEPTION_DECL(EnvironmentError, StandardError); CLASS_EXCEPTION_DECL(EOFError, StandardError); CLASS_EXCEPTION_DECL(ImportError, StandardError); CLASS_EXCEPTION_DECL(LookupError, StandardError); CLASS_EXCEPTION_DECL(MemoryError, StandardError); CLASS_EXCEPTION_DECL(NameError, StandardError); CLASS_EXCEPTION_DECL(ReferenceError, StandardError); CLASS_EXCEPTION_DECL(RuntimeError, StandardError); CLASS_EXCEPTION_DECL(SyntaxError, StandardError); CLASS_EXCEPTION_DECL(SystemError, StandardError); CLASS_EXCEPTION_DECL(TypeError, StandardError); CLASS_EXCEPTION_DECL(ValueError, 
StandardError); CLASS_EXCEPTION_DECL(FloatingPointError, ArithmeticError); CLASS_EXCEPTION_DECL(OverflowError, ArithmeticError); CLASS_EXCEPTION_DECL(ZeroDivisionError, ArithmeticError); CLASS_EXCEPTION_DECL(IOError, EnvironmentError); CLASS_EXCEPTION_DECL(OSError, EnvironmentError); CLASS_EXCEPTION_DECL(WindowsError, OSError); CLASS_EXCEPTION_DECL(VMSError, OSError); CLASS_EXCEPTION_DECL(IndexError, LookupError); CLASS_EXCEPTION_DECL(KeyError, LookupError); CLASS_EXCEPTION_DECL(UnboundLocalError, NameError); CLASS_EXCEPTION_DECL(NotImplementedError, RuntimeError); CLASS_EXCEPTION_DECL(IndentationError, SyntaxError); CLASS_EXCEPTION_DECL(TabError, IndentationError); CLASS_EXCEPTION_DECL(UnicodeError, ValueError); } PYTHONIC_NS_END #include "pythonic/include/utils/functor.hpp" #define PYTHONIC_EXCEPTION_DECL(name) \ template \ types::name name(Types const &... args); \ \ DEFINE_FUNCTOR(pythonic::builtins, name); /* pythran attribute system { */ #define DECLARE_EXCEPTION_GETATTR(name) \ PYTHONIC_NS_BEGIN \ namespace builtins \ { \ types::none> \ getattr(types::attr::ARGS, types::name const &f); \ } \ PYTHONIC_NS_END #define DECLARE_EXCEPTION_GETATTR_FULL(name) \ PYTHONIC_NS_BEGIN \ namespace builtins \ { \ types::none> \ getattr(types::attr::ARGS, types::name const &e); \ types::none getattr(types::attr::ERRNO, types::name const &e); \ types::none getattr(types::attr::STRERROR, \ types::name const &e); \ types::none getattr(types::attr::FILENAME, \ types::name const &e); \ } \ PYTHONIC_NS_END DECLARE_EXCEPTION_GETATTR(BaseException); DECLARE_EXCEPTION_GETATTR(SystemExit); DECLARE_EXCEPTION_GETATTR(KeyboardInterrupt); DECLARE_EXCEPTION_GETATTR(GeneratorExit); DECLARE_EXCEPTION_GETATTR(Exception); DECLARE_EXCEPTION_GETATTR(StopIteration); DECLARE_EXCEPTION_GETATTR(StandardError); DECLARE_EXCEPTION_GETATTR(Warning); DECLARE_EXCEPTION_GETATTR(BytesWarning); DECLARE_EXCEPTION_GETATTR(UnicodeWarning); DECLARE_EXCEPTION_GETATTR(ImportWarning); 
DECLARE_EXCEPTION_GETATTR(FutureWarning); DECLARE_EXCEPTION_GETATTR(UserWarning); DECLARE_EXCEPTION_GETATTR(SyntaxWarning); DECLARE_EXCEPTION_GETATTR(RuntimeWarning); DECLARE_EXCEPTION_GETATTR(PendingDeprecationWarning); DECLARE_EXCEPTION_GETATTR(DeprecationWarning); DECLARE_EXCEPTION_GETATTR(BufferError); DECLARE_EXCEPTION_GETATTR(FileNotFoundError); DECLARE_EXCEPTION_GETATTR(ArithmeticError); DECLARE_EXCEPTION_GETATTR(AssertionError); DECLARE_EXCEPTION_GETATTR(AttributeError); DECLARE_EXCEPTION_GETATTR(EOFError); DECLARE_EXCEPTION_GETATTR(ImportError); DECLARE_EXCEPTION_GETATTR(LookupError); DECLARE_EXCEPTION_GETATTR(MemoryError); DECLARE_EXCEPTION_GETATTR(NameError); DECLARE_EXCEPTION_GETATTR(ReferenceError); DECLARE_EXCEPTION_GETATTR(RuntimeError); DECLARE_EXCEPTION_GETATTR(SyntaxError); DECLARE_EXCEPTION_GETATTR(SystemError); DECLARE_EXCEPTION_GETATTR(TypeError); DECLARE_EXCEPTION_GETATTR(ValueError); DECLARE_EXCEPTION_GETATTR(FloatingPointError); DECLARE_EXCEPTION_GETATTR(OverflowError); DECLARE_EXCEPTION_GETATTR(ZeroDivisionError); DECLARE_EXCEPTION_GETATTR(IndexError); DECLARE_EXCEPTION_GETATTR(KeyError); DECLARE_EXCEPTION_GETATTR(UnboundLocalError); DECLARE_EXCEPTION_GETATTR(NotImplementedError); DECLARE_EXCEPTION_GETATTR(IndentationError); DECLARE_EXCEPTION_GETATTR(TabError); DECLARE_EXCEPTION_GETATTR(UnicodeError); DECLARE_EXCEPTION_GETATTR_FULL(IOError); DECLARE_EXCEPTION_GETATTR_FULL(EnvironmentError); DECLARE_EXCEPTION_GETATTR_FULL(OSError); PYTHONIC_NS_BEGIN namespace types { std::ostream &operator<<(std::ostream &o, BaseException const &e); /* @brief Convert EnvironmentError to a string. * * The number of arguments used when creating the EnvironmentError impact * the resulting "type" || formatting of the chain. 
We aim to mimic python * behavior of course: * - only one arg, then assume it can be converted to string, * - two args, then the first one is the errno, the next one a string, * - three args, like two args, adding "filename" as third one (after ':') * - four || more args, the "tuple" used to construct the exception * */ std::ostream &operator<<(std::ostream &o, EnvironmentError const &e); } PYTHONIC_NS_END /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/file.hpp000066400000000000000000000054341416264035500235160ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_FILE_HPP #define PYTHONIC_INCLUDE_TYPES_FILE_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/utils/shared_ref.hpp" #include "pythonic/include/types/str.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/attr.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace types { class file; struct file_iterator : std::iterator { private: file *f; mutable bool set; mutable types::str curr; long position; public: using value_type = types::str; file_iterator(file &ref); file_iterator(); bool operator==(file_iterator const &f2) const; bool operator!=(file_iterator const &f2) const; bool operator<(file_iterator const &f2) const; file_iterator &operator++(); types::str operator*() const; }; struct _file { FILE *f; _file(); _file(types::str const &filename, types::str const &strmode = "r"); FILE *operator*() const; ~_file(); }; class file : public file_iterator { private: using container_type = _file; utils::shared_ref data; bool is_open; types::str mode, name, newlines; public: // Types using iterator = file_iterator; using value_type = types::str; // Constructors file(); file(types::str const &filename, types::str const &strmode = "r"); // Iterators iterator begin(); iterator end(); // Modifiers void open(types::str const &filename, types::str const &strmode); void close(); bool 
closed() const; types::str const &getmode() const; types::str const &getname() const; types::str const &getnewlines() const; bool eof(); void flush(); long fileno() const; bool isatty() const; types::str read(long size = -1); types::str readline(long size = std::numeric_limits::max()); types::list readlines(long sizehint = -1); void seek(long offset, long whence = SEEK_SET); long tell() const; void truncate(long size = -1); long write(types::str const &str); template void writelines(T const &seq); }; } PYTHONIC_NS_END /* pythran attribute system { */ PYTHONIC_NS_BEGIN namespace builtins { bool getattr(types::attr::CLOSED, types::file const &f); types::str const &getattr(types::attr::MODE, types::file const &f); types::str const &getattr(types::attr::NAME, types::file const &f); // Python seems to always return none... Doing the same. types::none_type getattr(types::attr::NEWLINES, types::file const &f); } PYTHONIC_NS_END /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/finfo.hpp000066400000000000000000000010671416264035500236760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_FINFO_HPP #define PYTHONIC_INCLUDE_TYPES_FINFO_HPP #include "pythonic/include/types/attr.hpp" #include PYTHONIC_NS_BEGIN namespace types { template struct finfo { T eps() const; }; template struct finfo> { T eps() const; }; } PYTHONIC_NS_END /* pythran attribute system { */ PYTHONIC_NS_BEGIN namespace builtins { template auto getattr(types::attr::EPS, pythonic::types::finfo const &f) -> decltype(f.eps()); } PYTHONIC_NS_END /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/float.hpp000066400000000000000000000016121416264035500236760ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_FLOAT_HPP #define PYTHONIC_INCLUDE_TYPES_FLOAT_HPP #include "pythonic/include/types/attr.hpp" #include #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template <> struct to_python { static PyObject *convert(long double d); }; 
template <> struct to_python { static PyObject *convert(double d); }; template <> struct to_python { static PyObject *convert(float d); }; template <> struct from_python { static bool is_convertible(PyObject *obj); static long double convert(PyObject *obj); }; template <> struct from_python { static bool is_convertible(PyObject *obj); static double convert(PyObject *obj); }; template <> struct from_python { static bool is_convertible(PyObject *obj); static float convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/float128.hpp000066400000000000000000000001401416264035500241240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_FLOAT128_HPP #define PYTHONIC_INCLUDE_TYPES_FLOAT128_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/float32.hpp000066400000000000000000000001361416264035500240430ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_FLOAT32_HPP #define PYTHONIC_INCLUDE_TYPES_FLOAT32_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/float64.hpp000066400000000000000000000001361416264035500240500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_FLOAT64_HPP #define PYTHONIC_INCLUDE_TYPES_FLOAT64_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/generator.hpp000066400000000000000000000014531416264035500245620ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_GENERATOR_HPP #define PYTHONIC_INCLUDE_TYPES_GENERATOR_HPP #include #include PYTHONIC_NS_BEGIN namespace types { template struct generator_iterator : std::iterator { T the_generator; generator_iterator(); generator_iterator(T const &a_generator); generator_iterator &operator++(); typename T::result_type operator*() const; bool operator!=(generator_iterator const &other) const; bool operator==(generator_iterator const &other) const; bool operator<(generator_iterator const &other) const; }; } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/types/immediate.hpp000066400000000000000000000011761416264035500245340ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_IMMEDIATE_HPP #define PYTHONIC_INCLUDE_TYPES_IMMEDIATE_HPP PYTHONIC_NS_BEGIN namespace types { template struct immediate { immediate() = default; immediate(immediate const &) = default; immediate(immediate &&) = default; operator T() const { return Val; } template ::type> immediate(std::integral_constant) { } }; using true_immediate = immediate; using false_immediate = immediate; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/int.hpp000066400000000000000000000042011416264035500233600ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_INT_HPP #define PYTHONIC_INCLUDE_TYPES_INT_HPP #include "pythonic/include/types/attr.hpp" PYTHONIC_NS_BEGIN namespace builtins { template typename std::enable_if::value, T>::value getattr(types::attr::REAL, T self); template typename std::enable_if::value, T>::value getattr(types::attr::IMAG, T self); } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN #define PYTHONIC_INT_TO_PYTHON(TYPE) \ template <> \ struct to_python { \ static PyObject *convert(TYPE l); \ } PYTHONIC_INT_TO_PYTHON(char); PYTHONIC_INT_TO_PYTHON(unsigned char); PYTHONIC_INT_TO_PYTHON(signed char); PYTHONIC_INT_TO_PYTHON(unsigned short); PYTHONIC_INT_TO_PYTHON(signed short); PYTHONIC_INT_TO_PYTHON(unsigned int); PYTHONIC_INT_TO_PYTHON(signed int); PYTHONIC_INT_TO_PYTHON(unsigned long); PYTHONIC_INT_TO_PYTHON(signed long); PYTHONIC_INT_TO_PYTHON(unsigned long long); PYTHONIC_INT_TO_PYTHON(signed long long); #undef PYTHONIC_INT_TO_PYTHON #define PYTHONIC_INT_FROM_PYTHON(TYPE) \ template <> \ struct from_python { \ static bool is_convertible(PyObject *obj); \ static TYPE convert(PyObject *obj); \ } PYTHONIC_INT_FROM_PYTHON(unsigned char); PYTHONIC_INT_FROM_PYTHON(signed char); PYTHONIC_INT_FROM_PYTHON(unsigned short); 
PYTHONIC_INT_FROM_PYTHON(signed short); PYTHONIC_INT_FROM_PYTHON(unsigned int); PYTHONIC_INT_FROM_PYTHON(signed int); PYTHONIC_INT_FROM_PYTHON(unsigned long); PYTHONIC_INT_FROM_PYTHON(signed long); PYTHONIC_INT_FROM_PYTHON(unsigned long long); PYTHONIC_INT_FROM_PYTHON(signed long long); #undef PYTHONIC_INT_FROM_PYTHON PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/int16.hpp000066400000000000000000000001321416264035500235260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_INT16_HPP #define PYTHONIC_INCLUDE_TYPES_INT16_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/int32.hpp000066400000000000000000000001321416264035500235240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_INT32_HPP #define PYTHONIC_INCLUDE_TYPES_INT32_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/int64.hpp000066400000000000000000000001321416264035500235310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_INT64_HPP #define PYTHONIC_INCLUDE_TYPES_INT64_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/int8.hpp000066400000000000000000000001301416264035500234450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_INT8_HPP #define PYTHONIC_INCLUDE_TYPES_INT8_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/intc.hpp000066400000000000000000000001301416264035500235200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_INTC_HPP #define PYTHONIC_INCLUDE_TYPES_INTC_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/intp.hpp000066400000000000000000000001301416264035500235350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_INTP_HPP #define PYTHONIC_INCLUDE_TYPES_INTP_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/lazy.hpp000066400000000000000000000011041416264035500235440ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_LAZY_HPP #define PYTHONIC_INCLUDE_TYPES_LAZY_HPP PYTHONIC_NS_BEGIN namespace types { template using lazy_res_t = 
decltype((std::declval()())); template using lazy_res_decay_t = typename std::decay>::type; template using lazy_combined_t = typename std::conditional< std::is_same, lazy_res_t>::value, lazy_res_t, typename __combined, lazy_res_decay_t>::type>::type; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/list.hpp000066400000000000000000000504031416264035500235460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_LIST_HPP #define PYTHONIC_INCLUDE_TYPES_LIST_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/types/empty_iterator.hpp" #include "pythonic/include/types/nditerator.hpp" #include "pythonic/include/utils/shared_ref.hpp" #include "pythonic/include/utils/nested_container.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/utils/reserve.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/types/slice.hpp" #include "pythonic/include/types/vectorizable_type.hpp" #include #include #include #include #include PYTHONIC_NS_BEGIN namespace types { template using container = std::vector; static const size_t DEFAULT_LIST_CAPACITY = 16; /* forward declaration */ struct empty_list; template class list; template class sliced_list; template struct ndarray; template struct pshape; template struct is_list { static const bool value = false; }; template struct is_list> { static const bool value = true; }; template struct is_list> { static const bool value = true; }; template struct is_list> { static const bool value = true; }; /* for type disambiguification */ struct single_value { }; /* list view */ template class sliced_list { // data holder typedef typename std::remove_cv::type>::type _type; typedef container<_type> container_type; utils::shared_ref data; template friend class list; typename S::normalized_type slicing; public: // types typedef typename container_type::reference reference; typedef typename container_type::const_reference const_reference; typedef nditerator 
iterator; typedef const_nditerator const_iterator; typedef typename container_type::size_type size_type; typedef typename container_type::difference_type difference_type; typedef typename container_type::value_type value_type; typedef typename container_type::allocator_type allocator_type; typedef typename container_type::pointer pointer; typedef typename container_type::const_pointer const_pointer; typedef typename container_type::reverse_iterator reverse_iterator; typedef typename container_type::const_reverse_iterator const_reverse_iterator; // minimal ndarray interface typedef typename utils::nested_container_value_type::type dtype; static const size_t value = utils::nested_container_depth::value; static_assert(value != 0, "valid shape"); static const bool is_vectorizable = types::is_vectorizable_dtype::value && !std::is_same::value; static const bool is_strided = std::is_same::value; using shape_t = types::array; template auto shape() const -> decltype(details::extract_shape(*this, utils::int_{})) { return details::extract_shape(*this, utils::int_{}); } // constructor sliced_list(); sliced_list(sliced_list const &s); sliced_list(list const &other, S const &s); template sliced_list(utils::shared_ref const &other, Sn const &s); // assignment sliced_list &operator=(list const &); sliced_list &operator=(sliced_list const &); list operator+(list const &) const; template list operator+(array_base const &) const; template list::type> operator+(sliced_list const &) const; // iterators iterator begin(); const_iterator begin() const; iterator end(); const_iterator end() const; // size long size() const; explicit operator bool() const; // accessors const_reference fast(long i) const; const_reference operator[](long i) const; reference operator[](long i); template typename std::enable_if< is_slice::value, sliced_list() * std::declval())>>::type operator[](Sp s) const; template dtype load(long index0, long index1, Indices... 
indices) const { return fast(index0).load(index1, indices...); } dtype load(long index) const { return fast(index); } // comparison template bool operator==(list const &other) const; bool operator==(empty_list const &other) const; #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif // other operations template bool contains(V const &v) const; intptr_t id() const; long count(T const &x) const; template friend std::ostream &operator<<(std::ostream &os, sliced_list const &v); }; /* list */ template class list { // data holder typedef typename std::remove_cv::type>::type _type; typedef container<_type> container_type; utils::shared_ref data; template friend class sliced_list; template friend class list; public: // types typedef typename container_type::value_type value_type; typedef typename container_type::reference reference; typedef typename container_type::const_reference const_reference; typedef typename container_type::iterator iterator; typedef typename container_type::const_iterator const_iterator; typedef typename container_type::size_type size_type; typedef typename container_type::difference_type difference_type; typedef typename container_type::allocator_type allocator_type; typedef typename container_type::pointer pointer; typedef typename container_type::const_pointer const_pointer; typedef typename container_type::reverse_iterator reverse_iterator; typedef typename container_type::const_reverse_iterator const_reverse_iterator; // minimal ndarray interface typedef typename utils::nested_container_value_type::type dtype; static const size_t value = utils::nested_container_depth::value; static const bool is_vectorizable = types::is_vectorizable::value; static const bool is_strided = false; // constructors list(); template list(InputIterator start, InputIterator stop); list(empty_list const &); 
list(size_type sz); list(T const &value, single_value, size_type sz = 1); list(std::initializer_list l); list(list &&other); list(list const &other); template list(list const &other); template list(sliced_list const &other); template list(static_list const &other) : list(other.begin(), other.end()) { } template list(numpy_gexpr, S...> const &other) : list(other.begin(), other.end()) { } list &operator=(list &&other); template list &operator=(list const &other); list &operator=(list const &other); list &operator=(empty_list const &); template list &operator=(array_base const &); template list &operator=(sliced_list const &other); template list & operator=(ndarray> const &); // implemented in ndarray.hpp template list &operator+=(sliced_list const &other); template list operator+(sliced_list const &other) const; template list operator+(array_base const &other) const; // io template friend std::ostream &operator<<(std::ostream &os, list const &v); // comparison template bool operator==(list const &other) const; bool operator==(empty_list const &) const; template bool operator!=(list const &other) const; bool operator!=(empty_list const &) const; // iterators iterator begin(); const_iterator begin() const; iterator end(); const_iterator end() const; reverse_iterator rbegin(); const_reverse_iterator rbegin() const; reverse_iterator rend(); const_reverse_iterator rend() const; // comparison bool operator<(list const &other) const; bool operator<=(list const &other) const; bool operator>(list const &other) const; bool operator>=(list const &other) const; // element access #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif reference fast(long n); reference operator[](long n); const_reference fast(long n) const; const_reference operator[](long n) const; template typename std::enable_if::value, sliced_list>::type 
operator[](Sp const &s) const; template dtype load(long index0, long index1, Indices... indices) const { return fast(index0).load(index1, indices...); } dtype load(long index) const { return fast(index); } // modifiers template void push_back(Tp &&x); template void insert(long i, Tp &&x); void reserve(size_t n); void resize(size_t n); iterator erase(size_t n); T pop(long x = -1); // TODO: have to raise a valueError none_type remove(T const &x); // Misc // TODO: have to raise a valueError long index(T const &x) const; // list interface explicit operator bool() const; template list::type> operator+(list const &s) const; template list() + std::declval::value_type>())> operator+(sliced_list const &s) const; list operator+(empty_list const &) const; list operator*(long t) const; list const &operator*=(long t); template list &operator+=(F const &s); long size() const; template long _flat_size(E const &e, utils::int_<1>) const; template long _flat_size(E const &e, utils::int_) const; long flat_size() const; template bool contains(V const &v) const; intptr_t id() const; long count(T const &x) const; using shape_t = array; template long shape() const { if (I == 0) return size(); else return details::extract_shape(*this, utils::int_{}); } template operator array_base() const { assert(size() == N && "consistent size"); array_base res; std::copy(begin(), end(), res.begin()); return res; } }; template list operator*(static_list const &self, long t) { list res(self); res *= t; return res; } template list operator*(long t, static_list const &self) { return self * t; } template list::type> operator+(static_list const &l0, list const &l1) { list::type> out(l0.begin(), l0.end()); return out += l1; } /* empty list implementation */ struct empty_list { // minimal ndarray interface typedef char dtype; static const size_t value = 1; static const bool is_vectorizable = false; static const bool is_strided = false; using shape_t = types::array; typedef char value_type; typedef 
empty_iterator iterator; typedef empty_iterator const_iterator; #ifdef USE_XSIMD typedef empty_iterator simd_iterator; typedef empty_iterator simd_iterator_nobroadcast; #endif template list operator+(list const &s) const; template sliced_list operator+(sliced_list const &s) const; template static_list operator+(array_base const &s) const; empty_list operator+(empty_list const &) const; template typename std::enable_if::value, list>::type operator+(F s) const; explicit operator bool() const; template operator list() const; static constexpr long size(); template std::integral_constant shape() const { return {}; } char fast(long) const { return {}; } char operator[](long) const { return {}; } template typename std::enable_if::value, empty_list>::type operator[](S) const { return {}; } empty_iterator begin() const { return {}; } empty_iterator end() const { return {}; } }; std::ostream &operator<<(std::ostream &os, empty_list const &); template list operator+(static_list const &self, list const &other) { list res(self.begin(), self.end()); return res += other; } } namespace utils { /** * Reserve enough space to save all values generated from f. * * We use a dummy arguments (p) to reserve only when f have a * const_iterator type. 
*/ template void reserve(types::list &l, From const &f, typename From::const_iterator *p = nullptr); } template struct assignable> { typedef types::list::type> type; }; template struct assignable> { typedef types::list::type> type; }; // to cope with std::vector specialization template <> struct returnable::reference> { using type = bool; }; PYTHONIC_NS_END /* overload std::get */ namespace std { template typename pythonic::types::list::reference get(pythonic::types::list &t); template typename pythonic::types::list::const_reference get(pythonic::types::list const &t); template typename pythonic::types::list::value_type get(pythonic::types::list &&t); template typename pythonic::types::sliced_list::reference get(pythonic::types::sliced_list &t); template typename pythonic::types::sliced_list::const_reference get(pythonic::types::sliced_list const &t); template typename pythonic::types::sliced_list::value_type get(pythonic::types::sliced_list &&t); template struct tuple_element> { typedef typename pythonic::types::list::value_type type; }; template struct tuple_element> { typedef typename pythonic::types::sliced_list::value_type type; }; } /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, pythonic::types::empty_list> { typedef pythonic::types::list type; }; template struct __combined> { typedef pythonic::types::list type; }; template struct __combined, pythonic::types::list> { typedef pythonic::types::list::type> type; }; template struct __combined, container> { typedef pythonic::types::list::type> type; }; template struct __combined, pythonic::types::list> { typedef pythonic::types::list type; }; template struct __combined, indexable> { typedef pythonic::types::list type; }; template struct __combined, pythonic::types::list> { typedef pythonic::types::list::type> type; }; template struct __combined, indexable_container> { typedef pythonic::types::list::type> type; }; template struct __combined, 
pythonic::types::empty_list> { typedef pythonic::types::list type; }; template struct __combined> { typedef pythonic::types::list type; }; template struct __combined, pythonic::types::list> { typedef pythonic::types::list::type> type; }; template struct __combined, pythonic::types::empty_list> { typedef pythonic::types::list type; }; template struct __combined> { typedef pythonic::types::list type; }; template struct __combined, pythonic::types::list> { typedef pythonic::types::list::type> type; }; template struct __combined, pythonic::types::sliced_list> { typedef pythonic::types::list::type> type; }; template struct __combined, pythonic::types::empty_list> { typedef pythonic::types::list type; }; template struct __combined> { typedef pythonic::types::list type; }; template struct __combined, pythonic::types::list> { typedef pythonic::types::list::type> type; }; template struct __combined, pythonic::types::array_base> { typedef pythonic::types::list::type> type; }; /* } */ #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN template <> struct to_python::reference> { static PyObject *convert(typename std::vector::reference const &v); }; struct phantom_type; // ghost don't exist template <> struct to_python::const_reference>::value, phantom_type, typename std::vector::const_reference>::type> { static PyObject * convert(typename std::vector::const_reference const &v); }; template struct to_python> { static PyObject *convert(types::list const &v); }; template struct to_python> { static PyObject *convert(types::sliced_list const &v); }; template <> struct to_python { static PyObject *convert(types::empty_list const &); }; template struct from_python> { static bool is_convertible(PyObject *obj); static types::list convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/ndarray.hpp000066400000000000000000001110351416264035500242320ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NDARRAY_HPP #define 
PYTHONIC_INCLUDE_TYPES_NDARRAY_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/types/empty_iterator.hpp" #include "pythonic/include/types/attr.hpp" #include "pythonic/include/utils/nested_container.hpp" #include "pythonic/include/utils/shared_ref.hpp" #include "pythonic/include/utils/reserve.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/utils/broadcast_copy.hpp" #include "pythonic/include/types/slice.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/types/raw_array.hpp" #include "pythonic/include/numpy/bool_.hpp" #include "pythonic/include/numpy/uint8.hpp" #include "pythonic/include/numpy/int8.hpp" #include "pythonic/include/numpy/uint16.hpp" #include "pythonic/include/numpy/int16.hpp" #include "pythonic/include/numpy/uint32.hpp" #include "pythonic/include/numpy/int32.hpp" #include "pythonic/include/numpy/uint64.hpp" #include "pythonic/include/numpy/int64.hpp" #include "pythonic/include/numpy/float32.hpp" #include "pythonic/include/numpy/float64.hpp" #include "pythonic/include/numpy/complex64.hpp" #include "pythonic/include/numpy/complex128.hpp" #include "pythonic/include/types/dynamic_tuple.hpp" #include "pythonic/include/types/vectorizable_type.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" #include "pythonic/include/types/numpy_expr.hpp" #include "pythonic/include/types/numpy_texpr.hpp" #include "pythonic/include/types/numpy_iexpr.hpp" #include "pythonic/include/types/numpy_gexpr.hpp" #include "pythonic/include/types/numpy_vexpr.hpp" #include "pythonic/include/utils/numpy_traits.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/include/types/pointer.hpp" #include "pythonic/include/builtins/len.hpp" #include #include #include #include #include #include #ifdef ENABLE_PYTHON_MODULE // Cython still uses the deprecated API, so we can't set this macro in this // case! 
#ifndef CYTHON_ABI #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #endif #include "numpy/arrayobject.h" #endif #ifdef USE_XSIMD #include #endif PYTHONIC_NS_BEGIN namespace types { template struct ndarray; template struct type_helper; /* Helper for dimension-specific part of ndarray * * Instead of specializing the whole ndarray class, the dimension-specific *behavior are stored here. * There are two specialization for this type: * - a specialization depending on the dimensionality (==1 || > 1) * - a specialization depending on the constness. * * The raw ndarray specialization implies a *swallow copy* of the *ndarray, && thus a refcount increase. * It is meant to be used when indexing an rvalue, as in *``np.zeros(10)[i]``. * * The ndarray const& specialization implies a *reference copy*. It is *used when indexing a lvalue, as in ``a[i]`` */ template struct type_helper> { static_assert(std::tuple_size::value != 1, "matching ok"); using type = numpy_iexpr>; using iterator = nditerator>; using const_iterator = const_nditerator>; type_helper() = delete; // Not intended to be instantiated static iterator make_iterator(ndarray &n, long i); static const_iterator make_iterator(ndarray const &n, long i); template static T *initialize_from_iterable(S &shape, T *from, Iter &&iter); static numpy_iexpr> get(ndarray &&self, long i); }; template struct type_helper const &> { static_assert(std::tuple_size::value != 1, "matching ok"); using type = numpy_iexpr const &>; using iterator = nditerator>; using const_iterator = const_nditerator>; type_helper() = delete; // Not intended to be instantiated static iterator make_iterator(ndarray &n, long i); static const_iterator make_iterator(ndarray const &n, long i); template static T *initialize_from_iterable(S &shape, T *from, Iter &&iter); static numpy_iexpr const &> get(ndarray const &self, long i); }; template struct type_helper>> { using type = T; using iterator = T *; using const_iterator = T const *; type_helper() = delete; // Not 
intended to be instantiated static iterator make_iterator(ndarray> &n, long i); static const_iterator make_iterator(ndarray> const &n, long i); template static T *initialize_from_iterable(S &shape, T *from, Iter &&iter); static type get(ndarray> &&self, long i); }; template struct type_helper> const &> { using type = T; using iterator = T *; using const_iterator = T const *; type_helper() = delete; // Not intended to be instantiated static iterator make_iterator(ndarray> &n, long i); static const_iterator make_iterator(ndarray> const &n, long i); template static T *initialize_from_iterable(S &shape, T *from, Iter &&iter); static type &get(ndarray> const &self, long i); }; template struct type_helper>> { using type = T; using iterator = T *; using const_iterator = T const *; type_helper() = delete; // Not intended to be instantiated static iterator make_iterator(ndarray> &n, long i); static const_iterator make_iterator(ndarray> const &n, long i); template static T *initialize_from_iterable(S &shape, T *from, Iter &&iter); static type get(ndarray> &&self, long i); }; template struct type_helper> const &> { using type = T; using iterator = T *; using const_iterator = T const *; type_helper() = delete; // Not intended to be instantiated static iterator make_iterator(ndarray> &n, long i); static const_iterator make_iterator(ndarray> const &n, long i); template static T *initialize_from_iterable(S &shape, T *from, Iter &&iter); static type &get(ndarray> const &self, long i); }; /* Multidimensional array of values * * An ndarray wraps a raw array pointers && manages multiple dimensions * casted overt the raw data. * The number of dimensions is fixed as well as the type of the underlying * data. * A shared pointer is used internally to mimic Python's behavior. 
* */ template struct ndarray { static const bool is_vectorizable = types::is_vectorizable::value; static const bool is_strided = false; /* types */ static constexpr size_t value = std::tuple_size::value; using dtype = T; using value_type = typename type_helper::type; using reference = value_type &; using const_reference = value_type const &; using iterator = typename type_helper::iterator; using const_iterator = typename type_helper::const_iterator; using flat_iterator = T *; using const_flat_iterator = T const *; using shape_t = pS; static_assert(std::tuple_size::value == value, "consistent shape size"); /* members */ utils::shared_ref> mem; // shared data pointer T *buffer; // pointer to the first data stored in the equivalent flat // array shape_t _shape; // shape of the multidimensional array sutils::concat_t, pshape>> _strides; // strides /* mem management */ void mark_memory_external(extern_type obj) { mem.external(obj); mem->forget(); } /* constructors */ ndarray(); ndarray(ndarray const &) = default; ndarray(ndarray &&) = default; /* assignment */ ndarray &operator=(ndarray const &other) = default; /* from other memory */ ndarray(utils::shared_ref> const &mem, pS const &shape); ndarray(utils::shared_ref> &&mem, pS const &shape); /* from other array */ template ndarray(ndarray const &other); template ndarray(ndarray const &other); /* from a seed */ ndarray(pS const &shape, none_type init); ndarray(pS const &shape, T init); /* from a foreign pointer */ template ndarray(T *data, S const *pshape, ownership o); ndarray(T *data, pS const &pshape, ownership o); #ifdef ENABLE_PYTHON_MODULE template ndarray(T *data, S const *pshape, PyObject *obj); ndarray(T *data, pS const &pshape, PyObject *obj); #endif template < class Iterable, class = typename std::enable_if< !is_array::type>::type>::value && is_iterable::type>::type>:: value, void>::type> ndarray(Iterable &&iterable); /* from a numpy expression */ template void initialize_from_expr(E const &expr); template 
ndarray(numpy_expr const &expr); template ndarray(numpy_texpr const &expr); template ndarray(numpy_texpr_2 const &expr); template ndarray(numpy_gexpr const &expr); template ndarray(numpy_iexpr const &expr); template ndarray(numpy_vexpr const &expr); /* update operators */ template ndarray &update_(Expr const &expr); template ndarray &operator+=(Expr const &expr); template ndarray &operator-=(Expr const &expr); template ndarray &operator*=(Expr const &expr); template ndarray &operator/=(Expr const &expr); template ndarray &operator&=(Expr const &expr); template ndarray &operator|=(Expr const &expr); template ndarray &operator^=(Expr const &expr); template void store(E elt, Indices... indices) { static_assert(is_dtype::value, "valid store"); *(buffer + noffset::value>{}( *this, array{{indices...}})) = static_cast(elt); } template dtype load(Indices... indices) const { return *(buffer + noffset::value>{}( *this, array{{indices...}})); } template void update(E elt, Indices... indices) const { static_assert(is_dtype::value, "valid store"); Op{}(*(buffer + noffset::value>{}( *this, array{{indices...}})), static_cast(elt)); } /* element indexing * differentiate const from non const, && r-value from l-value * */ auto fast(long i) const & -> decltype(type_helper::get(*this, i)) { return type_helper::get(*this, i); } auto fast(long i) && -> decltype(type_helper::get(std::move(*this), i)) { return type_helper::get(std::move(*this), i); } template typename std::enable_if::value, T &>::type fast(array const &indices); template typename std::enable_if::value, T>::type fast(array const &indices) const; template auto fast(array const &indices) const & -> typename std::enable_if::value, decltype(nget().fast(*this, indices))>::type; template auto fast(array const &indices) && -> typename std::enable_if::value, decltype(nget().fast(std::move(*this), indices))>::type; #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; 
template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif #ifndef NDEBUG template bool inbound_indices(IndicesTy const &indices) const { auto const shp = sutils::getshape(*this); for (size_t i = 0, n = indices.size(); i < n; ++i) { auto const index = indices[i]; auto const dim = shp[i]; if (0 > index || index >= dim) return false; } return true; } #endif /* slice indexing */ ndarray>> operator[](none_type) const; template typename std::enable_if::value, numpy_gexpr>>::type operator[](S const &s) const &; template typename std::enable_if::value, numpy_gexpr>>::type operator[](S const &s) && ; long size() const; /* extended slice indexing */ template auto operator()(Ty s) const -> typename std::enable_if::value, decltype((*this)[s])>::type { return (*this)[s]; } template auto operator()(S0 const &s0, S const &... s) const & -> decltype( extended_slice::value>{}((*this), s0, s...)); template auto operator()(S0 const &s0, S const &... s) & -> decltype(extended_slice::value>{}((*this), s0, s...)); template auto operator()(S0 const &s0, S const &... 
s) && -> decltype(extended_slice::value>{}( std::move(*this), s0, s...)); /* element filtering */ template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr>>>::type fast(F const &filter) const; template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr>>>::type operator[](F const &filter) const; template // indexing through an array of boolean -- a mask typename std::enable_if::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr>, ndarray>>>::type fast(F const &filter) const; template // indexing through an array of boolean -- a mask typename std::enable_if::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr>, ndarray>>>::type operator[](F const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type operator[](F const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type fast(F const &filter) const; auto operator[](long i) const & -> decltype(this->fast(i)) { if (i < 0) i += std::get<0>(_shape); assert(0 <= i && i < std::get<0>(_shape)); return fast(i); } auto operator[](long i) && -> decltype(std::move(*this).fast(i)) { if (i < 0) i += std::get<0>(_shape); assert(0 <= i && i < std::get<0>(_shape)); return std::move(*this).fast(i); } template typename std::enable_if::value, T const &>::type operator[](array const &indices) const; template typename std::enable_if::value, T &>::type operator[](array const &indices); template auto operator[](array const &indices) const & -> 
typename std::enable_if::value, decltype(nget()(*this, indices))>::type; template auto operator[](array const &indices) && -> typename std::enable_if::value, decltype(nget()(std::move(*this), indices))>::type; template auto operator[](std::tuple const &indices) const -> decltype((*this)[std::get<0>(indices)]) { return (*this)[std::get<0>(indices)]; } template auto operator[](std::tuple const &indices) const -> typename std::enable_if< std::is_integral::value, decltype((*this)[std::get<0>(indices)][tuple_tail(indices)])>::type { return (*this)[std::get<0>(indices)][tuple_tail(indices)]; } template auto _fwdindex(Slices const &indices, utils::index_sequence) const & -> decltype((*this)(std::get(indices)...)) { return (*this)(std::get(indices)...); } template auto _fwdindex(dynamic_tuple const &indices, utils::index_sequence) const & -> decltype((*this)(std::get(indices)...)) { return (*this)((indices.size() > Is ? std::get(indices) : contiguous_slice())...); } template ::value, void>::type> auto operator[](std::tuple const &indices) const -> typename std::enable_if::value, decltype(this->_fwdindex( indices, utils::make_index_sequence< 2 + sizeof...(Tys)>()))>::type; template ::value, void>::type> auto operator[](array const &indices) const & -> decltype(this->_fwdindex(indices, utils::make_index_sequence())) { return _fwdindex(indices, utils::make_index_sequence()); } template auto operator[](dynamic_tuple const &indices) const -> decltype(this->_fwdindex(indices, utils::make_index_sequence())) { return _fwdindex(indices, utils::make_index_sequence()); } /* through iterators */ iterator begin(); const_iterator begin() const; iterator end(); const_iterator end() const; const_flat_iterator fbegin() const; const_flat_iterator fend() const; flat_iterator fbegin(); flat_iterator fend(); /* member functions */ long flat_size() const; bool may_overlap(ndarray const &) const; template ndarray reshape(qS const &shape) const &; template ndarray reshape(qS const &shape) && ; 
explicit operator bool() const; ndarray> flat() const; ndarray copy() const; intptr_t id() const; template auto shape() const -> decltype(std::get(_shape)) { return std::get(_shape); } template auto strides() const -> decltype(std::get(_strides)) { return std::get(_strides); } operator pointer() { return {buffer}; } }; /* pretty printing { */ template std::ostream &operator<<(std::ostream &os, ndarray const &e); template typename std::enable_if::value, std::ostream &>::type operator<<(std::ostream &os, E const &e); /* } */ } PYTHONIC_NS_END /* std::get overloads */ namespace std { template auto get(E &&a) -> typename std::enable_if< pythonic::types::is_array::type>::type>::value, decltype(std::forward(a)[I])>::type; template struct tuple_element> { using type = typename pythonic::types::ndarray::value_type; }; template struct tuple_element> { using type = typename pythonic::types::numpy_expr::dtype; }; template struct tuple_element> { using type = decltype(std::declval>()[0]); }; template struct tuple_element> { using type = decltype(std::declval>()[0]); }; template struct tuple_element> { using type = decltype(std::declval>()[0]); }; } /* pythran attribute system { */ #include "pythonic/include/numpy/transpose.hpp" PYTHONIC_NS_BEGIN namespace types { namespace details { using dtype_table = std::tuple; using dtype_utable = std::tuple; template struct dtype_helper { using table = typename std::conditional::value, dtype_table, dtype_utable>::type; using type = typename std::tuple_element < (sizeof(T) < std::tuple_size::value) ? 
sizeof(T) : 0, table > ::type; }; template <> struct dtype_helper { using type = pythonic::numpy::functor::bool_; }; template <> struct dtype_helper { using type = pythonic::numpy::functor::float32; }; template <> struct dtype_helper { using type = pythonic::numpy::functor::float64; }; template <> struct dtype_helper> { using type = pythonic::numpy::functor::complex64; }; template <> struct dtype_helper> { using type = pythonic::numpy::functor::complex128; }; template <> struct dtype_helper> { using type = pythonic::numpy::functor::complex256; }; } template using dtype_t = typename details::dtype_helper::type; } namespace builtins { namespace details { template struct _build_gexpr { template auto operator()(E const &a, S const &... slices) -> decltype(_build_gexpr{}(a, types::contiguous_slice(), slices...)); }; template <> struct _build_gexpr<1> { template types::numpy_gexpr...> operator()(E const &a, S const &... slices); }; template E _make_real(E const &a, utils::int_<0>); template auto _make_real(E const &a, utils::int_<1>) -> decltype(_build_gexpr{}( types::ndarray::type, types::array>{}, types::slice())); template auto real_get(T &&expr, Ss const &indices, utils::index_sequence) -> decltype(std::forward(expr)(std::get(indices)...)) { return std::forward(expr)(std::get(indices)...); } template types::ndarray _make_imag(E const &a, utils::int_<0>); template auto _make_imag(E const &a, utils::int_<1>) -> decltype(_build_gexpr{}( types::ndarray::type, types::array>{}, types::slice())); template auto imag_get(T &&expr, Ss const &indices, utils::index_sequence) -> decltype(std::forward(expr)(std::get(indices)...)) { return std::forward(expr)(std::get(indices)...); } } template types::array getattr(types::attr::SHAPE, E const &a); template long getattr(types::attr::NDIM, E const &a); template types::array getattr(types::attr::STRIDES, E const &a); template long getattr(types::attr::SIZE, E const &a); template long getattr(types::attr::ITEMSIZE, E const &a); template 
long getattr(types::attr::NBYTES, E const &a); template auto getattr(types::attr::FLAT, E const &a) -> decltype(a.flat()); template auto getattr(types::attr::T, E const &a) -> decltype(numpy::transpose(a)) { return numpy::transpose(a); } template auto getattr(types::attr::REAL, types::ndarray const &a) -> decltype( details::_make_real(a, utils::int_::value>{})); template auto getattr(types::attr::REAL, types::numpy_expr const &a) -> decltype(details::_make_real( a, utils::int_::dtype>::value>{})); template auto getattr(types::attr::REAL, types::numpy_texpr const &a) -> decltype( types::numpy_texpr{ getattr(types::attr::REAL{}, a.arg)}); template auto getattr(types::attr::REAL, types::numpy_iexpr const &a) -> decltype( types::numpy_iexpr{ getattr(types::attr::REAL{}, a.arg)}) { return {getattr(types::attr::REAL{}, a.arg)}; } template auto getattr(types::attr::REAL, types::numpy_vexpr const &a) -> decltype( types::numpy_vexpr{getattr(types::attr::REAL{}, a.data_), a.view_}) { return {getattr(types::attr::REAL{}, a.data_), a.view_}; } template auto getattr(types::attr::REAL, types::numpy_gexpr const &a) -> decltype( details::real_get(getattr(types::attr::REAL{}, a.arg), a.slices, utils::make_index_sequence< std::tuple_size::value>())) { return details::real_get(getattr(types::attr::REAL{}, a.arg), a.slices, utils::make_index_sequence< std::tuple_size::value>()); } template auto getattr(types::attr::IMAG, types::ndarray const &a) -> decltype( details::_make_imag(a, utils::int_::value>{})); template auto getattr(types::attr::IMAG, types::numpy_expr const &a) -> decltype(details::_make_imag( a, utils::int_::dtype>::value>{})); template auto getattr(types::attr::IMAG, types::numpy_texpr const &a) -> decltype( types::numpy_texpr{ getattr(types::attr::IMAG{}, a.arg)}); template auto geatttr(types::attr::IMAG, types::numpy_iexpr const &a) -> decltype( types::numpy_iexpr{ getattr(types::attr::IMAG{}, a.arg)}) { return {getattr(types::attr::IMAG{}, a.arg)}; } template auto 
getattr(types::attr::IMAG, types::numpy_vexpr const &a) -> decltype( types::numpy_vexpr{getattr(types::attr::IMAG{}, a.data_), a.view_}) { return {getattr(types::attr::IMAG{}, a.data_), a.view_}; } template auto getattr(types::attr::IMAG, types::numpy_gexpr const &a) -> decltype( details::imag_get(getattr(types::attr::IMAG{}, a.arg), a.slices, utils::make_index_sequence< std::tuple_size::value>())) { return details::imag_get(getattr(types::attr::IMAG{}, a.arg), a.slices, utils::make_index_sequence< std::tuple_size::value>()); } template types::dtype_t::type> getattr(types::attr::DTYPE, E const &); } PYTHONIC_NS_END /* } */ /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray< typename __combined::type, pythonic::sutils::common_shapes_t::value, pS1, pS2>>; }; template struct __combined, pythonic::types::numpy_expr> { using expr_type = pythonic::types::numpy_expr; using type = pythonic::types::ndarray< typename __combined::type, pythonic::sutils::common_shapes_t::value, pS, typename expr_type::shape_t>>; }; template struct __combined, O> { using type = pythonic::types::ndarray; }; template struct __combined, pythonic::types::none> { using type = pythonic::types::none< typename __combined, O>::type>; }; template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::none< typename __combined>::type>; }; template struct __combined, pythonic::types::none_type> { using type = pythonic::types::none>; }; template struct __combined> { using type = pythonic::types::none>; }; template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray; }; template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray; }; template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray; }; /* } */ #include "pythonic/include/types/numpy_operators.hpp" #ifdef 
ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(types::ndarray const &n, bool transpose = false); }; template struct to_python> { static PyObject *convert(types::numpy_iexpr const &v, bool transpose = false); }; template struct to_python> { static PyObject *convert(types::numpy_gexpr const &v, bool transpose = false); }; template struct to_python> { static PyObject *convert(types::numpy_texpr const &t, bool transpose = false) { auto const &n = t.arg; PyObject *result = to_python::convert(n, !transpose); return result; } }; template struct from_python> { static bool is_convertible(PyObject *obj); static types::ndarray convert(PyObject *obj); }; template struct from_python, S...>> { static bool is_convertible(PyObject *obj); static types::numpy_gexpr, S...> convert(PyObject *obj); }; template struct from_python const &, S...>> : from_python, S...>> { }; template struct from_python> { static bool is_convertible(PyObject *obj); static types::numpy_texpr convert(PyObject *obj); }; PYTHONIC_NS_END /* specialization of std::copy to avoid the multiple calls implied by the * recursive calls to std::copy */ namespace std { template typename pythonic::types::nditerator> copy( typename pythonic::types::const_nditerator< pythonic::types::ndarray> begin, typename pythonic::types::const_nditerator< pythonic::types::ndarray> end, typename pythonic::types::nditerator> out) { const long offset = pythonic::sutils::prod_tail(begin.data); std::copy(begin.data.buffer + begin.index * offset, end.data.buffer + end.index * offset, out.data.buffer + out.index * offset); return out + (end - begin); } } #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/nditerator.hpp000066400000000000000000000123551416264035500247520ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NDITERATOR_HPP #define PYTHONIC_INCLUDE_TYPES_NDITERATOR_HPP #include #ifdef USE_XSIMD #include #endif PYTHONIC_NS_BEGIN 
namespace types { struct fast { }; template auto fast_begin(T const &e) -> typename std::enable_if::value, decltype(e.begin(fast{}))>::type { return e.begin(fast{}); } template auto fast_begin(T const &e) -> typename std::enable_if::value, decltype(e.begin())>::type { return e.begin(); } template auto fast_end(T const &e) -> typename std::enable_if::value, decltype(e.end(fast{}))>::type { return e.end(fast{}); } template auto fast_end(T const &e) -> typename std::enable_if::value, decltype(e.end())>::type { return e.end(); } /* Iterator over whatever provides a fast(long) method to access its element */ template struct nditerator : public std::iterator().fast(0))>::type> { E &data; long index; nditerator(E &data, long index); auto operator*() -> decltype(data.fast(index)); auto operator*() const -> decltype(data.fast(index)); nditerator &operator++(); nditerator &operator--(); nditerator &operator+=(long i); nditerator &operator-=(long i); nditerator operator+(long i) const; nditerator operator-(long i) const; long operator-(nditerator const &other) const; bool operator!=(nditerator const &other) const; bool operator==(nditerator const &other) const; bool operator<(nditerator const &other) const; nditerator &operator=(nditerator const &other); }; /* Const iterator over whatever provides a fast(long) method to access its * element */ template struct const_nditerator : public std::iterator().fast(0))>::type> { E const &data; long index; const_nditerator(E const &data, long index); auto operator*() const -> decltype(data.fast(index)); const_nditerator &operator++(); const_nditerator &operator--(); const_nditerator &operator+=(long i); const_nditerator &operator-=(long i); const_nditerator operator+(long i) const; const_nditerator operator-(long i) const; long operator-(const_nditerator const &other) const; bool operator!=(const_nditerator const &other) const; bool operator==(const_nditerator const &other) const; bool operator<(const_nditerator const &other) const; 
const_nditerator &operator=(const_nditerator const &other); }; #ifdef USE_XSIMD template struct const_simd_nditerator : public std::iterator> { using vector_type = typename xsimd::simd_type; typename E::dtype const *data; static const std::size_t vector_size = vector_type::size; const_simd_nditerator(typename E::dtype const *data); auto operator*() const -> decltype(xsimd::load_unaligned(data)); const_simd_nditerator &operator++(); const_simd_nditerator &operator+=(long); const_simd_nditerator operator+(long); const_simd_nditerator &operator--(); long operator-(const_simd_nditerator const &other) const; bool operator!=(const_simd_nditerator const &other) const; bool operator==(const_simd_nditerator const &other) const; bool operator<(const_simd_nditerator const &other) const; const_simd_nditerator &operator=(const_simd_nditerator const &other); void store(xsimd::simd_type const &); }; template struct const_simd_nditerator_nostep : const_simd_nditerator { const_simd_nditerator_nostep &operator++() { return *this; } const_simd_nditerator_nostep &operator+=(long) { return *this; } const_simd_nditerator_nostep &operator--() { return *this; } const_simd_nditerator_nostep & operator=(const_simd_nditerator_nostep const &other) = default; }; #endif // build an iterator over T, selecting a raw pointer if possible template struct make_nditerator { template auto operator()(T &self, long i) -> decltype(nditerator(self, i)) const; }; template <> struct make_nditerator { template typename T::dtype *operator()(T &self, long i) const; }; template struct make_const_nditerator { template auto operator()(T const &self, long i) -> decltype(const_nditerator(self, i)) const; }; template <> struct make_const_nditerator { template typename T::dtype const *operator()(T const &self, long i) const; }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_binary_op.hpp000066400000000000000000000011731416264035500260050ustar00rootroot00000000000000#ifndef 
NUMPY_BINARY_FUNC_NAME #error NUMPY_BINARY_FUNC_NAME undefined #endif #ifndef NUMPY_BINARY_FUNC_SYM #error NUMPY_BINARY_FUNC_SYM undefined #endif template typename std::enable_if< types::valid_numop_parameters::type, typename std::decay::type>::value, types::numpy_expr::type, typename types::adapt_type::type>>::type NUMPY_BINARY_FUNC_NAME(E0 &&self, E1 &&other); #undef NUMPY_BINARY_FUNC_NAME #undef NUMPY_BINARY_FUNC_SYM pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_broadcast.hpp000066400000000000000000000204351416264035500257670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_BROADCAST_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_BROADCAST_HPP #ifdef USE_XSIMD #include #endif #include "pythonic/include/types/vectorizable_type.hpp" #include "pythonic/include/types/nditerator.hpp" #include "pythonic/include/types/slice.hpp" #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace types { template struct broadcasted_iterator : std::iterator::type> { T value_; broadcasted_iterator(T const &value) : value_(value) { } T const &operator*() const { return value_; } broadcasted_iterator &operator++() { return *this; } broadcasted_iterator &operator+=(long i) { return *this; } long operator-(broadcasted_iterator const &other) const { return 0; } bool operator!=(broadcasted_iterator const &other) const { return false; } bool operator==(broadcasted_iterator const &other) const { return true; } bool operator<(broadcasted_iterator const &other) const { return false; } }; /* Type adaptor for broadcasted array values * * Used when the args of a binary operator do not have the same dimensions: * in that case their first dimension always yields a copy */ template struct broadcasted { static const bool is_vectorizable = true; static const bool is_strided = false; using dtype = typename std::remove_reference::type::dtype; using value_type = typename std::remove_reference::type::value_type; static constexpr size_t value = 
std::remove_reference::type::value + 1; using const_iterator = broadcasted_iterator; using iterator = const_iterator; T ref; using shape_t = types::array; template long shape() const { return I == 0 ? 1 : (long)(ref.template shape < I == 0 ? 0 : (I - 1) > ()); } broadcasted() = default; template broadcasted(E &&other) : ref(std::forward(other)) { } const_iterator begin() const { return {ref}; } const_iterator end() const { return {ref}; } T const &operator[](long i) const; template typename std::enable_if::value, broadcasted const &>::type operator[](S s) const { return *this; } T const &fast(long i) const; template dtype load(long i, Indices... indices) const { return ref.load(indices...); } #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator_nostep; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif template typename std::enable_if::value, broadcasted const &>::type operator()(S s) const { return *this; } T operator()(long) const { return ref; } template auto operator()(long arg0, Arg1 &&arg1, Args &&... args) const -> decltype(ref(std::forward(arg1), std::forward(args)...)); template auto operator()(S arg0, Arg1 &&arg1, Args &&... 
args) const -> decltype(ref((arg0.step, std::forward(arg1)), std::forward(args)...)); long flat_size() const; }; /* Type adaptor for scalar values * * Have them behave like infinite arrays of that value * * B is the original type of the broadcast value, && T is the type of the *expression it is combined with * if both B && T are integer types, we choose T instead of B to prevent *automatic conversion into larger types * * That way, np.ones(10, dtype=np.uint8) + 1 yields an array of np.uint8, *although 1 is of type long */ template struct broadcast_base { dtype _value; struct ignored { } _splated; broadcast_base() = default; template broadcast_base(V v); template void load(I) const; }; #ifdef USE_XSIMD template struct broadcast_base { dtype _value; xsimd::simd_type _splated; broadcast_base() = default; template broadcast_base(V v); template auto load(I) const -> decltype(this->_splated); }; #endif template struct const_broadcast_iterator : public std::iterator { T value; const_broadcast_iterator(T data) : value{data} { } T operator*() const { return value; } const_broadcast_iterator &operator++() { return *this; } const_broadcast_iterator &operator--() { return *this; } const_broadcast_iterator &operator+=(long i) { return *this; } const_broadcast_iterator &operator-=(long i) { return *this; } const_broadcast_iterator operator+(long i) const { return *this; } const_broadcast_iterator operator-(long i) const { return *this; } long operator-(const_broadcast_iterator const &other) const { return 0; } bool operator!=(const_broadcast_iterator const &other) const { return false; } bool operator==(const_broadcast_iterator const &other) const { return true; } bool operator<(const_broadcast_iterator const &other) const { return false; } const_broadcast_iterator &operator=(const_broadcast_iterator const &other) { return *this; } }; template struct broadcast_dtype { using type = typename std::conditional::value && std::is_integral::value, T, typename __combined::type>::type; 
}; #ifndef USE_XSIMD template struct broadcast_dtype, B> { using type = T; }; template struct broadcast_dtype, std::complex> { using type = std::complex::type>; }; #endif template struct broadcast { // Perform the type conversion here if it seems valid (although it is ! // always) using dtype = typename broadcast_dtype::type; static const bool is_vectorizable = types::is_vectorizable::value; static const bool is_strided = false; using value_type = dtype; using const_iterator = const_broadcast_iterator; using iterator = const_iterator; static constexpr size_t value = 1; broadcast_base _base; operator dtype() const { return _base._value; } broadcast() = default; template broadcast(V v); dtype operator[](long) const; template dtype operator[](array) const; template typename std::enable_if::value, broadcast const &>::type operator[](S) const { return *this; } dtype fast(long) const; template dtype load(long i, Indices... indices) const { return _base._value; } template auto load(I i) const -> decltype(this->_base.load(i)); template dtype operator()(Args &&...) 
const; using shape_t = types::pshape>; template std::integral_constant shape() const; long flat_size() const; const_iterator begin() const { return {_base._value}; } const_iterator end() const { return {_base._value}; } #ifdef USE_XSIMD using simd_iterator = const_broadcast_iterator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const { return {_base._splated}; } template simd_iterator vend(vectorizer) const { return {_base._splated}; } #endif }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_expr.hpp000066400000000000000000000741311416264035500250050ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_EXPR_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_EXPR_HPP #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/types/nditerator.hpp" PYTHONIC_NS_BEGIN namespace types { template bool is_trivial_broadcast() { return std::is_same::type>::type::shape_t>::type, std::integral_constant>::value; } template struct count_non_integral; template <> struct count_non_integral : std::integral_constant { }; template struct count_non_integral : std::integral_constant { }; template struct count_non_integral : std::integral_constant::value + count_non_integral::value> { }; template struct is_perfect_stepping; template struct is_perfect_stepping> : std::integral_constant::value == 1> { }; template struct all_valid_indices; template struct all_valid_indices { using type = utils::index_sequence; }; template struct all_valid_indices : std::conditional<(value <= std::remove_reference::type>::type::value), all_valid_indices, all_valid_indices>::type { }; template using valid_indices = typename all_valid_indices::value>::type; template struct is_numexpr_arg; template struct numpy_gexpr; template struct Dereferencer { template auto operator()(Ts const &iters, utils::index_sequence) -> decltype(Op{}(*std::get(iters)...)) { return Op{}(*std::get(iters)...); } }; template struct step { using 
type = typename T::step_type; }; namespace details { template long init_shape_element(Args const &args, utils::index_sequence); } template struct numpy_expr_iterator : std::iterator()(*std::declval()...))>::type> { Steps steps_; std::tuple iters_; numpy_expr_iterator(Steps steps, Iters... iters) : steps_(steps), iters_(iters...) { } numpy_expr_iterator(numpy_expr_iterator const &other) : steps_(other.steps_), iters_(other.iters_) { } numpy_expr_iterator &operator=(numpy_expr_iterator const &other) { iters_ = other.iters_; return *this; } template auto _dereference(utils::index_sequence s) const -> decltype(Dereferencer{}(iters_, s)) { return Dereferencer{}(iters_, s); } auto operator*() const -> decltype( this->_dereference(utils::make_index_sequence{})) { return _dereference(utils::make_index_sequence{}); } template bool _incr_opt(std::integral_constant long_step) { if (is_perfect_stepping::value) ++std::get(iters_); else std::get(iters_) += std::get(steps_); return true; } template bool _incr_opt(std::integral_constant long_step) { if (std::tuple_element::type::value) ++std::get(iters_); return true; } template void _incr(utils::index_sequence) { (void)std::initializer_list{_incr_opt(std::integral_constant< bool, std::is_same::type>::value>{})...}; } numpy_expr_iterator &operator++() { _incr(utils::make_index_sequence{}); return *this; } numpy_expr_iterator operator+(long i) const { numpy_expr_iterator other(*this); return other += i; } template void _update(long i, utils::index_sequence) { (void)std::initializer_list{ (std::get(iters_) += std::get(steps_) * i, true)...}; } numpy_expr_iterator &operator+=(long i) { _update(i, utils::make_index_sequence{}); return *this; } template long _difference(numpy_expr_iterator const &other, utils::index_sequence) const { std::initializer_list distances{(static_cast( std::get(iters_) - std::get(other.iters_)))...}; return *std::max_element(distances.begin(), distances.end()); } long operator-(numpy_expr_iterator const 
&other) const { return _difference(other, utils::make_index_sequence{}); } bool _neq(numpy_expr_iterator const &other, utils::int_<0u>) const { return false; } template bool _neq(numpy_expr_iterator const &other, utils::int_) const { return (std::get(steps_) && (std::get(iters_) != std::get(other.iters_))) || _neq(other, utils::int_{}); } bool operator!=(numpy_expr_iterator const &other) const { return _neq(other, utils::int_{}); } bool _eq(numpy_expr_iterator const &other, utils::int_<0u>) const { return true; } template bool _eq(numpy_expr_iterator const &other, utils::int_) const { return (!std::get(steps_) || (std::get(iters_) == std::get(other.iters_))) && _eq(other, utils::int_{}); } bool operator==(numpy_expr_iterator const &other) const { return _eq(other, utils::int_{}); } bool _lt(numpy_expr_iterator const &other, utils::int_<0u>) const { return false; } template bool _lt(numpy_expr_iterator const &other, utils::int_) const { if (!std::get(steps_) || (std::get(iters_) == std::get(other.iters_))) return _lt(other, utils::int_{}); else return std::get(steps_) && (std::get(iters_) < std::get(other.iters_)); } bool operator<(numpy_expr_iterator const &other) const { return _lt(other, utils::int_{}); } }; #ifdef USE_XSIMD template struct numpy_expr_simd_iterator : std::iterator()(*std::declval()...))>::type> { Steps steps_; std::tuple iters_; SIters siters_; numpy_expr_simd_iterator(array steps, SIters const &siters, Iters... iters) : steps_(steps), iters_(iters...), siters_(siters) { } numpy_expr_simd_iterator(numpy_expr_simd_iterator const &other) : steps_(other.steps_), iters_(other.iters_), siters_(other.siters_) { } numpy_expr_simd_iterator &operator=(numpy_expr_simd_iterator const &other) { iters_ = other.iters_; siters_ = other.siters_; return *this; } template auto _dereference(utils::index_sequence) const -> decltype(Op{}(*std::get(iters_)...)) { return Op{}(((std::get(steps_)) ? 
(*std::get(iters_)) : (xsimd::simd_type(iters_))>( *std::get(siters_))))...); } auto operator*() const -> decltype( this->_dereference(utils::make_index_sequence{})) { return _dereference(utils::make_index_sequence{}); } template bool _incr_opt(std::integral_constant long_step) { if (is_perfect_stepping::value) ++std::get(iters_); else std::get(iters_) += std::get(steps_); return true; } template bool _incr_opt(std::integral_constant long_step) { if (std::tuple_element::type::value) ++std::get(iters_); return true; } template void _incr(utils::index_sequence) { (void)std::initializer_list{_incr_opt(std::integral_constant< bool, std::is_same::type>::value>{})...}; } numpy_expr_simd_iterator &operator++() { _incr(utils::make_index_sequence{}); return *this; } numpy_expr_simd_iterator operator+(long i) const { numpy_expr_simd_iterator other(*this); return other += i; } template void _update(long i, utils::index_sequence) { (void)std::initializer_list{ (std::get(iters_) += std::get(steps_) * i, true)...}; } numpy_expr_simd_iterator &operator+=(long i) { _update(i, utils::make_index_sequence{}); return *this; } template long _difference(numpy_expr_simd_iterator const &other, utils::index_sequence) const { std::initializer_list distances{ (std::get(iters_) - std::get(other.iters_))...}; return *std::max_element(distances.begin(), distances.end()); } long operator-(numpy_expr_simd_iterator const &other) const { return _difference(other, utils::make_index_sequence{}); } bool _neq(numpy_expr_simd_iterator const &other, utils::int_<0u>) const { return false; } template bool _neq(numpy_expr_simd_iterator const &other, utils::int_) const { return (std::get(steps_) && (std::get(iters_) != std::get(other.iters_))) || _neq(other, utils::int_{}); } bool operator!=(numpy_expr_simd_iterator const &other) const { return _neq(other, utils::int_{}); } bool _eq(numpy_expr_simd_iterator const &other, utils::int_<0u>) const { return true; } template bool _eq(numpy_expr_simd_iterator const 
&other, utils::int_) const { return (std::get(steps_) && (std::get(iters_) == std::get(other.iters_))) && _eq(other, utils::int_{}); } bool operator==(numpy_expr_simd_iterator const &other) const { return _eq(other, utils::int_{}); } bool _lt(numpy_expr_simd_iterator const &other, utils::int_<0u>) const { return false; } template bool _lt(numpy_expr_simd_iterator const &other, utils::int_) const { if (std::get(steps_) && (std::get(iters_) == std::get(other.iters_))) return _lt(other, utils::int_{}); else return std::get(steps_) && (std::get(iters_) < std::get(other.iters_)); } bool operator<(numpy_expr_simd_iterator const &other) const { return _lt(other, utils::int_{}); } }; template struct numpy_expr_simd_iterator_nobroadcast : std::iterator()(*std::declval()...))>::type> { std::tuple iters_; numpy_expr_simd_iterator_nobroadcast(Iters... iters) : iters_(iters...) { } numpy_expr_simd_iterator_nobroadcast( numpy_expr_simd_iterator_nobroadcast const &other) : iters_(other.iters_) { } numpy_expr_simd_iterator_nobroadcast & operator=(numpy_expr_simd_iterator_nobroadcast const &other) { iters_ = other.iters_; return *this; } template auto _dereference(utils::index_sequence) const -> decltype(Op{}(*std::get(iters_)...)) { return Op{}((*std::get(iters_))...); } auto operator*() const -> decltype( this->_dereference(utils::make_index_sequence{})) { return _dereference(utils::make_index_sequence{}); } template void _incr(utils::index_sequence) { (void)std::initializer_list{(++std::get(iters_), true)...}; } numpy_expr_simd_iterator_nobroadcast &operator++() { _incr(utils::make_index_sequence{}); return *this; } template long _difference(numpy_expr_simd_iterator_nobroadcast const &other, utils::index_sequence) const { std::initializer_list distances{ (std::get(iters_) - std::get(other.iters_))...}; return *std::max_element(distances.begin(), distances.end()); } long operator-(numpy_expr_simd_iterator_nobroadcast const &other) const { return _difference(other, 
utils::make_index_sequence{}); } numpy_expr_simd_iterator_nobroadcast operator+(long i) const { numpy_expr_simd_iterator_nobroadcast other(*this); return other += i; } template void _update(long i, utils::index_sequence) { (void)std::initializer_list{(std::get(iters_) += i, true)...}; } numpy_expr_simd_iterator_nobroadcast &operator+=(long i) { _update(i, utils::make_index_sequence{}); return *this; } bool _neq(numpy_expr_simd_iterator_nobroadcast const &other, utils::int_<0u>) const { return false; } template bool _neq(numpy_expr_simd_iterator_nobroadcast const &other, utils::int_) const { return (std::get(iters_) != std::get(other.iters_)) || _neq(other, utils::int_{}); } bool operator!=(numpy_expr_simd_iterator_nobroadcast const &other) const { return _neq(other, utils::int_{}); } bool _eq(numpy_expr_simd_iterator_nobroadcast const &other, utils::int_<0u>) const { return true; } template bool _eq(numpy_expr_simd_iterator_nobroadcast const &other, utils::int_) const { return (std::get(iters_) == std::get(other.iters_)) && _eq(other, utils::int_{}); } bool operator==(numpy_expr_simd_iterator_nobroadcast const &other) const { return _eq(other, utils::int_{}); } bool _lt(numpy_expr_simd_iterator_nobroadcast const &other, utils::int_<0u>) const { return false; } template bool _lt(numpy_expr_simd_iterator_nobroadcast const &other, utils::int_) const { if (std::get(iters_) == std::get(other.iters_)) return _lt(other, utils::int_{}); else return std::get(iters_) < std::get(other.iters_); } bool operator<(numpy_expr_simd_iterator_nobroadcast const &other) const { return _lt(other, utils::int_{}); } }; #endif template std::integral_constant make_step(std::integral_constant, std::integral_constant) { return {}; } template long make_step(T0 n0, T1 n1) { return (long)n0 == (long)n1; } template constexpr size_t count_none(size_t I) { return std::is_same::value; } template constexpr size_t count_none(size_t I) { return std::is_same::value + (I == 0 ? 
0 : count_none(I - 1)); } template using step_type_t = decltype(make_step(std::get<0>(std::declval()), std::get<0>(std::declval()))); constexpr size_t clamp(size_t i, size_t j) { return i > j ? j : i; } template auto make_subslice(utils::index_sequence, Arg const &arg, Shp const &shp, std::tuple const &ss) -> decltype(arg(std::get(ss)...)) { // we need to adapt_slice to take broadcasting into account return arg(adapt_slice(std::get(ss), shp.template shape(J)>(), arg.template shape(J), Arg::value - 1)>())...); } /* Expression template for numpy expressions - binary operators */ template struct numpy_expr { using first_arg = typename utils::front::type; static const bool is_vectorizable = utils::all_of< std::remove_reference::type::is_vectorizable...>::value && utils::all_of< std::is_same::type>::type::dtype, typename std::remove_cv::type>::type::dtype>::value...>::value && types::is_vector_op< Op, typename std::remove_reference::type::dtype...>::value; static const bool is_strided = utils::any_of::type::is_strided...>::value; static constexpr size_t value = utils::max_element::type::value...>::value; using value_type = decltype(Op()(std::declval< typename std::remove_reference::type::value_type>()...)); using dtype = decltype(Op()( std::declval::type::dtype>()...)); #ifdef CYTHON_ABI std::tuple::type...> args; #else std::tuple args; #endif using shape_t = sutils::merged_shapes_t< value, typename std::remove_reference::type::shape_t...>; using steps_t = pshape::type::shape_t>...>; static_assert(value == std::tuple_size::value, "consistent shape and size"); using const_iterator = numpy_expr_iterator< Op, steps_t, typename std::remove_reference::type::const_iterator...>; using iterator = numpy_expr_iterator< Op, steps_t, typename std::remove_reference::type::iterator...>; using const_fast_iterator = const_nditerator; numpy_expr() = default; numpy_expr(numpy_expr const &) = default; numpy_expr(numpy_expr &&) = default; #ifdef CYTHON_ABI template numpy_expr(numpy_expr 
const &other) : args(other.args) { } #endif numpy_expr(Args const &... args); template const_iterator _begin(utils::index_sequence) const; const_iterator begin() const; template const_iterator _end(utils::index_sequence) const; const_iterator end() const; const_fast_iterator begin(types::fast) const; const_fast_iterator end(types::fast) const; template iterator _begin(utils::index_sequence); iterator begin(); template iterator _end(utils::index_sequence); iterator end(); template auto _fast(long i, utils::index_sequence) const -> decltype(Op()(std::get(args).fast(i)...)) { return Op()(std::get(args).fast(i)...); } auto fast(long i) const -> decltype(this->_fast(i, utils::make_index_sequence{})); template auto _load(utils::index_sequence, Indices... indices) const -> decltype(Op()(std::get(args).load(indices...)...)) { return Op()(std::get(args).load(indices...)...); } template auto load(Indices... indices) const -> decltype(this->_load(utils::make_index_sequence{}, indices...)) { return this->_load(utils::make_index_sequence{}, indices...); } template auto _map_fast(array const &indices, utils::index_sequence) const -> decltype(Op()(std::get(args).fast(std::get(indices))...)) { return Op()(std::get(args).fast(std::get(indices))...); } template auto map_fast(Indices... 
indices) const -> decltype( this->_map_fast(array{{indices...}}, utils::make_index_sequence{})); public: template auto shape() const -> decltype(details::init_shape_element( args, valid_indices>{})) { return details::init_shape_element( args, valid_indices>{}); } template bool _no_broadcast(utils::index_sequence) const; bool no_broadcast() const; template bool _no_broadcast_vectorize(utils::index_sequence) const; bool no_broadcast_vectorize() const; template bool _no_broadcast_ex(utils::index_sequence) const; bool no_broadcast_ex() const; #ifdef USE_XSIMD using simd_iterator = numpy_expr_simd_iterator< numpy_expr, Op, pshape::type::shape_t>...>, std::tuple< typename std::remove_reference::type::const_iterator...>, typename std::remove_reference::type::simd_iterator...>; using simd_iterator_nobroadcast = numpy_expr_simd_iterator_nobroadcast< numpy_expr, Op, typename std::remove_reference< Args>::type::simd_iterator_nobroadcast...>; template simd_iterator _vbegin(types::vectorize, utils::index_sequence) const; simd_iterator vbegin(types::vectorize) const; template simd_iterator _vend(types::vectorize, utils::index_sequence) const; simd_iterator vend(types::vectorize) const; template simd_iterator_nobroadcast _vbegin(types::vectorize_nobroadcast, utils::index_sequence) const; simd_iterator_nobroadcast vbegin(types::vectorize_nobroadcast) const; template simd_iterator_nobroadcast _vend(types::vectorize_nobroadcast, utils::index_sequence) const; simd_iterator_nobroadcast vend(types::vectorize_nobroadcast) const; #endif template auto _get(utils::index_sequence is, S const &... s) const -> decltype( Op{}(make_subslice(utils::make_index_sequence{}, std::get(args), *this, std::make_tuple(s...))...)) { return Op{}(make_subslice(utils::make_index_sequence{}, std::get(args), *this, std::make_tuple(s...))...); } template auto operator()(S const &... 
s) const -> decltype(this->_get(utils::make_index_sequence{}, s...)); template typename std::enable_if< is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr>>>::type fast(F const &filter) const; template typename std::enable_if< is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr>>>::type operator[](F const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type operator[](F const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type fast(F const &filter) const; // FIXME: this does not take into account bounds and broadcasting auto operator[](long i) const -> decltype(this->fast(i)); template auto _index(S s, utils::index_sequence) const -> decltype(Op{}(std::get(args)[s]...)) { return Op{}(std::get(args)[s]...); } template auto operator[](S s) const -> decltype((*this) ._index((s.lower, s), utils::make_index_sequence{})) { return _index(s, utils::make_index_sequence{}); } dtype operator[](array const &indices) const { return _index(indices, utils::make_index_sequence{}); } explicit operator bool() const; long flat_size() const; long size() const; }; } template struct assignable> { using type = types::ndarray< typename pythonic::types::numpy_expr::dtype, typename pythonic::types::numpy_expr::shape_t>; }; template struct lazy> { using type = types::numpy_expr::type...>; }; PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, indexable> { using type = pythonic::types::numpy_expr; }; template struct __combined, pythonic::types::numpy_expr> { using type = pythonic::types::numpy_expr; }; template struct __combined, indexable_container> { using type = 
pythonic::types::numpy_expr; }; template struct __combined, pythonic::types::numpy_expr> { using type = pythonic::types::numpy_expr; }; template struct __combined, pythonic::types::numpy_expr> { using type = pythonic::types::numpy_expr; }; template struct __combined, container> { using type = pythonic::types::numpy_expr; }; template struct __combined, pythonic::types::numpy_expr> { using type = pythonic::types::ndarray< typename pythonic::types::numpy_expr::dtype, pythonic::types::array::value>>; }; template struct __combined, pythonic::types::numpy_expr> { using type = pythonic::types::numpy_iexpr; }; template struct __combined, pythonic::types::numpy_iexpr> { using type = pythonic::types::numpy_iexpr; }; template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray; }; template struct __combined, pythonic::types::numpy_texpr> { using type = pythonic::types::ndarray< typename pythonic::types::numpy_expr::dtype, pythonic::types::array::value>>; }; template struct __combined, pythonic::types::numpy_expr> { using type = pythonic::types::ndarray< typename pythonic::types::numpy_expr::dtype, pythonic::types::array::value>>; }; /*}*/ #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_gexpr.hpp000066400000000000000000001010261416264035500251460ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_GEXPR_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_GEXPR_HPP #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/utils/array_helper.hpp" PYTHONIC_NS_BEGIN namespace types { /* helper to count new axis */ template struct count_new_axis; template <> struct count_new_axis<> { static constexpr size_t value = 0; }; template <> struct count_new_axis { static constexpr size_t value = 1; }; template struct count_new_axis { static constexpr size_t value = 0; }; template struct count_new_axis { static constexpr size_t value = count_new_axis::value + count_new_axis::value; }; /* helper to turn a new axis into a slice */ 
template struct to_slice { using type = T; static constexpr bool is_new_axis = false; T operator()(T value); }; template <> struct to_slice { using type = fast_contiguous_slice; static constexpr bool is_new_axis = true; fast_contiguous_slice operator()(none_type); }; template struct to_normalized_slice { using type = T; T operator()(T value); }; template <> struct to_normalized_slice { using type = contiguous_normalized_slice; contiguous_normalized_slice operator()(none_type); }; /* helper to build a new shape out of a shape and a slice with new axis */ template auto make_reshape(pS const &shape, IsNewAxis is_new_axis) -> decltype(sutils::copy_new_axis(shape, is_new_axis)); /* helper to build an extended slice aka numpy_gexpr out of a subscript */ template struct extended_slice { template auto operator()(E &&expr, S const &... s) -> decltype(std::forward(expr).reshape(make_reshape( expr, std::tuple< std::integral_constant::is_new_axis>...>()))( to_slice{}(s)...)) { return std::forward(expr).reshape(make_reshape( expr, std::tuple< std::integral_constant::is_new_axis>...>()))( to_slice{}(s)...); } }; template <> struct extended_slice<0> { template auto operator()(E &&expr, long const &s0, S const &... s) -> typename std::enable_if< utils::all_of::value...>::value, decltype(std::forward(expr)[types::make_tuple(s0, s...)])>::type { return std::forward(expr)[types::make_tuple(s0, s...)]; } template auto operator()(E &&expr, long const &s0, S const &... s) -> typename std::enable_if< !utils::all_of::value...>::value, decltype(std::forward(expr)[s0](s...))>::type { return std::forward(expr)[s0](s...); } template numpy_gexpr::type, normalize_t...> fwd(E &&expr, std::tuple const &s, utils::index_sequence) { return {std::forward(expr), std::get(s).normalize(expr.template shape())...}; } template typename std::enable_if< is_slice::value, numpy_gexpr, normalize_t...>>::type operator()(E &&expr, Sp const &s0, S const &... 
s) { return make_gexpr(std::forward(expr), s0, s...); } template typename std::enable_if< !is_slice::value, numpy_gexpr::type::dtype, array::type::value>>, contiguous_normalized_slice, normalize_t...>>::type operator()(E &&expr, F const &s0, S const &... s) { return numpy_vexpr::type::dtype, array::type::value>>, F>{std::forward(expr), s0}( fast_contiguous_slice(none_type{}, none_type{}), s...); } }; /* Meta-Function to count the number of slices in a type list */ template struct count_long; template <> struct count_long { static constexpr size_t value = 1; }; template <> struct count_long { static constexpr size_t value = 0; }; template <> struct count_long { static constexpr size_t value = 0; }; template struct count_long { static constexpr size_t value = count_long::value + count_long::value; }; template <> struct count_long<> { static constexpr size_t value = 0; }; /* helper to get the type of the nth element of an array */ template struct nth_value_type { using type = typename nth_value_type::type; }; template struct nth_value_type { using type = T; }; /* helper that yields true if the first slice of a pack is a contiguous * slice */ template struct is_contiguous { static const bool value = false; }; template struct is_contiguous { static const bool value = true; }; /* numpy_gexpr factory * * replaces the constructor, in order to properly merge gexpr composition *into a single gexpr */ namespace details { template std::tuple::type...> tuple_push_head(T const &val, Ts const &vals, utils::index_sequence) { return std::tuple::type...>{ val, std::get(vals)...}; } template auto tuple_push_head(T const &val, Ts const &vals) -> decltype(tuple_push_head( val, vals, utils::make_index_sequence::value>())) { return tuple_push_head( val, vals, utils::make_index_sequence::value>()); } // this struct is specialized for every type combination && takes care of // the slice merge template struct merge_gexpr; template <> struct merge_gexpr, std::tuple<>> { template std::tuple<> 
run(S const &, std::tuple<> const &t0, std::tuple<> const &); }; template struct merge_gexpr, std::tuple<>> { template std::tuple run(S const &, std::tuple const &t0, std::tuple<>); static_assert( utils::all_of>::value...>::value, "all slices are normalized"); }; template struct merge_gexpr, std::tuple> { template std::tuple...> run(S const &, std::tuple<>, std::tuple const &t1); }; template struct merge_gexpr, std::tuple> { template auto run(S const &s, std::tuple const &t0, std::tuple const &t1) -> decltype(tuple_push_head( std::get<0>(t0) * std::get<0>(t1), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), tuple_tail(t1)))) { return tuple_push_head( std::get<0>(t0) * std::get<0>(t1), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), tuple_tail(t1))); } static_assert( std::is_same() * std::declval()), normalize_t() * std::declval())>>::value, "all slices are normalized"); }; template struct merge_gexpr, std::tuple> { template auto run(S const &s, std::tuple const &t0, std::tuple const &t1) -> decltype(tuple_push_head( std::get<0>(t1), merge_gexpr, std::tuple>{} .template run(s, t0, tuple_tail(t1)))) { return tuple_push_head( std::get<0>(t1), merge_gexpr, std::tuple>{} .template run(s, t0, tuple_tail(t1))); } }; template struct merge_gexpr, std::tuple> { template auto run(S const &s, std::tuple const &t0, std::tuple const &t1) -> decltype(tuple_push_head( std::get<0>(t0), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), t1))) { return tuple_push_head( std::get<0>(t0), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), t1)); } }; template struct merge_gexpr, std::tuple> { template auto run(S const &s, std::tuple const &t0, std::tuple const &t1) -> decltype(tuple_push_head( std::get<0>(t0), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), t1))) { return tuple_push_head( std::get<0>(t0), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), t1)); } }; template struct merge_gexpr, std::tuple> { template auto 
run(S const &s, std::tuple const &t0, std::tuple const &t1) -> decltype(tuple_push_head( std::get<0>(t1) * std::get<0>(t0).step + std::get<0>(t0).lower, merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), tuple_tail(t1)))) { return tuple_push_head( std::get<0>(t1) * std::get<0>(t0).step + std::get<0>(t0).lower, merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), tuple_tail(t1))); } }; template struct merge_gexpr, std::tuple> { template auto run(S const &s, std::tuple const &t0, std::tuple const &t1) -> decltype(tuple_push_head( std::get<0>(t0), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), t1))) { return tuple_push_head( std::get<0>(t0), merge_gexpr, std::tuple>{} .template run(s, tuple_tail(t0), t1)); } }; template typename std::enable_if::value == 0, numpy_gexpr>::type _make_gexpr(Arg arg, std::tuple const &t); template numpy_gexpr::type>::type...> _make_gexpr_helper(Arg arg, S const &s, utils::index_sequence); template auto _make_gexpr(Arg arg, std::tuple const &s) -> typename std::enable_if< count_new_axis::value != 0, decltype(_make_gexpr_helper( arg.reshape(make_reshape::value>( arg, std::tuple::is_new_axis>...>())), s, utils::make_index_sequence()))>::type; template struct make_gexpr { template numpy_gexpr...> operator()(Arg arg, std::tuple, utils::index_sequence); numpy_gexpr...> operator()(Arg arg, S const &... s); }; // this specialization is in charge of merging gexpr template struct make_gexpr const &, Sp...> { auto operator()(numpy_gexpr const &arg, Sp const &... s) -> decltype( _make_gexpr(std::declval(), merge_gexpr, std::tuple>{} .template run<0>(arg, std::tuple(), std::tuple()))) { return _make_gexpr( arg.arg, merge_gexpr, std::tuple>{}.template run<0>( arg, arg.slices, std::make_tuple(s...))); } }; } template auto make_gexpr(Arg &&arg, S const &... 
s) -> decltype(details::make_gexpr{}(std::forward(arg), s...)); /* type-based compile time overlapping detection: detect if a type may *overlap with another * the goal is to detect whether the following operation * * a[...] = b * * requires a copy. * * It requires a copy if b = a[...], as in * * a[1:] = a[:-1] * * because this is *!* equivalent to for i in range(0, n-1): a[i+1] = a[i] * * to avoid the copy, we rely on the lhs type */ template struct may_overlap_gexpr : std::integral_constant::value> { }; template struct may_overlap_gexpr> : std::false_type { }; template struct may_overlap_gexpr> : std::false_type { }; template struct may_overlap_gexpr : may_overlap_gexpr { }; template struct may_overlap_gexpr : may_overlap_gexpr { }; template struct may_overlap_gexpr> : std::false_type { }; template struct may_overlap_gexpr> : may_overlap_gexpr { }; template struct may_overlap_gexpr> : may_overlap_gexpr { }; template struct may_overlap_gexpr> : std::integral_constant::value> { }; template struct may_overlap_gexpr> : may_overlap_gexpr { }; template struct may_overlap_gexpr> : utils::any_of::value...> { }; template struct gexpr_shape; template struct gexpr_shape, pshape> { using type = pshape; }; template struct gexpr_shape, array> { using type = pshape; }; template struct gexpr_shape, array> : gexpr_shape, array> { }; template struct gexpr_shape, pshape, oTys...>, contiguous_normalized_slice, S...> : gexpr_shape>, pshape, S...> { }; template struct gexpr_shape, pshape, oTys...>, normalized_slice, S...> : gexpr_shape>, pshape, S...> { }; template struct gexpr_shape, pshape, long, S...> : gexpr_shape, pshape, S...> { }; template struct gexpr_shape, pshape, cS, S...> : gexpr_shape, pshape, S...> { }; template struct gexpr_shape, array, long, S...> : gexpr_shape, array, S...> { }; template struct gexpr_shape, array, cS, S...> : gexpr_shape, array, S...> { }; template using gexpr_shape_t = typename gexpr_shape, pS, S...>::type; /* Expression template for numpy 
expressions - extended slicing operators */ template struct numpy_gexpr { static_assert( utils::all_of>::value...>::value, "all slices are normalized"); static_assert( utils::all_of<(std::is_same::value || std::is_same::value || std::is_same::value)...>::value, "all slices are valid"); static_assert(std::decay::type::value >= sizeof...(S), "slicing respects array shape"); // numpy_gexpr is a wrapper for extended sliced array around a numpy // expression. // It contains compacted sorted slices value in lower, step && upper is // the same as shape. // indices for long index are store in the indices array. // position for slice and long value in the extended slice can be found // through the S... template // && compacted values as we know that first S is a slice. static_assert( utils::all_of< std::is_same::type>::value...>::value, "no modifiers on slices"); using dtype = typename std::remove_reference::type::dtype; static constexpr size_t value = std::remove_reference::type::value - count_long::value; // It is not possible to vectorize everything. We only vectorize if the // last dimension is contiguous, which happens if // 1. Arg is an ndarray (this is too strict) // 2. 
the size of the gexpr is lower than the dim of arg, || it's the // same, but the last slice is contiguous static const bool is_vectorizable = std::remove_reference::type::is_vectorizable && (sizeof...(S) < std::remove_reference::type::value || std::is_same>::type>::value); static const bool is_strided = std::remove_reference::type::is_strided || (((sizeof...(S)-count_long::value) == value) && !std::is_same>::type>::value); using value_type = typename std::decay::get(std::declval(), 1))>::type; using iterator = typename std::conditional, dtype *>::type; using const_iterator = typename std::conditional, dtype const *>::type; typename std::remove_cv::type arg; std::tuple slices; using shape_t = gexpr_shape_t::type::shape_t, S...>; shape_t _shape; dtype *buffer; array _strides; template auto shape() const -> decltype(std::get(_shape)) { return std::get(_shape); } template auto strides() const -> decltype(std::get(_strides)) { return std::get(_strides); } numpy_gexpr(); numpy_gexpr(numpy_gexpr const &) = default; numpy_gexpr(numpy_gexpr &&) = default; template // ! 
using the default one, to make it possible to // accept reference && non reference version of // Argp numpy_gexpr(numpy_gexpr const &other); template typename std::enable_if< std::is_same::value || std::is_same::value, void>::type init_shape(Slice const &s, utils::int_<1>, utils::int_); template typename std::enable_if< std::is_same::value || std::is_same::value, void>::type init_shape(Slice const &s, utils::int_, utils::int_); template void init_shape(long cs, utils::int_<1>, utils::int_); template void init_shape(long cs, utils::int_, utils::int_); // private because we must use the make_gexpr factory to create a gexpr private: template friend struct details::make_gexpr; friend struct array_base_slicer; template friend typename std::enable_if::value == 0, numpy_gexpr<_Arg, _other_classes...>>::type details::_make_gexpr(_Arg arg, std::tuple<_other_classes...> const &t); template friend numpy_gexpr<_Arg, typename to_normalized_slice::type>::type...> details::_make_gexpr_helper(_Arg arg, _other_classes const &s, utils::index_sequence); template friend struct extended_slice; #ifdef ENABLE_PYTHON_MODULE template friend struct pythonic::from_python; #endif // When we create a new numpy_gexpr, we deduce step, lower && shape from // slices // && indices from long value. // Also, last shape information are set from origin array like in : // >>> a = numpy.arange(2*3*4).reshape(2,3,4) // >>> a[:, 1] // the last dimension (4) is missing from slice information // Finally, if origin expression was already sliced, lower bound && step // have to // be increased numpy_gexpr(Arg const &arg, std::tuple const &values); numpy_gexpr(Arg const &arg, S const &... 
s); public: template numpy_gexpr(numpy_gexpr const &expr, Arg arg); template numpy_gexpr(G const &expr, Arg &&arg); template ndarray reshape(pS const &shape) const { return copy().reshape(shape); } template typename std::enable_if::value, numpy_gexpr &>::type _copy(E const &expr); template typename std::enable_if::value, numpy_gexpr &>::type _copy(E const &expr); template numpy_gexpr &operator=(E const &expr); numpy_gexpr &operator=(numpy_gexpr const &expr); template numpy_gexpr &operator=(numpy_gexpr const &expr); template typename std::enable_if::value, numpy_gexpr &>::type update_(E const &expr); template typename std::enable_if::value, numpy_gexpr &>::type update_(E const &expr); template numpy_gexpr &operator+=(E const &expr); numpy_gexpr &operator+=(numpy_gexpr const &expr); template numpy_gexpr &operator-=(E const &expr); numpy_gexpr &operator-=(numpy_gexpr const &expr); template numpy_gexpr &operator*=(E const &expr); numpy_gexpr &operator*=(numpy_gexpr const &expr); template numpy_gexpr &operator/=(E const &expr); numpy_gexpr &operator/=(numpy_gexpr const &expr); template numpy_gexpr &operator|=(E const &expr); numpy_gexpr &operator|=(numpy_gexpr const &expr); template numpy_gexpr &operator&=(E const &expr); numpy_gexpr &operator&=(numpy_gexpr const &expr); template numpy_gexpr &operator^=(E const &expr); numpy_gexpr &operator^=(numpy_gexpr const &expr); const_iterator begin() const; const_iterator end() const; iterator begin(); iterator end(); auto fast(long i) const & -> decltype(numpy_iexpr_helper::get(*this, i)) { return numpy_iexpr_helper::get(*this, i); } auto fast(long i) & -> decltype(numpy_iexpr_helper::get(*this, i)) { return numpy_iexpr_helper::get(*this, i); } template void store(E elt, Indices... indices) { static_assert(is_dtype::value, "valid store"); *(buffer + noffset{}(*this, array{{indices...}})) = static_cast(elt); } template dtype load(Indices... 
indices) const { return *(buffer + noffset{}(*this, array{{indices...}})); } template void update(E elt, Indices... indices) const { static_assert(is_dtype::value, "valid store"); Op{}( *(buffer + noffset{}(*this, array{{indices...}})), static_cast(elt)); } #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif template auto operator()(Sp const &... s) const -> decltype(make_gexpr(*this, s...)); template auto operator[](Sp const &s) const -> typename std::enable_if< is_slice::value, decltype(make_gexpr(*this, (s.lower, s)))>::type; template auto fast(array const &indices) const & -> decltype(nget().fast(*this, indices)); template auto fast(array const &indices) && -> decltype(nget().fast(std::move(*this), indices)); template auto operator[](array const &indices) const & -> decltype(nget()(*this, indices)); template auto operator[](array const &indices) && -> decltype(nget()(std::move(*this), indices)); template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value, numpy_vexpr>::type operator[](F const &filter) const { return {*this, filter}; } template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value, numpy_vexpr>::type fast(F const &filter) const { return {*this, filter}; } template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr>>>::type fast(F const &filter) const; template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr>>>::type operator[](F const &filter) const; auto operator[](long i) const -> decltype(this->fast(i)); auto operator[](long i) -> decltype(this->fast(i)); // template // auto operator()(long i, Sp const &... 
s) const // -> decltype((*this)[i](s...)); explicit operator bool() const; long flat_size() const; long size() const; ndarray copy() const { return {*this}; } }; } template struct assignable_noescape> { using type = types::numpy_gexpr; }; template struct assignable const &, S...>> { using type = types::numpy_gexpr, S...>; }; template struct assignable &, S...>> { using type = types::numpy_gexpr, S...>; }; template struct assignable> { using type = types::numpy_gexpr::type, S...>; }; template struct lazy> : assignable> { }; PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, pythonic::types::numpy_gexpr> { using type = pythonic::types::numpy_gexpr; }; template struct __combined, pythonic::types::numpy_gexpr> { using t0 = pythonic::types::numpy_gexpr; using t1 = pythonic::types::numpy_gexpr; using type = pythonic::types::ndarray < typename __combined::type, pythonic::types::array < long, t0::value < t1::value ? t1::value : t0::value >> ; }; template struct __combined, O> { using type = pythonic::types::numpy_gexpr; }; template struct __combined, pythonic::types::numpy_gexpr> { using type = pythonic::types::list::value_type, T>::type>; }; template struct __combined, pythonic::types::list> { using type = pythonic::types::list::value_type, T>::type>; }; template struct __combined, pythonic::types::none_type> { using type = pythonic::types::none>; }; template struct __combined> { using type = pythonic::types::numpy_gexpr; }; template struct __combined> { using type = pythonic::types::none>; }; /* combined are sorted such that the assigned type comes first */ template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray; }; template struct __combined, pythonic::types::numpy_gexpr> { using type = pythonic::types::ndarray; }; #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_iexpr.hpp000066400000000000000000000352771416264035500251660ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_IEXPR_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_IEXPR_HPP #include "pythonic/include/types/nditerator.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/utils/array_helper.hpp" #include PYTHONIC_NS_BEGIN namespace types { template struct noffset { template long operator()(S const &strides, array const &indices) const; template long operator()(S const &strides, array const &indices, pS const &shape) const; }; template struct numpy_gexpr; /* Expression template for numpy expressions - indexing */ template struct numpy_iexpr_helper; template // Arg often is a reference, e.g. for something as // simple as a[i] struct numpy_iexpr { // wrapper around another numpy expression to skip first dimension using a // given value. static constexpr size_t value = std::remove_reference::type::value - 1; static const bool is_vectorizable = std::remove_reference::type::is_vectorizable; using dtype = typename std::remove_reference::type::dtype; using value_type = typename std::remove_reference::get(std::declval(), 0L))>::type; static constexpr bool is_strided = std::remove_reference::type::is_strided; using iterator = typename std::conditional, dtype *>::type; using const_iterator = typename std::conditional, dtype const *>::type; Arg arg; dtype *buffer; using shape_t = sutils::pop_head_t::type::shape_t>; numpy_iexpr(); numpy_iexpr(numpy_iexpr const &) = default; numpy_iexpr(numpy_iexpr &&) = default; template numpy_iexpr(numpy_iexpr const &other); template numpy_iexpr(numpy_iexpr const &other); numpy_iexpr(Arg const &arg, long index); numpy_iexpr(Arg const &arg, long index, dtype *b); long size() const; template struct is_almost_same : std::false_type { }; template struct is_almost_same> : std::integral_constant< bool, !std::is_same::value && std::is_same::type, typename 
std::decay::type>::value> { }; template ::value, void>::type> numpy_iexpr &operator=(E const &expr); template >::value, void>::type> numpy_iexpr &operator=(numpy_iexpr const &expr); numpy_iexpr &operator=(numpy_iexpr const &expr); template numpy_iexpr &update_(E const &expr); template numpy_iexpr &operator+=(E const &expr); numpy_iexpr &operator+=(numpy_iexpr const &expr); template numpy_iexpr &operator-=(E const &expr); numpy_iexpr &operator-=(numpy_iexpr const &expr); template numpy_iexpr &operator*=(E const &expr); numpy_iexpr &operator*=(numpy_iexpr const &expr); template numpy_iexpr &operator/=(E const &expr); numpy_iexpr &operator/=(numpy_iexpr const &expr); template numpy_iexpr &operator&=(E const &expr); numpy_iexpr &operator&=(numpy_iexpr const &expr); template numpy_iexpr &operator|=(E const &expr); numpy_iexpr &operator|=(numpy_iexpr const &expr); template numpy_iexpr &operator^=(E const &expr); numpy_iexpr &operator^=(numpy_iexpr const &expr); const_iterator begin() const; const_iterator end() const; iterator begin(); iterator end(); dtype const *fbegin() const; dtype const *fend() const; dtype *fbegin(); dtype const *fend(); /* There are three kind of indexing operator: fast(long), [long] && *(long): * - fast does ! perform automatic bound wrapping * - [] performs automatic bound wrapping, hen forwards to fast * - () is an alias to [] && directly forwards to [] * * For each indexing operator, we have three variant: &, const& && &&: * - & means the numpy_iexpr has been bound to a non-const value, as in *``b=a[i] ; print b[j]`` * in that case the return type if the dim of a is 2 is a reference, to *allow ``b[j] = 1`` * - const & means the numpy_iexpr has been bound to a const value, as in *``np.copy(a[i])`` * in that case the return type if the dim of a is 2 is a value (|| *const ref) * - && means the numpy_iexpr is a r-value, which happens a lot, as in *``a[i][j]`` * in that case the return type if the dim of a is 2 is a reference. 
* It is a bit weird because we return a refrence from a rvalue, but the *reference is bound to * the buffer of ``a`` that is ! temp. */ auto fast(long i) const & -> decltype(numpy_iexpr_helper::get(*this, i)) { return numpy_iexpr_helper::get(*this, i); } auto fast(long i) & -> decltype(numpy_iexpr_helper::get(*this, i)) { return numpy_iexpr_helper::get(*this, i); } auto fast(long i) && -> decltype(numpy_iexpr_helper::get(std::move(*this), i)) { return numpy_iexpr_helper::get(std::move(*this), i); } dtype const &fast(array const &indices) const; dtype &fast(array const &indices); template auto fast(array const &indices) const -> decltype(nget()(*this, indices)) { return nget()(*this, indices); } template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr>>>::type fast(F const &filter) const; template void store(E elt, Indices... indices) { static_assert(is_dtype::value, "valid store"); assert(buffer); *(buffer + noffset{}(*this, array{{indices...}})) = static_cast(elt); } template dtype load(Indices... indices) const { assert(buffer); return *(buffer + noffset{}(*this, array{{indices...}})); } template void update(E elt, Indices... indices) const { static_assert(is_dtype::value, "valid store"); assert(buffer); Op{}( *(buffer + noffset{}(*this, array{{indices...}})), static_cast(elt)); } #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif template typename std::enable_if< is_slice::value, numpy_gexpr, normalize_t...>>::type operator()(Sp const &s0, S const &... s) const; template auto operator()(long s0, S const &... 
s) const -> decltype(std::declval>()(s...)) { return (*this)[s0](s...); } template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr>>>::type operator[](F const &filter) const; auto operator[](long i) const & -> decltype(this->fast(i)); auto operator[](long i) & -> decltype(this->fast(i)); auto operator[](long i) && -> decltype(std::move(*this).fast(i)); template typename std::enable_if::value, numpy_gexpr>>::type operator[](Sp const &s0) const; dtype const &operator[](array const &indices) const; dtype &operator[](array const &indices); template auto operator[](array const &indices) const & -> decltype(nget()(*this, indices)) { return nget()(*this, indices); } explicit operator bool() const; long flat_size() const; template auto shape() const -> decltype(arg.template shape()) { return arg.template shape(); } template auto strides() const -> decltype(arg.template strides()) { return arg.template strides(); } template auto reshape(pS const &new_shape) const -> numpy_iexpr< decltype(std::declval().reshape(std::declval::type::shape_t>::type>>()))> { assert(buffer); sutils::push_front_t< pS, typename std::tuple_element< 0, typename std::decay::type::shape_t>::type> fixed_new_shape; sutils::scopy_shape<1, -1>( fixed_new_shape, new_shape, utils::make_index_sequence::value>{}); sutils::assign(std::get<0>(fixed_new_shape), arg.template shape<0>()); return numpy_iexpr( arg.reshape(fixed_new_shape), (buffer - arg.buffer) / arg.template strides<0>()); } ndarray copy() const { return {*this}; } template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type operator[](F const &filter) const { return {*this, filter}; } template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type operator[](F const &filter) 
{ return {*this, filter}; } template auto operator[](std::tuple const &index) const -> decltype((*this)[std::get<0>(index)]) { return (*this)[std::get<0>(index)]; } private: /* compute the buffer offset, returning the offset between the * first element of the iexpr and the start of the buffer. * This used to be a plain loop, but g++ fails to unroll it, while it * unrolls it with the template version... */ long buffer_offset(Arg const &shape, long index, utils::int_<0>); template long buffer_offset(ndarray const &arg, long index, utils::int_); template long buffer_offset(E const &arg, long index, utils::int_); }; // Indexing an numpy_iexpr that has a dimension greater than one yields a // new numpy_iexpr template struct numpy_iexpr_helper { template static numpy_iexpr get(T &&e, long i); }; // Indexing an iexpr that has a dimension of one yields a qualified scalar. // The qualifier is either: // - a reference if the numpy_iexpr is a ref itself, as in ``b = a[i] ; b[i] // = 1`` // - a reference if the numpy_iexpr is a r-value, as in ``a[i][j] = 1`` // - a value if the numpy_iexpr is a const ref, as in ``b = a[i] ; c = // b[i]`` template <> struct numpy_iexpr_helper<1> { template static typename T::dtype &get(T const &e, long i); template static typename T::dtype &get(T &&e, long i); template static typename T::dtype &get(T &e, long i); }; } template struct assignable_noescape> { using type = types::numpy_iexpr; }; template struct assignable> { using type = types::numpy_iexpr::type>; }; template struct assignable &>> { using type = types::numpy_iexpr>; }; template struct assignable>> { using type = types::numpy_iexpr>; }; template struct returnable> { using type = types::numpy_iexpr::type>; }; template struct lazy> : assignable> { }; PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, indexable> { using type = pythonic::types::numpy_iexpr; }; template struct __combined, pythonic::types::numpy_iexpr> { 
using type = pythonic::types::numpy_iexpr; }; template struct __combined, indexable_container> { using type = pythonic::types::numpy_iexpr; }; template struct __combined, pythonic::types::numpy_iexpr> { using type = pythonic::types::numpy_iexpr; }; template struct __combined, pythonic::types::numpy_iexpr> { using type = pythonic::types::numpy_iexpr; }; template struct __combined, container> { using type = pythonic::types::numpy_iexpr; }; template struct __combined, pythonic::types::numpy_iexpr> { using type = pythonic::types::numpy_iexpr::type>; }; template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray; }; /*}*/ #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_nary_expr.hpp000066400000000000000000000024511416264035500260320ustar00rootroot00000000000000#ifndef NUMPY_NARY_FUNC_NAME #error NUMPY_NARY_FUNC_NAME undefined #endif #ifndef NUMPY_NARY_FUNC_SYM #error NUMPY_NARY_FUNC_SYM undefined #endif #ifndef NUMPY_NARY_RESHAPE_MODE #define NUMPY_NARY_RESHAPE_MODE adapt_type #endif #ifndef NUMPY_NARY_EXTRA_METHOD #define NUMPY_NARY_EXTRA_METHOD #endif #define STR(a) STR_(a) namespace functor { struct NUMPY_NARY_FUNC_NAME { using callable = void; // We accept implementation here NUMPY_NARY_EXTRA_METHOD template auto operator()(T &&... args) const -> typename std::enable_if< !types::valid_numexpr_parameters< typename std::decay::type...>::value, decltype(NUMPY_NARY_FUNC_SYM(std::forward(args)...))>::type; template typename std::enable_if< types::valid_numexpr_parameters::type...>::value, types::numpy_expr< NUMPY_NARY_FUNC_NAME, typename types::NUMPY_NARY_RESHAPE_MODE::type...>>::type operator()(E &&... 
args) const; friend std::ostream &operator<<(std::ostream &os, NUMPY_NARY_FUNC_NAME) { return os << STR(NUMPY_NARY_FUNC_NAME); } }; } #undef NUMPY_NARY_FUNC_NAME #undef NUMPY_NARY_FUNC_SYM #undef NUMPY_NARY_RESHAPE_MODE #undef NUMPY_NARY_EXTRA_METHOD #undef STR pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_op_helper.hpp000066400000000000000000000143351416264035500260040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_OP_HELPER_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_OP_HELPER_HPP #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/utils/meta.hpp" PYTHONIC_NS_BEGIN namespace types { template struct all_valid_arg { static constexpr bool value = all_valid_arg::value && all_valid_arg::value; }; template struct all_valid_arg { static constexpr bool value = (is_numexpr_arg::value || is_complex::value || std::is_scalar::value); }; template struct any_numexpr_arg { static constexpr bool value = any_numexpr_arg::value || any_numexpr_arg::value; }; template struct any_numexpr_arg : is_numexpr_arg { }; template struct valid_numexpr_parameters { static constexpr bool value = any_numexpr_arg::value && all_valid_arg::value; }; template <> struct valid_numexpr_parameters<> : std::false_type { }; template struct any_numop_arg { static constexpr bool value = any_numop_arg::value || any_numop_arg::value; }; template struct any_numop_arg : is_numexpr_arg { }; template <> struct any_numop_arg : std::false_type { }; template struct any_numop_arg> : std::false_type { }; template struct any_numop_arg> : std::false_type { }; template struct any_numop_arg> : std::false_type { }; template struct any_numop_arg> : std::false_type { }; template struct valid_numop_parameters { static constexpr bool value = any_numop_arg::value && all_valid_arg::value; }; template <> struct valid_numop_parameters<> : std::false_type { }; template ::value, bool T0_number = is_dtype::value, bool T1_number = is_dtype::value> struct the_common_type { using 
type = typename std::conditional < std::decay::type::value::type::value, T1, T0>::type; }; template struct the_common_type { using type = T0; }; template struct the_common_type { using type = T1; }; template struct the_common_type { using type = T0; // keep the first one! It's important for the type // adaptation to avoid type promotion }; template struct common_type; template struct common_type { using type = T0; }; template struct common_type { using type = typename the_common_type::type; }; template struct common_type { using type = typename common_type::type, typename common_type::type>::type; }; /* An adapted type creates a type that has the same shape as C && the same * dtype as T * to the exception of broadcasted constants that may take the dtype of C * instead */ template struct adapted_type; template struct adapted_type { using type = T; }; template struct adapted_type { using type = broadcast::type::dtype, typename std::decay::type>; }; template struct broadcasted_n; template struct broadcasted_n { using type = broadcasted; }; template struct broadcasted_n { using type = broadcasted::type>; }; constexpr size_t absdiff(size_t x, size_t y) { return x > y ? 
x - y : y - x; } template struct adapted_type { using type = typename broadcasted_n< T, absdiff(std::remove_reference::type::value, std::remove_reference::type::value)>::type; }; template struct adapt_type { using ctype = typename common_type::type, OtherTypes...>::type; static constexpr bool isdtype = is_dtype::type>::value; using type = typename adapted_type< T, ctype, std::is_same::type, ctype>::value, isdtype>::type; }; template struct adapt_type, OtherTypes...> { using type = broadcast; }; /* A reshaped type create a type that has the same shape as C && the same * dtype as T * To the opposite of an adapted type, it does *not* changes constants type */ template struct reshaped_type; template struct reshaped_type { using type = T; }; template struct reshaped_type { using type = broadcast::type, typename std::decay::type>; }; template struct reshaped_type { using type = broadcasted; }; template struct reshape_type { using ctype = typename common_type::type, OtherTypes...>::type; static constexpr bool isdtype = is_dtype::type>::value; using type = typename reshaped_type< T, ctype, std::is_same::type, ctype>::value, isdtype>::type; }; template struct is_array_index : std::false_type { }; template struct is_array_index> : std::true_type { }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_operators.hpp000066400000000000000000000102501416264035500260350ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_OPERATORS_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_OPERATORS_HPP #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/include/operator_/add.hpp" #include "pythonic/include/operator_/and_.hpp" #include "pythonic/include/operator_/or_.hpp" #include "pythonic/include/operator_/xor_.hpp" #include "pythonic/include/operator_/div.hpp" #include "pythonic/include/operator_/eq.hpp" #include "pythonic/include/operator_/gt.hpp" #include "pythonic/include/operator_/ge.hpp" #include 
"pythonic/include/operator_/lshift.hpp" #include "pythonic/include/operator_/lt.hpp" #include "pythonic/include/operator_/le.hpp" #include "pythonic/include/operator_/mul.hpp" #include "pythonic/include/operator_/neg.hpp" #include "pythonic/include/operator_/not_.hpp" #include "pythonic/include/operator_/ne.hpp" #include "pythonic/include/operator_/pos.hpp" #include "pythonic/include/operator_/rshift.hpp" #include "pythonic/include/operator_/sub.hpp" #include "pythonic/include/numpy/mod.hpp" #include "pythonic/include/numpy/bitwise_not.hpp" #include "pythonic/include/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN /* operators must live in the same namespace as the associated type */ namespace types { #define NUMPY_BINARY_FUNC_NAME operator+ #define NUMPY_BINARY_FUNC_SYM operator_::functor::add #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator& #define NUMPY_BINARY_FUNC_SYM operator_::functor::and_ #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator~ #define NUMPY_UNARY_FUNC_SYM numpy::functor::bitwise_not #include "pythonic/include/types/numpy_unary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator| #define NUMPY_BINARY_FUNC_SYM operator_::functor::or_ #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator^ #define NUMPY_BINARY_FUNC_SYM operator_::functor::xor_ #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator/ #define NUMPY_BINARY_FUNC_SYM operator_::functor::div #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator== #define NUMPY_BINARY_FUNC_SYM operator_::functor::eq #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator% #define NUMPY_BINARY_FUNC_SYM numpy::functor::mod #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator> #define NUMPY_BINARY_FUNC_SYM operator_::functor::gt 
#include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator>= #define NUMPY_BINARY_FUNC_SYM operator_::functor::ge #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator<< #define NUMPY_BINARY_FUNC_SYM operator_::functor::lshift #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator< #define NUMPY_BINARY_FUNC_SYM operator_::functor::lt #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator<= #define NUMPY_BINARY_FUNC_SYM operator_::functor::le #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator* #define NUMPY_BINARY_FUNC_SYM operator_::functor::mul #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator- #define NUMPY_UNARY_FUNC_SYM operator_::functor::neg #include "pythonic/include/types/numpy_unary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator!= #define NUMPY_BINARY_FUNC_SYM operator_::functor::ne #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator+ #define NUMPY_UNARY_FUNC_SYM operator_::functor::pos #include "pythonic/include/types/numpy_unary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator! 
#define NUMPY_UNARY_FUNC_SYM operator_::functor::not_ #include "pythonic/include/types/numpy_unary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator>> #define NUMPY_BINARY_FUNC_SYM operator_::functor::rshift #include "pythonic/include/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator- #define NUMPY_BINARY_FUNC_SYM operator_::functor::sub #include "pythonic/include/types/numpy_binary_op.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_texpr.hpp000066400000000000000000000311141416264035500251630ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_TEXPR_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_TEXPR_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/include/builtins/None.hpp" #include "pythonic/include/numpy/transpose.hpp" PYTHONIC_NS_BEGIN namespace types { template struct numpy_gexpr; /* expression template for Transposed matrix */ template struct numpy_texpr; // wrapper around numpy expression for 2D transposed matrix using gexpr // representation // >>> b = a.transpose // >>> b[i] == a[:,i] // True // // for N = 2 template struct numpy_texpr_2 { static_assert(E::value == 2, "texpr only implemented for matrices"); static const bool is_vectorizable = false; static const bool is_strided = true; using Arg = E; using iterator = nditerator>; using const_iterator = const_nditerator>; static constexpr size_t value = Arg::value; using value_type = numpy_gexpr; using dtype = typename E::dtype; Arg arg; using shape_t = sutils::transpose_t; template auto shape() const -> decltype(arg.template shape < I == 0 ? 1 : 0 > ()) { return arg.template shape < I == 0 ? 
1 : 0 > (); } numpy_texpr_2(); numpy_texpr_2(numpy_texpr_2 const &) = default; numpy_texpr_2(numpy_texpr_2 &&) = default; numpy_texpr_2 &operator=(numpy_texpr_2 const &) = default; numpy_texpr_2(Arg const &arg); const_iterator begin() const; const_iterator end() const; iterator begin(); iterator end(); long size() const { return this->template shape<0>(); } auto fast(long i) const -> decltype(this->arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), i)); auto fast(long i) -> decltype(this->arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), i)); auto fast(array const &indices) -> decltype(arg.fast(array{{indices[1], indices[0]}})) { return arg.fast(array{{indices[1], indices[0]}}); } auto fast(array const &indices) const -> decltype(arg.fast(array{{indices[1], indices[0]}})) { return arg.fast(array{{indices[1], indices[0]}}); } auto load(long i, long j) const -> decltype(arg.load(j, i)) { return arg.load(j, i); } template void store(Elt elt, long i, long j) { arg.store(elt, j, i); } template void update(Elt elt, long i, long j) const { arg.template update(elt, j, i); } #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif /* element filtering */ template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr>>>::type fast(F const &filter) const; template // indexing through an array of boolean -- a mask typename std::enable_if::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr>, ndarray>>>::type fast(F const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if< is_numexpr_arg::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>>>::type fast(F 
const &filter) const; template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr>>>::type operator[](F const &filter) const; template // indexing through an array of boolean -- a mask typename std::enable_if::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr>, ndarray>>>::type operator[](F const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if< is_numexpr_arg::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>>>::type operator[](F const &filter) const; auto operator[](long i) const -> decltype(this->fast(i)); auto operator[](long i) -> decltype(this->fast(i)); template auto operator[](array const &indices) -> decltype(arg[array{{indices[1], indices[0]}}]) { return arg[array{{indices[1], indices[0]}}]; } template auto operator[](array const &indices) const -> decltype(arg[array{{indices[1], indices[0]}}]) { return arg[array{{indices[1], indices[0]}}]; } template auto operator[](std::tuple const &indices) -> decltype( arg[std::tuple{std::get<1>(indices), std::get<0>(indices)}]) { return arg[std::tuple{std::get<1>(indices), std::get<0>(indices)}]; } template auto operator[](std::tuple const &indices) const -> decltype( arg[std::tuple{std::get<1>(indices), std::get<0>(indices)}]) { return arg[std::tuple{std::get<1>(indices), std::get<0>(indices)}]; } template auto operator[](S const &s0) const -> numpy_texpr< decltype(this->arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), (s0.step, s0)))>; template auto operator[](S const &s0) -> numpy_texpr< decltype(this->arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), (s0.step, s0)))>; template auto _reverse_index(S const &indices, utils::index_sequence) const -> decltype( numpy::functor::transpose{}(this->arg(std::get(indices)...))) { return 
numpy::functor::transpose{}(arg(std::get(indices)...)); } ndarray copy() const { return *this; } template auto operator()(S0 const &s0, S const &... s) const -> typename std::enable_if< !is_numexpr_arg::value, decltype(this->_reverse_index( std::tuple{s0, s...}, utils::make_reversed_index_sequence<1 + sizeof...(S)>()))>::type; template auto operator()(S0 const &s0, S const &... s) const -> typename std::enable_if::value, decltype(this->copy()(s0, s...))>::type; explicit operator bool() const; long flat_size() const; intptr_t id() const; template numpy_texpr_2 &operator=(Expr const &expr); template numpy_texpr_2 &operator=(numpy_texpr const &expr); template numpy_texpr_2 &update_(Expr const &expr); template numpy_texpr_2 &operator+=(Expr const &expr); template numpy_texpr_2 &operator-=(Expr const &expr); template numpy_texpr_2 &operator*=(Expr const &expr); template numpy_texpr_2 &operator/=(Expr const &expr); template numpy_texpr_2 &operator&=(Expr const &expr); template numpy_texpr_2 &operator|=(Expr const &expr); template numpy_texpr_2 &operator^=(Expr const &expr); template ndarray reshape(NewShape const &shape) const { return copy().reshape(shape); } }; // only implemented for N = 2 template struct numpy_texpr>> : numpy_texpr_2>> { numpy_texpr() = default; numpy_texpr(numpy_texpr const &) = default; numpy_texpr(numpy_texpr &&) = default; numpy_texpr(ndarray> const &arg); numpy_texpr &operator=(numpy_texpr const &) = default; using numpy_texpr_2>>::operator=; }; template struct numpy_texpr>> : numpy_texpr_2>> { numpy_texpr() = default; numpy_texpr(numpy_texpr const &) = default; numpy_texpr(numpy_texpr &&) = default; numpy_texpr(ndarray> const &arg); numpy_texpr &operator=(numpy_texpr const &) = default; using numpy_texpr_2>>::operator=; }; template struct numpy_texpr> : numpy_texpr_2> { numpy_texpr() = default; numpy_texpr(numpy_texpr const &) = default; numpy_texpr(numpy_texpr &&) = default; numpy_texpr(numpy_gexpr const &arg); template numpy_texpr(numpy_texpr 
const &other) : numpy_texpr(numpy_gexpr(other.arg)) { } numpy_texpr &operator=(numpy_texpr const &) = default; using numpy_texpr_2>::operator=; }; } template struct assignable_noescape> { using type = types::numpy_texpr; }; template struct assignable> { using type = types::numpy_texpr::type>; }; template struct returnable> { using type = types::numpy_texpr::type>; }; template struct lazy> { using type = types::numpy_texpr::type>; }; PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, pythonic::types::numpy_texpr> { using type = pythonic::types::numpy_texpr; }; template struct __combined, pythonic::types::numpy_texpr> { using type = pythonic::types::numpy_texpr::type>; }; template struct __combined, K> { using type = pythonic::types::numpy_texpr; }; template struct __combined, pythonic::types::numpy_gexpr> { using type = pythonic::types::numpy_texpr; }; template struct __combined, pythonic::types::none> { using type = pythonic::types::none< typename __combined, O>::type>; }; template struct __combined, pythonic::types::numpy_texpr> { using type = pythonic::types::none< typename __combined>::type>; }; template struct __combined, pythonic::types::none_type> { using type = pythonic::types::none>; }; template struct __combined> { using type = pythonic::types::none>; }; /*}*/ #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_unary_op.hpp000066400000000000000000000006341416264035500256600ustar00rootroot00000000000000#ifndef NUMPY_UNARY_FUNC_NAME #error NUMPY_UNARY_FUNC_NAME undefined #endif #ifndef NUMPY_UNARY_FUNC_SYM #error NUMPY_UNARY_FUNC_SYM undefined #endif template typename std::enable_if< types::valid_numop_parameters::type>::value, types::numpy_expr>::type NUMPY_UNARY_FUNC_NAME(E &&self); #undef NUMPY_UNARY_FUNC_NAME #undef NUMPY_UNARY_FUNC_SYM 
pythran-0.10.0+ds2/pythran/pythonic/include/types/numpy_vexpr.hpp000066400000000000000000000135771416264035500252020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_NUMPY_VEXPR_HPP #define PYTHONIC_INCLUDE_TYPES_NUMPY_VEXPR_HPP #include "pythonic/include/types/nditerator.hpp" PYTHONIC_NS_BEGIN namespace types { template struct numpy_vexpr { static constexpr size_t value = T::value; static const bool is_vectorizable = false; using dtype = typename dtype_of::type; using value_type = T; static constexpr bool is_strided = T::is_strided; using iterator = nditerator; using const_iterator = const_nditerator; T data_; F view_; numpy_vexpr() = default; numpy_vexpr(T const &data, F const &view) : data_(data), view_(view) { } long flat_size() const { return sutils::prod_tail(data_) * view_.template shape<0>(); } long size() const { return view_.size(); } template typename std::enable_if::value, numpy_vexpr &>::type operator=(E const &); template typename std::enable_if::value, numpy_vexpr &>::type operator=(E const &expr); numpy_vexpr &operator=(numpy_vexpr const &); using shape_t = array; template long shape() const { if (I == 0) return view_.template shape<0>(); else return data_.template shape(); } iterator begin(); iterator end(); const_iterator begin() const; const_iterator end() const; #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif template dtype load(long i, Indices... indices) const { return data_.load(view_.fast(i), indices...); } template void store(Elt elt, long i, Indices... indices) const { data_.store(elt, view_.fast(i), indices...); } template void update(Elt elt, long i, Indices... 
indices) const { data_.template update(elt, view_.fast(i), indices...); } auto fast(long i) -> decltype(data_.fast(i)) { return data_.fast(view_.fast(i)); } auto fast(long i) const -> decltype(data_.fast(i)) { return data_.fast(view_.fast(i)); } template auto operator()(S const &... slices) const -> decltype(ndarray>{*this}(slices...)); auto operator[](long i) const -> decltype(data_[i]) { return data_.fast(view_[i]); } template typename std::enable_if< is_slice::value, numpy_gexpr().normalize(1))>> operator[](S s) const { return {*this, s.normalize(size())}; } /* element filtering */ template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr>>>::type fast(E const &filter) const; template // indexing through an array of boolean -- a mask typename std::enable_if< !is_slice::value && is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr>>>::type operator[](E const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type operator[](E const &filter) const; template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr>::type fast(E const &filter) const; template numpy_vexpr &update_(Expr const &expr); template numpy_vexpr &operator+=(E const &expr); template numpy_vexpr &operator-=(E const &expr); template numpy_vexpr &operator*=(E const &expr); template numpy_vexpr &operator/=(E const &expr); template numpy_vexpr &operator&=(E const &expr); template numpy_vexpr &operator|=(E const &expr); template numpy_vexpr &operator^=(E const &expr); }; } template struct assignable> { using type = types::ndarray::type, typename types::numpy_vexpr::shape_t>; }; template struct lazy> { using type = 
types::numpy_vexpr::type, typename lazy::type>; }; PYTHONIC_NS_END /* combined are sorted such that the assigned type comes first */ template struct __combined, pythonic::types::ndarray> { using type = pythonic::types::ndarray; }; template struct __combined, pythonic::types::numpy_vexpr> { using type = pythonic::types::ndarray; }; #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/pointer.hpp000066400000000000000000000025341416264035500242550ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_POINTER_HPP #define PYTHONIC_INCLUDE_TYPES_POINTER_HPP PYTHONIC_NS_BEGIN namespace types { template struct pointer { T *data; using reference = T &; using const_reference = T const &; using value_type = T; reference operator[](long); value_type operator[](long) const; reference fast(long); value_type fast(long) const; }; } PYTHONIC_NS_END namespace std { template typename pythonic::types::pointer::reference get(pythonic::types::pointer &t); template typename pythonic::types::pointer::value_type get(pythonic::types::pointer const &t); template typename pythonic::types::pointer::value_type get(pythonic::types::pointer &&t); template struct tuple_element> { typedef typename pythonic::types::pointer::value_type type; }; } #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(types::pointer const &v); }; template struct from_python> { static bool is_convertible(PyObject *obj); static types::pointer convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/raw_array.hpp000066400000000000000000000012171416264035500245610ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_RAW_ARRAY_HPP #define PYTHONIC_INCLUDE_TYPES_RAW_ARRAY_HPP PYTHONIC_NS_BEGIN namespace types { enum class ownership { external, owned, }; /* Wrapper class to store an array pointer * * for internal use only, meant to be stored in a shared_ptr */ template class 
raw_array { raw_array(raw_array const &) = delete; public: using pointer_type = T *; T *data; raw_array(); raw_array(size_t n); raw_array(T *d, ownership o); raw_array(raw_array &&d); void forget(); ~raw_array(); private: bool external; }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/set.hpp000066400000000000000000000216171416264035500233730ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_SET_HPP #define PYTHONIC_INCLUDE_TYPES_SET_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/types/empty_iterator.hpp" #include "pythonic/include/types/list.hpp" #include "pythonic/include/utils/iterator.hpp" #include "pythonic/include/utils/reserve.hpp" #include "pythonic/include/utils/shared_ref.hpp" #include "pythonic/include/builtins/in.hpp" #include #include #include #include #include #include PYTHONIC_NS_BEGIN namespace types { struct empty_set; template class set; } PYTHONIC_NS_END /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, pythonic::types::set> { using type = pythonic::types::set::type>; }; template struct __combined> { using type = pythonic::types::set; }; template struct __combined, pythonic::types::empty_set> { using type = pythonic::types::set; }; template struct __combined, container> { using type = pythonic::types::set::type>; }; template struct __combined, pythonic::types::set> { using type = pythonic::types::set::type>; }; template struct __combined, pythonic::types::list> { using type = pythonic::types::set::type>; }; template struct __combined, pythonic::types::empty_set> { using type = pythonic::types::set; }; template struct __combined> { using type = pythonic::types::set; }; template struct __combined, pythonic::types::empty_set> { using type = indexable; }; template struct __combined> { using type = indexable; }; template struct __combined> { using type = pythonic::types::set; }; template struct __combined, pythonic::types::set> { 
using type = pythonic::types::set; }; template struct __combined, indexable> { using type = pythonic::types::set; }; template struct __combined, pythonic::types::set> { using type = pythonic::types::set() + std::declval())>; }; template struct __combined, indexable_container> { using type = pythonic::types::set() + std::declval())>; }; template struct __combined, pythonic::types::set> { using type = pythonic::types::set::type>; }; /* } */ PYTHONIC_NS_BEGIN namespace types { template class set { // data holder using _type = typename std::remove_cv::type>::type; using container_type = std::set<_type>; utils::shared_ref data; public: template friend class set; // types using reference = typename container_type::reference; using const_reference = typename container_type::const_reference; using iterator = utils::comparable_iterator; using const_iterator = utils::comparable_iterator; using size_type = typename container_type::size_type; using difference_type = typename container_type::difference_type; using value_type = typename container_type::value_type; using allocator_type = typename container_type::allocator_type; using pointer = typename container_type::pointer; using const_pointer = typename container_type::const_pointer; using reverse_iterator = typename container_type::reverse_iterator; using const_reverse_iterator = typename container_type::const_reverse_iterator; // constructors set(); template set(InputIterator start, InputIterator stop); set(empty_set const &); set(T const &value, single_value); set(std::initializer_list l); set(set const &other); template set(set const &other); // iterators iterator begin(); const_iterator begin() const; iterator end(); const_iterator end() const; reverse_iterator rbegin(); const_reverse_iterator rbegin() const; reverse_iterator rend(); const_reverse_iterator rend() const; // modifiers T pop(); void add(const T &x); void push_back(const T &x); void clear(); template void discard(U const &elem); template void remove(U const 
&elem); // set interface operator bool() const; long size() const; // Misc set copy() const; template bool isdisjoint(U const &other) const; template bool issubset(U const &other) const; template bool issuperset(U const &other) const; set union_() const; template typename __combined, U, Types...>::type union_(U &&other, Types &&... others) const; template none_type update(Types &&... others); set intersection() const; template typename __combined, U, Types...>::type intersection(U const &other, Types const &... others) const; template void intersection_update(Types const &... others); set difference() const; template set difference(U const &other, Types const &... others) const; template bool contains(V const &v) const; template void difference_update(Types const &... others); template set::type> symmetric_difference(set const &other) const; template typename __combined>::type symmetric_difference(U const &other) const; template void symmetric_difference_update(U const &other); // Operators template bool operator==(set const &other) const; template bool operator<=(set const &other) const; template bool operator<(set const &other) const; template bool operator>=(set const &other) const; template bool operator>(set const &other) const; template set::type> operator|(set const &other) const; template void operator|=(set const &other); template set::type> operator&(set const &other) const; template void operator&=(set const &other); template set operator-(set const &other) const; template void operator-=(set const &other); template set::type> operator^(set const &other) const; template void operator^=(set const &other); intptr_t id() const; template friend std::ostream &operator<<(std::ostream &os, set const &v); }; struct empty_set { using value_type = void; using iterator = empty_iterator; using const_iterator = empty_iterator; empty_set operator|(empty_set const &); template set operator|(set const &s); template U operator&(U const &s); template U operator-(U const 
&s); empty_set operator^(empty_set const &); template set operator^(set const &s); template none_type update(Types &&...); operator bool(); iterator begin() const; iterator end() const; template bool contains(V const &) const; constexpr long size() const { return 0; } }; } template struct assignable> { using type = types::set::type>; }; PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(types::set const &v); }; template <> struct to_python { static PyObject *convert(types::empty_set); }; template struct from_python> { static bool is_convertible(PyObject *obj); static types::set convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/slice.hpp000066400000000000000000000170111416264035500236700ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_SLICE_HPP #define PYTHONIC_INCLUDE_TYPES_SLICE_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/include/types/attr.hpp" #include #include PYTHONIC_NS_BEGIN namespace types { template class bound { T value_; // use a sentinel to store none, it takes less space static constexpr long sentinel = std::numeric_limits::min(); public: bound() = default; bound(none_type) : value_(sentinel) { } bound(none v) : value_(v.is_none ? 
sentinel : (T)v) { } bound(T v) : value_(v) { } operator T() const { return value_; } operator none() const { if (value_ == sentinel) return none_type(); else return value_; } bool is_none() const { return value_ == sentinel; } }; struct slice; struct contiguous_slice; struct fast_contiguous_slice; struct contiguous_normalized_slice; struct normalized_slice { long lower, upper, step; normalized_slice(); normalized_slice(long lower, long upper, long step = 1); normalized_slice operator*(normalized_slice const &other) const; normalized_slice operator*(contiguous_normalized_slice const &other) const; normalized_slice operator*(slice const &other) const; normalized_slice operator*(contiguous_slice const &other) const; normalized_slice operator*(fast_contiguous_slice const &other) const; long size() const; inline long get(long i) const; }; struct slice { using normalized_type = normalized_slice; bound lower, upper, step; slice(none lower, none upper, none step); slice(); slice operator*(slice const &other) const; slice operator*(contiguous_slice const &other) const; slice operator*(fast_contiguous_slice const &other) const; /* Normalize change a[:-1] to a[:len(a)-1] to have positif index. It also check for value bigger than len(a) to fit the size of the container */ normalized_slice normalize(long max_size) const; /* * An assert is raised when we can't compute the size without more * informations. 
*/ long size() const; long get(long i) const; }; struct contiguous_normalized_slice { long lower, upper; static constexpr long step = 1; contiguous_normalized_slice(); contiguous_normalized_slice(long lower, long upper); contiguous_normalized_slice operator*(contiguous_normalized_slice const &other) const; contiguous_normalized_slice operator*(contiguous_slice const &other) const; contiguous_normalized_slice operator*(fast_contiguous_slice const &other) const; normalized_slice operator*(normalized_slice const &other) const; normalized_slice operator*(slice const &other) const; long size() const; inline long get(long i) const; }; struct contiguous_slice { using normalized_type = contiguous_normalized_slice; long lower; bound upper; static constexpr long step = 1; contiguous_slice(none lower, none upper); contiguous_slice() = default; contiguous_slice operator*(contiguous_slice const &other) const; contiguous_slice operator*(fast_contiguous_slice const &other) const; slice operator*(slice const &other) const; /* Normalize change a[:-1] to a[:len(a)-1] to have positif index. 
It also check for value bigger than len(a) to fit the size of the container */ contiguous_normalized_slice normalize(long max_size) const; long size() const; inline long get(long i) const; }; struct fast_contiguous_slice { using normalized_type = contiguous_normalized_slice; long lower; bound upper; static constexpr long step = 1; fast_contiguous_slice(none lower, none upper); fast_contiguous_slice() = default; fast_contiguous_slice operator*(fast_contiguous_slice const &other) const; contiguous_slice operator*(contiguous_slice const &other) const; slice operator*(slice const &other) const; contiguous_normalized_slice normalize(long max_size) const; long size() const; }; template struct normalized { using type = T; }; template <> struct normalized { using type = normalized_slice; }; template <> struct normalized { using type = contiguous_normalized_slice; }; template <> struct normalized { using type = contiguous_normalized_slice; }; template struct is_slice : std::false_type { }; template <> struct is_slice : std::true_type { }; template <> struct is_slice : std::true_type { }; template <> struct is_slice : std::true_type { }; template using normalize_t = typename normalized::type; template typename std::enable_if::value, S>::type normalize(S s, long n) { if (s < 0) s += n; return s; } inline none_type normalize(none_type s, long n) { return {}; } template auto normalize(S s, long n) -> decltype(s.normalize(n)) { return s.normalize(n); } template none_type adapt_slice(none_type, I0 const &, I1 const &) { return {}; } template long adapt_slice(long l, I0 const &index0, I1 const &index1) { if ((long)index0 != (long)index1) return 0; else return l; } template slice adapt_slice(slice const &s, I0 const &index0, I1 const &index1) { if ((long)index0 != (long)index1) return {0, 1, 1}; else return s; } template contiguous_slice adapt_slice(S const &s, I0 const &index0, I1 const &index1) { if ((long)index0 != (long)index1) return {0, 1}; else return s; } template 
fast_contiguous_slice adapt_slice(fast_contiguous_slice const &s, I0 const &index0, I1 const &index1) { if ((long)index0 != (long)index1) return {0, 1}; else return s; } template typename std::enable_if::value, std::ostream &>::type operator<<(std::ostream &os, S const &s); } namespace builtins { template auto getattr(types::attr::START, T const &s) -> decltype(s.lower) { return s.lower; } template auto getattr(types::attr::STOP, T const &s) -> decltype(s.upper) { return s.upper; } template auto getattr(types::attr::STEP, T const &s) -> decltype(s.step) { return s.step; } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template struct to_python> { static PyObject *convert(types::bound const &n); }; template <> struct to_python { static PyObject *convert(types::contiguous_slice const &n); }; template <> struct to_python { static PyObject *convert(types::contiguous_normalized_slice const &n); }; template <> struct to_python { static PyObject *convert(types::slice const &n); }; template <> struct to_python { static PyObject *convert(types::normalized_slice const &n); }; template <> struct from_python { static bool is_convertible(PyObject *obj); static types::slice convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/static_if.hpp000066400000000000000000000204501416264035500245370ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_STATIC_IF_HPP #define PYTHONIC_INCLUDE_TYPES_STATIC_IF_HPP #include PYTHONIC_NS_BEGIN namespace types { template struct StaticIfReturn { T arg; }; template struct StaticIfNoReturn { T arg; StaticIfNoReturn() = default; StaticIfNoReturn(T const &arg) : arg(arg) { } long get(std::integral_constant) { return 0; } StaticIfNoReturn &get(std::integral_constant) { return *this; } T &get(std::integral_constant) { return arg; } template operator OT() const { return OT(); } }; template struct StaticIfBreak { T arg; StaticIfBreak() = 
default; StaticIfBreak(T const &arg) : arg(arg) { } long get(std::integral_constant) { return 1; } T &get(std::integral_constant) { return arg; } T &get(std::integral_constant) { return arg; } template operator OT() const { return OT(); } }; template struct StaticIfCont { T arg; StaticIfCont() = default; StaticIfCont(T const &arg) : arg(arg) { } long get(std::integral_constant) { return 2; } StaticIfCont &get(std::integral_constant) { return *this; } T &get(std::integral_constant) { return arg; } template operator OT() const { return OT(); } }; template struct StaticIfReturnHolder { std::tuple args; StaticIfReturnHolder() : args(0, T0(), T1()) { } StaticIfReturnHolder(StaticIfReturnHolder const &) = default; template StaticIfReturnHolder(StaticIfReturnHolder const &other) : args(other.args) { } template StaticIfReturnHolder(StaticIfReturn const &arg) : args(1, arg.arg, T1()) { } StaticIfReturnHolder(StaticIfNoReturn const &arg) : args(0, T0(), arg.arg) { } StaticIfReturnHolder(StaticIfBreak const &arg) : args(2, T0(), arg.arg) { } StaticIfReturnHolder(StaticIfCont const &arg) : args(3, T0(), arg.arg) { } }; } PYTHONIC_NS_END namespace std { template struct tuple_element> { using type = typename std::conditional< I == 0, bool, typename std::conditional::type>::type; }; template auto get(pythonic::types::StaticIfReturnHolder &t) -> decltype(std::get(t.args)) { return std::get(t.args); } template struct tuple_element> { using type = decltype(std::declval>().get( std::integral_constant{})); }; template auto get(pythonic::types::StaticIfNoReturn &t) -> decltype(t.get(std::integral_constant{})) { return t.get(std::integral_constant{}); } template struct tuple_element> { using type = decltype(std::declval>().get( std::integral_constant{})); }; template auto get(pythonic::types::StaticIfBreak &t) -> decltype(t.get(std::integral_constant{})) { return t.get(std::integral_constant{}); } template struct tuple_element> { using type = decltype(std::declval>().get( 
std::integral_constant{})); }; template auto get(pythonic::types::StaticIfCont &t) -> decltype(t.get(std::integral_constant{})) { return t.get(std::integral_constant{}); } } /* type inference stuff { */ #include "pythonic/include/types/combined.hpp" template struct __combined, pythonic::types::StaticIfNoReturn> { using type = pythonic::types::StaticIfReturnHolder; }; template struct __combined, pythonic::types::StaticIfBreak> { using type = pythonic::types::StaticIfReturnHolder; }; template struct __combined, pythonic::types::StaticIfCont> { using type = pythonic::types::StaticIfReturnHolder; }; template struct __combined, pythonic::types::StaticIfReturn> { using type = pythonic::types::StaticIfReturnHolder; }; template struct __combined &, pythonic::types::none_type> { using type = pythonic::types::none_type; }; template struct __combined, pythonic::types::none_type> { using type = pythonic::types::none_type; }; template struct __combined> { using type = pythonic::types::none_type; }; template struct __combined, pythonic::types::StaticIfReturn> { using type = pythonic::types::StaticIfReturnHolder; }; template struct __combined, pythonic::types::StaticIfReturn> { using type = pythonic::types::StaticIfReturnHolder; }; template struct __combined, pythonic::types::StaticIfReturnHolder> { using type = pythonic::types::StaticIfReturnHolder::type, typename __combined::type>; }; template struct __combined, pythonic::types::StaticIfCont> { using type = pythonic::types::StaticIfReturnHolder::type, T1>; }; template struct __combined, pythonic::types::StaticIfBreak> { using type = pythonic::types::StaticIfReturnHolder::type, T1>; }; template struct __combined, pythonic::types::StaticIfReturn> { using type = pythonic::types::StaticIfReturnHolder::type, T1>; }; template struct __combined, pythonic::types::StaticIfReturnHolder> { using type = pythonic::types::StaticIfReturnHolder::type, T1>; }; template struct __combined, pythonic::types::StaticIfReturnHolder> { using type = 
pythonic::types::StaticIfReturnHolder::type, T1>; }; template struct __combined, pythonic::types::StaticIfReturnHolder> { using type = pythonic::types::StaticIfReturnHolder::type, T1>; }; template struct __combined> { using type = T0; }; template struct __combined, T0> { using type = T0; }; /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/str.hpp000066400000000000000000000262451416264035500234120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_STR_HPP #define PYTHONIC_INCLUDE_TYPES_STR_HPP #include "pythonic/include/types/slice.hpp" #include "pythonic/include/types/tuple.hpp" #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/utils/shared_ref.hpp" #include "pythonic/include/utils/functor.hpp" #include "pythonic/include/utils/int_.hpp" #include #include #include #include #include PYTHONIC_NS_BEGIN namespace types { class str; struct const_sliced_str_iterator; struct chr { char c; chr() = default; chr(char c) : c(c) { } bool operator==(chr other) const { return c == other.c; } long size() const { return 1; } operator long() const { return c - '0'; } operator str() const; char const *begin() const { return &c; } char const *end() const { return 1 + &c; } std::array chars() const { return {{c}}; } }; template class sliced_str { using container_type = std::string; utils::shared_ref data; typename S::normalized_type slicing; public: // types using reference = container_type::reference; using const_reference = container_type::const_reference; using iterator = const_sliced_str_iterator; using const_iterator = const_sliced_str_iterator; using size_type = container_type::size_type; using difference_type = container_type::difference_type; using value_type = container_type::value_type; using allocator_type = container_type::allocator_type; using pointer = container_type::pointer; using const_pointer = container_type::const_pointer; // constructor sliced_str(); sliced_str(sliced_str const &s); sliced_str(sliced_str const &s, 
typename S::normalized_type const &sl); sliced_str(types::str const &other, typename S::normalized_type const &s); // const getter container_type const &get_data() const; typename S::normalized_type const &get_slice() const; // assignment sliced_str &operator=(str const &); sliced_str &operator=(sliced_str const &); str operator+(sliced_str const &); // iterators const_iterator begin() const; const_iterator end() const; // size long size() const; // accessor chr operator[](long i) const; chr fast(long i) const; template typename std::enable_if::value, sliced_str>::type operator[](Sp const &s) const; // conversion operator long() const; explicit operator bool() const; bool operator!() const; size_t find(str const &s, size_t pos = 0) const; bool contains(str const &v) const; bool operator==(str const &v) const; // io template friend std::ostream &operator<<(std::ostream &os, types::sliced_str const &v); }; struct string_iterator; class str { template friend class sliced_str; using container_type = std::string; utils::shared_ref data; public: static const size_t npos = -1 /*std::string::npos*/; static constexpr bool is_vectorizable = false; using value_type = str; // in Python, a string contains... 
strings using iterator = string_iterator; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; str(); str(std::string const &s); str(std::string &&s); explicit str(char c); str(const char *s); template str(const char(&s)[N]); str(const char *s, size_t n); template str(sliced_str const &other); template str(T const &begin, T const &end); template explicit str(T const &); explicit operator char() const; explicit operator long int() const; explicit operator float() const; explicit operator double() const; template str &operator=(sliced_str const &other); types::str &operator+=(types::str const &s); types::str &operator+=(types::chr const &s); long size() const; iterator begin() const; reverse_iterator rbegin() const; iterator end() const; reverse_iterator rend() const; auto c_str() const -> decltype(data->c_str()); container_type &chars() { return *data; } container_type const &chars() const { return *data; } auto resize(long n) -> decltype(data->resize(n)); long find(str const &s, size_t pos = 0) const; bool contains(str const &v) const; long find_first_of(str const &s, size_t pos = 0) const; long find_first_of(const char *s, size_t pos = 0) const; long find_first_not_of(str const &s, size_t pos = 0) const; long find_last_not_of(str const &s, size_t pos = npos) const; str substr(size_t pos = 0, size_t len = npos) const; bool empty() const; int compare(size_t pos, size_t len, str const &str) const; void reserve(size_t n); str &replace(size_t pos, size_t len, str const &str); template str &operator+=(sliced_str const &other); bool operator==(str const &other) const; bool operator!=(str const &other) const; bool operator<=(str const &other) const; bool operator<(str const &other) const; bool operator>=(str const &other) const; bool operator>(str const &other) const; template bool operator==(sliced_str const &other) const; bool operator==(chr other) const; template typename std::enable_if::value, sliced_str>::type 
operator()(S const &s) const; chr operator[](long i) const; chr fast(long i) const; template typename std::enable_if::value, sliced_str>::type operator[](S const &s) const; explicit operator bool() const; long count(types::str const &sub) const; intptr_t id() const { return reinterpret_cast(&(*data)); } }; struct string_iterator : std::iterator { std::string::const_iterator curr; string_iterator(std::string::const_iterator iter) : curr(iter) { } chr operator*() const { return chr(*curr); } string_iterator &operator++() { ++curr; return *this; } string_iterator &operator+=(std::size_t n) { curr += n; return *this; } string_iterator operator+(std::size_t n) { return {curr + n}; } string_iterator &operator--() { --curr; return *this; } string_iterator &operator-=(std::size_t n) { curr -= n; return *this; } string_iterator operator-(std::size_t n) { return {curr - n}; } bool operator==(string_iterator const &other) const { return curr == other.curr; } bool operator!=(string_iterator const &other) const { return curr != other.curr; } std::ptrdiff_t operator-(string_iterator const &other) const { return curr - other.curr; } }; struct const_sliced_str_iterator : std::iterator { const char *data; long step; const_sliced_str_iterator(char const *data, long step); const_sliced_str_iterator operator++(); bool operator<(const_sliced_str_iterator const &other) const; bool operator==(const_sliced_str_iterator const &other) const; bool operator!=(const_sliced_str_iterator const &other) const; chr operator*() const; const_sliced_str_iterator operator-(long n) const; long operator-(const_sliced_str_iterator const &other) const; }; size_t hash_value(str const &x); str operator+(str const &self, str const &other); str operator+(chr const &self, chr const &other); str operator+(chr const &self, str const &other); str operator+(chr const &self, str const &other); template str operator+(str const &self, char const(&other)[N]); template str operator+(chr const &self, char 
const(&other)[N]); template str operator+(char const(&self)[N], str const &other); template str operator+(char const(&self)[N], chr const &other); template bool operator==(char const(&self)[N], str const &other); bool operator==(chr self, str const &other); std::ostream &operator<<(std::ostream &os, chr const &s); std::ostream &operator<<(std::ostream &os, str const &s); } namespace operator_ { template auto mod(const char(&fmt)[N], Arg &&arg) -> decltype(pythonic::types::str(fmt) % std::forward(arg)); pythonic::types::str add(char const *self, char const *other); pythonic::types::str mul(char const *self, long other); pythonic::types::str mul(long self, char const *other); } template <> struct assignable { using type = types::str; }; template <> struct assignable { using type = types::str; }; template <> struct assignable { using type = types::str; }; template struct assignable { using type = types::str; }; template struct assignable { using type = types::str; }; PYTHONIC_NS_END pythonic::types::str operator*(pythonic::types::str const &s, long n); pythonic::types::str operator*(long t, pythonic::types::str const &s); pythonic::types::str operator*(pythonic::types::chr const &s, long n); pythonic::types::str operator*(long t, pythonic::types::chr const &s); namespace std { template <> struct hash { size_t operator()(const pythonic::types::str &x) const; }; template <> struct hash { size_t operator()(const pythonic::types::chr &x) const; }; /* std::get overload */ template pythonic::types::str get(pythonic::types::str const &t); template struct tuple_element { using type = pythonic::types::str; }; template struct tuple_element> { using type = pythonic::types::str; }; } /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template <> struct __combined { using type = pythonic::types::str; }; template <> struct __combined { using type = pythonic::types::str; }; template struct __combined { using type = pythonic::types::str; }; /* } */ #ifdef 
ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template <> struct to_python { static PyObject *convert(types::str const &v); }; template <> struct to_python { static PyObject *convert(types::chr const &v); }; template struct to_python> { static PyObject *convert(types::sliced_str const &v); }; template <> struct from_python { static bool is_convertible(PyObject *obj); static types::str convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/traits.hpp000066400000000000000000000070331416264035500241020ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_TRAITS_HPP #define PYTHONIC_INCLUDE_TYPES_TRAITS_HPP #include PYTHONIC_NS_BEGIN namespace types { template struct is_complex : std::false_type { using type = T; }; template struct is_complex> : std::true_type { using type = T; }; template struct is_dtype { static constexpr bool value = std::is_scalar::value || is_complex::value; }; #define MEMBER_TYPE_TRAIT(check_struct, member) \ template \ struct check_struct { \ using yes = char; \ using no = struct { \ char _[2]; \ }; \ template \ static yes _test(typename C::member *); \ template \ static no _test(...); \ static const bool value = \ sizeof(_test::type>(nullptr)) == \ sizeof(yes); \ }; #define MEMBER_ATTR_TRAIT(check_struct, member) \ template \ struct check_struct { \ template \ static std::integral_constant _test(decltype(&C::member)); \ template \ static std::integral_constant _test(...); \ static const bool value = decltype( \ _test::type>(nullptr))::value; \ }; /* trait to check if a type is iterable*/ MEMBER_TYPE_TRAIT(is_iterable, iterator); /* trait to check if a type is callable */ MEMBER_TYPE_TRAIT(is_callable, callable); /* trait to check if a type is pure */ MEMBER_TYPE_TRAIT(is_pure, pure); /* trait to check if the type has a size member */ MEMBER_ATTR_TRAIT(has_size, size); /* trait to check if a type has a fast iterator */ MEMBER_TYPE_TRAIT(has_fast_iterator, 
const_fast_iterator); /* trait to check if a type has a fast vectorizable type field */ MEMBER_ATTR_TRAIT(has_vectorizable, is_vectorizable); /* trait to check if the type has a contains member */ template struct has_contains { template static auto _test(C *t) -> decltype(t->contains(std::declval()), std::integral_constant()); static std::integral_constant _test(...); static const bool value = decltype(_test((T *)nullptr))::value; }; /* trait to check if the type has a shape member */ MEMBER_ATTR_TRAIT(has_shape, shape); /* trait to check if the type has a static size */ template struct len_of { static long constexpr value = -1; }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/tuple.hpp000066400000000000000000001422141416264035500237260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_TUPLE_HPP #define PYTHONIC_INCLUDE_TYPES_TUPLE_HPP #include "pythonic/include/types/assignable.hpp" #include "pythonic/include/types/traits.hpp" #include "pythonic/include/types/nditerator.hpp" #include "pythonic/include/utils/int_.hpp" #include "pythonic/include/utils/seq.hpp" #include "pythonic/include/utils/nested_container.hpp" #include #include #if !defined(HAVE_SSIZE_T) || !HAVE_SSIZE_T #if defined(_MSC_VER) #include typedef SSIZE_T ssize_t; #endif #endif // Equality comparison between pair && tuple namespace std { template bool operator==(pair const &self, tuple const &other); template bool operator==(pair const &self, tuple const &other); } // Tuple concatenation with operator+ template std::tuple operator+(std::tuple const &t0, std::tuple const &t1); template std::tuple operator+(std::tuple &&t0, std::tuple const &t1); template std::tuple operator+(std::tuple const &t0, std::tuple &&t1); template std::tuple operator+(std::tuple &&t0, std::tuple &&t1); PYTHONIC_NS_BEGIN namespace types { template struct iterator { using type = T; }; template struct dynamic_tuple; template struct array_base; struct tuple_version { }; struct list_version { 
}; template using array = array_base; template using static_list = array_base; template struct is_pod_array { static constexpr bool value = false; }; template struct is_pod_array> { static constexpr bool value = true; }; template struct pshape; template struct ndarray; class str; struct slice; struct contiguous_slice; /* helper to extract the tail of a tuple, && pop the head */ template auto make_tuple_tail(T const &t, utils::index_sequence) -> decltype(std::make_tuple(std::get(t)...)) { return std::make_tuple(std::get(t)...); } template std::tuple tuple_tail(std::tuple const &t); template struct count_trailing_long : std::integral_constant { }; template struct count_trailing_long : std::integral_constant::value> { }; template auto tuple_pop(std::tuple const &t) -> decltype(make_tuple_tail::value>( t, utils::make_index_sequence< sizeof...(Stail)-count_trailing_long::value>{})) { return make_tuple_tail::value>( t, utils::make_index_sequence::value>{}); } template std::tuple array_to_tuple(A const &a, utils::index_sequence, utils::type_sequence) { return std::tuple(a[I]...); } template struct pshape; template struct iterator> { using type = array; }; template long check_type(long, std::integral_constant) { return N; } long check_type(long, long value) { return value; } template std::integral_constant check_type(std::integral_constant, std::integral_constant) { assert(N == P && "consistent init"); return {}; } template std::integral_constant check_type(std::integral_constant, long v) { assert(N == v && "consistent init"); return {}; } template struct is_pshape_element : std::is_integral { }; template struct is_pshape_element> : std::true_type { }; template struct pshape { static_assert(utils::all_of::value...>::value, "valid pshape"); struct checked { }; std::tuple values; template pshape(std::tuple const &v, utils::index_sequence) : values{check_type(std::get(values), std::get(v))...} { } template pshape(std::tuple const &v) : pshape(v, utils::make_index_sequence()) 
{ } template pshape(long arg, Args... args) : pshape(std::make_tuple(arg, args...), utils::make_index_sequence<1 + sizeof...(args)>()) { } template pshape(std::integral_constant arg, Args... args) : pshape(std::make_tuple(arg, args...), utils::make_index_sequence<1 + sizeof...(args)>()) { } template pshape(S const *buffer, utils::index_sequence) : values{check_type(std::get(values), buffer[Is])...} { } template pshape(S const *buffer) : pshape(buffer, utils::make_index_sequence()) { } template pshape(pshape other) : pshape(other.values, utils::make_index_sequence()) { static_assert(sizeof...(TyOs) == sizeof...(Tys), "compatible sizes"); } template pshape(pythonic::types::array_base data) : pshape(data.data()) { } pshape() = default; pshape(pshape const &) = default; pshape(pshape &&) = default; pshape &operator=(pshape const &) = default; pshape &operator=(pshape &&) = default; template types::array array(utils::index_sequence) const { return {{get()...}}; } types::array array() const { return array(utils::make_index_sequence()); } operator types::array() const { return array(); } template long get() const { return std::get(values); } template auto get() -> decltype(std::get(values)) { return std::get(values); } }; template struct shape_builder; template struct shape_builder, M, Ss...> : shape_builder> { }; template struct shape_builder { using type = pshape; }; template struct shape_builder : shape_builder { }; struct array_base_slicer { template dynamic_tuple operator()(array const &b, slice const &s); template dynamic_tuple operator()(array const &b, contiguous_slice const &s); template dynamic_tuple operator()(array const &b, fast_contiguous_slice const &s); template typename std::enable_if::value, sliced_list>::type operator()(static_list const &b, S const &s) { return {b, s}; } }; namespace details { template auto extract_shape(E const &e, utils::int_<0>) -> decltype(e.size()) { return e.size(); } template auto extract_shape(E const &e, utils::int_) -> 
decltype(extract_shape(e[0], utils::int_{})) { return extract_shape(e[0], utils::int_{}); } } /* inspired by std::array implementation */ template struct array_base { using value_type = T; using pointer = value_type *; using const_pointer = const value_type *; using reference = value_type &; using const_reference = const value_type &; using iterator = value_type *; using const_iterator = const value_type *; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; // minimal ndarray interface using dtype = typename utils::nested_container_value_type::type; static const size_t value = utils::nested_container_depth::value; static const bool is_vectorizable = true; static const bool is_strided = false; // flat_size implementation template long _flat_size(E const &e, utils::int_<1>) const; template long _flat_size(E const &e, utils::int_) const; long flat_size() const; // Support for zero-sized arrays mandatory. value_type buffer[N ? N : 1]; // No explicit construct/copy/destroy for aggregate type. void fill(const value_type &__u); long count(value_type const &u) const { return std::count(begin(), end(), u); } // Iterators. iterator begin() noexcept; const_iterator begin() const noexcept; iterator end() noexcept; const_iterator end() const noexcept; reverse_iterator rbegin() noexcept; const_reverse_iterator rbegin() const noexcept; reverse_iterator rend() noexcept; const_reverse_iterator rend() const noexcept; const_iterator cbegin() const noexcept; const_iterator cend() const noexcept; const_reverse_iterator crbegin() const noexcept; const_reverse_iterator crend() const noexcept; // Capacity. constexpr size_type size() const noexcept; constexpr size_type max_size() const noexcept; constexpr bool empty() const noexcept; intptr_t id() const; // Element access. 
reference fast(long n); const_reference fast(long n) const noexcept; #ifdef USE_XSIMD using simd_iterator = const_simd_nditerator; using simd_iterator_nobroadcast = simd_iterator; template simd_iterator vbegin(vectorizer) const; template simd_iterator vend(vectorizer) const; #endif template dtype load(long index0, long index1, Indices... indices) const { return fast(index0).load(index1, indices...); } dtype load(long index) const { return fast(index); } reference operator[](long __n); const_reference operator[](long __n) const noexcept; template auto operator[](S s) const -> decltype(array_base_slicer{}(*this, (s.lower, s))) { return array_base_slicer{}(*this, s); } reference front(); const_reference front() const; reference back(); const_reference back() const; pointer data() noexcept; const_pointer data() const noexcept; // operator // for conversion to dict item type template operator std::pair() const { static_assert(std::is_same::value && std::is_same::value && N == 2, "compatible conversion"); return {data()[0], data()[1]}; } template bool operator==(array_base const &other) const; template bool operator!=(array_base const &other) const; template bool operator<(array_base const &other) const; template array_base::type, N + M, Version> operator+(array_base const &other) const; // tuple conversion template operator std::tuple() const; template operator array_base() const; auto to_tuple() const -> decltype(array_to_tuple(*this, utils::make_index_sequence{}, utils::make_repeated_type())); template array_base to_array() const; template explicit operator array_base() const { return to_array(); } template auto operator()(S const &s) const -> decltype((*this)[s]) { return (*this)[s]; } bool operator!() const { return N == 0; } /* array */ template friend std::ostream & operator<<(std::ostream &os, types::array_base const &v); using shape_t = typename shape_builder::type; template auto shape() const -> decltype(details::extract_shape(*this, utils::int_{})) { return 
details::extract_shape(*this, utils::int_{}); } }; // Implementation for detection of "same type". // With this information, we know if we must create a real tuple || a // static sized array namespace details { template struct alike; template <> struct alike<> { static bool const value = false; using type = void; }; template struct alike { static bool const value = true; using type = typename std::remove_cv< typename std::remove_reference::type>::type; }; template struct alike, numpy_gexpr> { static bool const value = true; using type = numpy_gexpr; }; template struct alike { static bool const value = std::is_same::value; using type = typename std::conditional::type; }; // specialization to make static string alike types::str template struct alike { static bool const value = true; using type = str; }; template struct alike { static bool const value = true; using type = str; }; template struct alike { static bool const value = true; using type = str; }; template struct alike, array_base> { static bool const value = sizeof...(Types) == N && alike::type>::type...>::value; using type = typename std::conditional< value, typename alike< T, typename std::remove_cv::type>::type...>::type, void>::type; }; template struct alike, std::tuple> : alike, array_base> { }; template struct alike { static bool const value = alike::value && alike::type>::value; using type = typename alike::type>::type; }; } template struct alike : details::alike::type>::type...> { }; // Pythonic implementation for make_tuple to have the best return type // (static array for sames types || real tuple otherwise) template struct _make_tuple { auto operator()(Types &&... types) -> decltype(std::make_tuple(std::forward(types)...)) { return std::make_tuple(std::forward(types)...); } }; template struct _make_tuple { types::array::type, sizeof...(Types)> operator()(Types &&... types) { return {{std::forward(types)...}}; } }; template auto make_tuple(Types &&... 
types) #if !_MSC_VER || __clang__ -> decltype(_make_tuple::value, Types...>()( std::forward(types)...)) #endif { return _make_tuple::value, Types...>()( std::forward(types)...); } template using make_tuple_t = decltype(types::make_tuple(std::declval()...)); template types::array _to_array(Tuple const &t, utils::index_sequence) { return {{static_cast(std::get(t))...}}; } template types::array to_array(std::tuple const &t) { return _to_array(t, utils::make_index_sequence()); } // Tuple concatenation for array && tuple template auto operator+(std::tuple const &t, types::array_base const <) -> decltype(std::tuple_cat(t, lt.to_tuple())); template auto operator+(types::array_base const <, std::tuple const &t) -> decltype(std::tuple_cat(lt.to_tuple(), t)); } template struct assignable> { using type = std::tuple::type...>; }; template struct assignable> { using type = pythonic::types::array_base::type, N, V>; }; template struct returnable> { using type = std::tuple::type...>; }; template struct returnable> { using type = pythonic::types::array_base::type, N, V>; }; PYTHONIC_NS_END /* specialize std::get */ namespace std { template typename pythonic::types::array_base::reference get(pythonic::types::array_base &t) { return t[I]; } template typename pythonic::types::array_base::const_reference get(pythonic::types::array_base const &t) { return t[I]; } template struct tuple_element> { using type = typename pythonic::types::array_base::value_type; }; template struct tuple_size> { static const size_t value = N; }; } /* hashable tuples, as proposed in * http://stackoverflow.com/questions/7110301/generic-hash-for-tuples-in-unordered-map-unordered-set */ namespace { inline size_t hash_combiner(size_t left, size_t right); // replacable template struct hash_impl { size_t operator()(size_t a, const std::tuple &t) const; }; template struct hash_impl<0, types...> { size_t operator()(size_t a, const std::tuple &t) const; }; } /* specialize std::hash */ namespace std { template struct 
hash> { size_t operator()(std::tuple const &t) const; }; template struct hash> { size_t operator()(pythonic::types::array_base const &l) const; }; } /* type inference stuff {*/ #include "pythonic/include/types/combined.hpp" template struct __combined, std::tuple> { using type = std::tuple; }; template struct __combined, indexable> { using type = std::tuple; }; template struct __combined, pythonic::types::static_list> { using type = pythonic::types::static_list; }; template struct __combined, pythonic::types::array> { using type = pythonic::types::array; }; template struct __combined, pythonic::types::array_base> { using type = pythonic::types::array_base::type, N, V>; }; template struct __combined, pythonic::types::static_list> { using type = pythonic::types::static_list::type, N>; }; template struct __combined, pythonic::types::static_list> { using type = pythonic::types::list::type>; }; template struct __combined, pythonic::types::array_base> { using type = pythonic::types::array_base; }; template struct __combined, indexable> { using type = pythonic::types::array_base; }; template struct __combined, pythonic::types::array_base> { using type = pythonic::types::array_base::type, N, V>; }; template struct __combined, container> { using type = pythonic::types::array_base::type, N, V>; }; template struct __combined, pythonic::types::array_base> { using type = pythonic::types::array_base::type, N, AV>; }; template struct __combined, indexable_container> { using type = pythonic::types::array_base::type, N, AV>; }; template struct __combined, std::tuple> { using type = std::tuple::type...>; }; template struct __combined, container> { using type = std::tuple; }; template struct __combined, std::tuple> { using type = std::tuple; }; PYTHONIC_NS_BEGIN namespace details { template struct pick_combined; template struct pick_combined { using type = typename __combined::type; }; template struct pick_combined { using type = P; }; } PYTHONIC_NS_END template struct __combined, 
indexable_container, t>> { using holder = std::tuple; template static std::tuple::type, I == Is>::type...> make_type(pythonic::utils::index_sequence); static auto make_type() -> decltype( make_type(pythonic::utils::make_index_sequence())); using type = decltype(make_type()); }; template struct __combined, std::tuple> : __combined, indexable_container> { }; template struct __combined, std::tuple> { using type = std::tuple::type...>; }; template struct __combined, pythonic::types::pshape> { using type = pythonic::types::array; }; template struct __combined, pythonic::types::array> { using type = pythonic::types::array; }; template struct __combined, pythonic::types::array> { using type = std::tuple::type...>; }; template struct __combined, std::pair> { using type = std::pair::type, typename __combined::type>; // no further combination }; /* } */ PYTHONIC_NS_BEGIN namespace types { template void print_tuple(std::ostream &os, Tuple const &t, utils::int_); template void print_tuple(std::ostream &os, Tuple const &t, utils::int_<0>); template struct len_of> { static constexpr long value = N; }; template struct len_of, Is...>>> { static constexpr long value = I; }; template struct len_of> { static constexpr long value = sizeof...(Types); }; } PYTHONIC_NS_END namespace std { template ostream &operator<<(ostream &os, tuple const &t); template long get(pythonic::types::pshape const &s) { return s.template get(); } template auto get(pythonic::types::pshape &s) -> decltype(s.template get()) { return s.template get(); } template auto get(T *s) -> decltype(s[I]) { return s[I]; } template long get(T const *s) { return s[I]; } template struct tuple_size> : public std::integral_constant { }; template struct tuple_element> { using type = typename std::tuple_element < I < sizeof...(Tys) ? 
I : 0, std::tuple> ::type; }; } PYTHONIC_NS_BEGIN namespace sutils { template struct make_shape { using type = T; }; template struct make_shape> { using type = types::array; }; template using shape_t = typename std::enable_if::value, typename make_shape::type>::type; template struct shape_merger; template struct shape_merger { using type = Curr; }; template struct shape_merger { using type = long; }; template struct shape_merger, std::integral_constant, Ss...> : shape_merger N1 ? N0 : N1)>, Ss...> { }; template struct shape_merger, Ss...> { using type = long; }; template struct shape_selecter : std::conditional< (I < std::tuple_size::value), typename std::tuple_element< (I < std::tuple_size::value ? I : 0L), Ss>::type, std::integral_constant> { }; template struct merge_shape; template struct merge_shape> { using type = typename shape_merger::type...>::type; }; template struct merged_shapes; template struct merged_shapes> { using type = types::pshape::type...>; }; template using merged_shapes_t = typename merged_shapes, utils::make_index_sequence>::type; template struct shape_commonifier; template struct shape_commonifier { using type = Ss; }; template struct shape_commonifier { using type = long; }; template struct shape_commonifier, long, Ss...> { using type = long; }; template struct shape_commonifier, std::integral_constant, Ss...> { using type = typename std::conditional< N0 == N1, typename shape_commonifier, Ss...>::type, long>::type; }; template struct common_shape; template struct common_shape> { using type = typename shape_commonifier< typename std::tuple_element::type...>::type; }; template struct common_shapes; template struct common_shapes> { using type = types::pshape::type...>; }; template using common_shapes_t = typename common_shapes, utils::make_index_sequence>::type; template struct transpose; template struct transpose> { using type = types::array; }; template struct transpose> { using type = types::pshape; }; template using transpose_t = typename 
transpose::type; template void assign(T0 &t0, T1 t1) { t0 = (T0)t1; } template void assign(std::integral_constant &t0, T1 t1) { assert((long)t0 == (long)t1 && "consistent"); } template void copy_shape(T0 &shape0, T1 const &shape1, utils::index_sequence) { (void)std::initializer_list{ (assign(std::get(shape0), shape1.template shape()), 1)...}; } template void scopy_shape(T0 &shape0, T1 const &shape1, utils::index_sequence) { (void)std::initializer_list{ (assign(std::get(shape0), std::get(shape1)), 1)...}; } template void copy_strides(T0 &stride0, T1 const &stride1, utils::index_sequence) { (void)std::initializer_list{ (assign(std::get(stride0), stride1.template strides()), 1)...}; } template struct pop_type; template struct pop_type, Ty> { using type = types::pshape; }; template struct pop_type, Ty, Tys...> : pop_type, Tys...> { }; template struct pop_tail; template struct pop_tail> { using type = typename pop_type, Tys...>::type; }; template struct pop_tail> { using type = types::array; }; template struct pop_head; template struct pop_head> { using type = types::pshape; }; template struct pop_head> { using type = types::array; }; template struct head; template struct head> { using type = Ty; }; template struct head> { using type = T; }; template using pop_head_t = typename pop_head::type; template using pop_tail_t = typename pop_tail::type; template using head_t = typename head::type; template types::array array(types::pshape const &pS) { return pS.array(); } template types::array_base array(types::array_base const &pS) { return pS; } template types::array getshape(E const &e, utils::index_sequence) { return {(long)(e.template shape())...}; } template auto getshape(E const &e) -> decltype(getshape(e, utils::make_index_sequence())) { return getshape(e, utils::make_index_sequence()); } template struct concat; template struct concat, types::pshape> { using type = types::pshape; }; template struct concat, types::array> { using type = types::pshape; }; template struct 
concat, types::array> : concat, types::array> { }; template struct concat, types::pshape> { using type = types::pshape; }; template struct concat, types::pshape> : concat, types::pshape> { }; template using concat_t = typename concat::type; template using push_front_t = concat_t, P>; template long find(S &s, long v, std::integral_constant, long start, bool comp(long, long)) { return comp(s.template shape<0>(), v) && 0 < start ? 0 : -1; } template long find(S &s, long v, std::integral_constant, long start, bool comp(long, long)) { return comp(s.template shape(), v) && I < start ? I : find(s, v, std::integral_constant(), start, comp); } template long find(S &s, long v, long start = S::value, bool comp(long, long) = [](long a, long b) { return (a == b); }) { return find(s, v, std::integral_constant(), start, comp); } template long sfind(S &s, long v, std::integral_constant, long start, bool comp(long, long)) { return comp(std::get<0>(s), v) && 0 < start ? 0 : -1; } template long sfind(S &s, long v, std::integral_constant, long start, bool comp(long, long)) { return comp(std::get(s), v) && (long)I < start ? 
(long)I : sfind(s, v, std::integral_constant(), start, comp); } template long sfind(S &s, long v, long start = std::tuple_size::value, bool comp(long, long) = [](long a, long b) { return (a == b); }) { return sfind( s, v, std::integral_constant::value - 1>(), start, comp); } template bool equals(S const &s, B const &other, std::integral_constant) { return std::get<0>(other) == s.template shape<0>(); } template bool equals(S const &s, B const &other, std::integral_constant) { return std::get(other) == s.template shape() && equals(s, other, std::integral_constant()); } template typename std::enable_if::value, bool>::type equals(S const &s, B const &other) { return equals(s, other, std::integral_constant()); } template typename std::enable_if< std::tuple_size::value != std::tuple_size::value, bool>::type equals(S const &s, B const &other) { return false; } template bool equals(S const &s, B *other) { return equals(s, other, std::integral_constant()); } template bool requals(S const &s, B const *other, std::integral_constant) { return other[S::value - 1] == s.template shape<0>(); } template bool requals(S const &s, B const *other, std::integral_constant) { return other[S::value - I - 1] == s.template shape() && requals(s, other, std::integral_constant()); } template bool requals(S const &s, B const *other) { return requals(s, other, std::integral_constant()); } template bool any_of(S const &s, P pred, std::integral_constant) { return pred(s.template shape<0>()); } template bool any_of(S const &s, P pred, std::integral_constant) { return pred(s.template shape()) || any_of(s, pred, std::integral_constant()); } template bool any_of(S const &s, Pred pred) { return any_of(s, pred, std::integral_constant()); } template long min(long curr, S const &s, std::integral_constant) { return std::min(curr, s.template shape<0>()); } template long min(long curr, S const &s, std::integral_constant) { return min(std::min(curr, s.template shape()), s, std::integral_constant()); } template 
long min(S const &s) { return min(s.template shape(), s, std::integral_constant()); } template long prod(S const &s, std::integral_constant) { return s.template shape<0>(); } template long prod(S const &s, std::integral_constant) { return s.template shape() * prod(s, std::integral_constant()); } template long prod(S const &s) { return prod(s, std::integral_constant()); } template long sprod(S const &s, std::integral_constant) { return std::get<0>(s); } template long sprod(S const &s, std::integral_constant) { return std::get(s) * sprod(s, std::integral_constant()); } template long sprod(S const &s) { return sprod( s, std::integral_constant::value - 1>()); } template long prod_tail(S, std::integral_constant) { return 1; } template long prod_tail(S const &s, std::integral_constant) { return s.template shape() * prod_tail(s, std::integral_constant()); } template long prod_tail(S const &s) { return prod_tail(s, std::integral_constant()); } template long prod_head(S, std::integral_constant) { return 1; } template long prod_head(S const &s, std::integral_constant) { return s.template shape() * prod_head(s, std::integral_constant()); } template long prod_head(S const &s) { return prod_head(s, std::integral_constant()); } template struct safe_tuple_element { using type = typename std::tuple_element<(I < std::tuple_size

(utils::make_index_sequence()); } } template long _argminmax_seq(E const &elts, T &minmax_elts) { long index = 0; long res = -1; for (auto const &elt : elts) { if (Op::value(elt, minmax_elts)) { minmax_elts = elt; res = index; } ++index; } return res; } template #ifdef USE_XSIMD typename std::enable_if< !E::is_vectorizable || !types::is_vector_op::value || std::is_same::value, long>::type #else long #endif _argminmax(E const &elts, T &minmax_elts, utils::int_<1>) { return _argminmax_seq(elts, minmax_elts); } template std::tuple _argminmax_fast(E const &elts, T &minmax_elts, long current_pos, utils::int_<1>, Indices... indices) { long res = -1; long n = elts.template shape::type::value - 1>(); for (long i = 0; i < n; ++i) { auto elt = elts.load(indices..., i); if (Op::value(elt, minmax_elts)) { minmax_elts = elt; res = current_pos + i; } } return std::make_tuple(res, current_pos + n); } #ifdef USE_XSIMD template struct bool_caster; template <> struct bool_caster { template auto operator()(T const &value) -> decltype(xsimd::bool_cast(value)) { return xsimd::bool_cast(value); } }; template <> struct bool_caster { template T operator()(T const &value) { return value; } }; template typename std::enable_if< E::is_vectorizable && types::is_vector_op::value && !std::is_same::value, long>::type _argminmax(E const &elts, T &minmax_elts, utils::int_<1>) { using vT = xsimd::simd_type; using iT = xsimd::as_integer_t; static const size_t vN = vT::size; const long n = elts.size(); if (n >= std::numeric_limits::max()) { return _argminmax_seq(elts, minmax_elts); } auto viter = types::vectorizer_nobroadcast::vbegin(elts), vend = types::vectorizer_nobroadcast::vend(elts); const long bound = std::distance(viter, vend); long minmax_index = -1; if (bound > 0) { auto vacc = *viter; iT iota[vN] = {0}; for (long i = 0; i < (long)vN; ++i) iota[i] = i; auto curr = xsimd::load_unaligned(iota); xsimd::simd_type indices = curr; xsimd::simd_type step{vN}; for (++viter; viter != vend; ++viter) { 
curr += step; auto c = *viter; vacc = typename Op::op{}(vacc, c); auto mask = c == vacc; indices = xsimd::select(bool_caster::value>{}(mask), curr, indices); } alignas(sizeof(vT)) T stored[vN]; vacc.store_aligned(&stored[0]); alignas(sizeof(vT)) long indexed[vN]; indices.store_aligned(&indexed[0]); for (size_t j = 0; j < vN; ++j) { if (Op::value(stored[j], minmax_elts)) { minmax_elts = stored[j]; minmax_index = indexed[j]; } } } auto iter = elts.begin() + bound * vN; for (long i = bound * vN; i < n; ++i, ++iter) { if (Op::value(*iter, minmax_elts)) { minmax_elts = *iter; minmax_index = i; } } return minmax_index; } #endif template long _argminmax(E const &elts, T &minmax_elts, utils::int_) { long current_pos = 0; long current_minmaxarg = 0; for (auto &&elt : elts) { long v = _argminmax(elt, minmax_elts, utils::int_()); if (v >= 0) current_minmaxarg = current_pos + v; current_pos += elt.flat_size(); } return current_minmaxarg; } template typename std::enable_if>::type _argminmax_fast(E const &elts, T &minmax_elts, long current_pos, utils::int_, Indices... 
indices) { long current_minmaxarg = 0; for (long i = 0, n = elts.template shape::type::value - N>(); i < n; ++i) { long v; std::tie(v, current_pos) = _argminmax_fast( elts, minmax_elts, current_pos, utils::int_(), indices..., i); if (v >= 0) current_minmaxarg = v; } return std::make_tuple(current_minmaxarg, current_pos); } template long argminmax(E const &expr) { if (!expr.flat_size()) throw types::ValueError("empty sequence"); using elt_type = typename E::dtype; elt_type argminmax_value = Op::limit(); #ifndef USE_XSIMD if (utils::no_broadcast_ex(expr)) { return std::get<0>(_argminmax_fast(expr, argminmax_value, 0, utils::int_())); } else #endif return _argminmax(expr, argminmax_value, utils::int_()); } template void _argminmax_tail(T &&out, E const &expr, long curr, V &&curr_minmax, std::integral_constant) { if (Op::value(expr, curr_minmax)) { out = curr; curr_minmax = expr; } } template typename std::enable_if::type _argminmax_tail(T &&out, E const &expr, long curr, V &&curr_minmax, std::integral_constant) { static_assert(N >= 1, "specialization ok"); long i = 0; for (auto &&elt : expr) { _argminmax_tail(out.fast(i), elt, curr, curr_minmax.fast(i), std::integral_constant()); ++i; } } template typename std::enable_if::type _argminmax_head(T &&out, E const &expr, std::integral_constant) { typename E::dtype val = Op::limit(); long i = 0; for (auto &&elt : expr) _argminmax_tail(out, elt, i++, val, std::integral_constant()); } template typename std::enable_if::type _argminmax_head(T &&out, E const &expr, std::integral_constant) { static_assert(N > 1, "specialization ok"); types::ndarray> val{ sutils::getshape(out), Op::limit()}; long i = 0; for (auto &&elt : expr) { _argminmax_tail(out, elt, i++, val, std::integral_constant()); } } template typename std::enable_if::type _argminmax_head(T &&out, E const &expr, std::integral_constant) { static_assert(N >= 1, "specialization ok"); auto out_iter = out.begin(); for (auto &&elt : expr) { _argminmax_head(*out_iter, elt, 
std::integral_constant()); ++out_iter; } } template void _argminmax_pick_axis(long axis, T &&out, E const &expr, utils::index_sequence) { (void)std::initializer_list{ ((Axis == axis) && (_argminmax_head( out, expr, std::integral_constant()), true))...}; } template types::ndarray> argminmax(E const &array, long axis) { if (axis < 0) axis += E::value; if (axis < 0 || size_t(axis) >= E::value) throw types::ValueError("axis out of bounds"); auto shape = sutils::getshape(array); types::array shp; auto next = std::copy(shape.begin(), shape.begin() + axis, shp.begin()); std::copy(shape.begin() + axis + 1, shape.end(), next); types::ndarray> out{shp, builtins::None}; _argminmax_pick_axis(axis, out, array, utils::make_index_sequence()); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/argsort.hpp000066400000000000000000000022501416264035500226320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARGSORT_HPP #define PYTHONIC_NUMPY_ARGSORT_HPP #include "pythonic/include/numpy/argsort.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/pdqsort.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray argsort(types::ndarray const &a) { constexpr auto N = std::tuple_size::value; size_t last_axis = a.template shape(); size_t n = a.flat_size(); types::ndarray indices(a._shape, builtins::None); for (long j = 0, *iter_indices = indices.buffer, *end_indices = indices.buffer + n; iter_indices != end_indices; iter_indices += last_axis, j += last_axis) { // fill with the original indices std::iota(iter_indices, iter_indices + last_axis, 0L); // sort the index using the value from a pdqsort(iter_indices, iter_indices + last_axis, [&a, j](long i1, long i2) { return *(a.fbegin() + j + i1) < *(a.fbegin() + j + i2); }); } return indices; } NUMPY_EXPR_TO_NDARRAY0_IMPL(argsort); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/argwhere.hpp000066400000000000000000000022121416264035500227530ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARGWHERE_HPP #define PYTHONIC_NUMPY_ARGWHERE_HPP #include "pythonic/include/numpy/argwhere.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename types::ndarray> argwhere(E const &expr) { constexpr long N = E::value; auto arr = asarray(expr); long sz = arr.flat_size(); auto eshape = sutils::getshape(arr); utils::shared_ref> buffer(sz * N); // too much memory used long *buffer_iter = buffer->data; long real_sz = 0; auto iter = arr.fbegin(); for (long i = 0; i < sz; ++i, ++iter) { if (*iter) { ++real_sz; long mult = 1; for (long j = N - 1; j > 0; j--) { buffer_iter[j] = (i / mult) % eshape[j]; mult *= eshape[j]; } buffer_iter[0] = i / mult; buffer_iter += N; } } types::array shape = {real_sz, N}; return {buffer, shape}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/around.hpp000066400000000000000000000043031416264035500224420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_AROUND_HPP #define PYTHONIC_NUMPY_AROUND_HPP #include "pythonic/include/numpy/around.hpp" #include "pythonic/numpy/rint.hpp" #include "pythonic/numpy/power.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/numpy/floor_divide.hpp" #include "pythonic/numpy/float64.hpp" #include "pythonic/numpy/multiply.hpp" PYTHONIC_NS_BEGIN namespace numpy { // fast path template auto around(E &&a) -> decltype(functor::rint{}(std::forward(a))) { return functor::rint{}(std::forward(a)); } // generic floating point version, pure numpy_expr template auto around(E &&a, long decimals) -> typename std::enable_if< !std::is_integral< typename types::dtype_of::type>::type>::value, decltype(functor::rint{}(functor::multiply{}( std::forward(a), std::declval::type>::type>())) / std::declval::type>::type>())>::type { 
typename types::dtype_of::type>::type const fact = functor::power{}(10., decimals); return functor::rint{}(functor::multiply{}(std::forward(a), fact)) / fact; } // the integer version is only relevant when decimals < 0 template auto around(E &&a, long decimals) -> typename std::enable_if< std::is_integral< typename types::dtype_of::type>::type>::value, decltype(numpy::functor::floor_divide{}( functor::float64{}(std::forward(a)), std::declval::type>::type>()) * std::declval::type>::type>())>::type { typename types::dtype_of::type>::type const fact = functor::power{}(10L, std::max(0L, -decimals)); return pythonic::numpy::functor::floor_divide{}( functor::float64{}(std::forward(a)), fact) * fact; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/array.hpp000066400000000000000000000043701416264035500222740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARRAY_HPP #define PYTHONIC_NUMPY_ARRAY_HPP #include "pythonic/include/numpy/array.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/nested_container.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< types::has_size::type>::value, types::ndarray::type::value>>>::type array(T &&iterable, dtype d) { return {std::forward(iterable)}; } template typename std::enable_if< !types::has_size::type>::value && !types::is_dtype::type>::value, types::ndarray::type::value>>>::type array(T &&iterable, dtype d) { types::list::type::value_type> tmp{iterable.begin(), iterable.end()}; return {tmp}; } template typename std::enable_if< !types::has_size::type>::value && types::is_dtype::type>::value, typename dtype::type>::type array(T &&non_iterable, dtype d) { return non_iterable; } template types::ndarray>> array(std::tuple<>, dtype) { return {types::pshape>{}, types::none_type{}}; } template types::ndarray array(types::ndarray const &arr) { return arr.copy(); } template types::ndarray::shape_t> array(types::array_base const &a, dtype) { 
return {a}; } template types::ndarray::shape_t> array(types::array_base &&a, dtype) { return {std::move(a)}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/array2string.hpp000066400000000000000000000007021416264035500236000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARRAY2STRING_HPP #define PYTHONIC_NUMPY_ARRAY2STRING_HPP #include "pythonic/include/numpy/array2string.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::str array2string(E &&a) { std::ostringstream oss; oss << std::forward(a); return oss.str(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/array_equal.hpp000066400000000000000000000010231416264035500234530ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARRAYEQUAL_HPP #define PYTHONIC_NUMPY_ARRAYEQUAL_HPP #include "pythonic/include/numpy/array_equal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/all.hpp" #include "pythonic/numpy/equal.hpp" PYTHONIC_NS_BEGIN namespace numpy { template bool array_equal(U const &u, V const &v) { if (sutils::getshape(u) == sutils::getshape(v)) return all(functor::equal{}(u, v)); return false; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/array_equiv.hpp000066400000000000000000000022331416264035500235010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARRAYEQUIV_HPP #define PYTHONIC_NUMPY_ARRAYEQUIV_HPP #include "pythonic/include/numpy/array_equiv.hpp" #include "pythonic/numpy/array_equal.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template bool _array_equiv(I0 vbegin, I0 vend, U const &uu) { for (; vbegin != vend; ++vbegin) if (!array_equiv(uu, *vbegin)) return false; return true; } } template typename std::enable_if::type array_equiv(U const &u, V const &v) { return array_equal(u, v); } template typename std::enable_if < 
U::value::type array_equiv(U const &u, V const &v) { if (v.flat_size() % u.flat_size() == 0) // requires allocation for u' as it is used multiple times. return _array_equiv(v.begin(), v.end(), asarray(u)); return false; } template typename std::enable_if<(U::value > V::value), bool>::type array_equiv(U const &u, V const &v) { return array_equiv(v, u); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/array_split.hpp000066400000000000000000000034471416264035500235130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARRAYSPLIT_HPP #define PYTHONIC_NUMPY_ARRAYSPLIT_HPP #include "pythonic/include/numpy/array_split.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::list::value>>> array_split(types::ndarray const &a, long nb_split) { long sz = std::distance(a.begin(), a.end()); long n = (sz + nb_split - 1) / nb_split; long end = n * nb_split; long nb_full_split = nb_split; if (end != sz) nb_full_split -= (end - sz); types::list::value>>> out(nb_split); long index = 0; for (long i = 0; i < nb_full_split; ++i, index += n) out[i] = a[types::contiguous_slice(index, index + n)]; for (long i = nb_full_split; i < nb_split; ++i, index += (n - 1)) out[i] = a[types::contiguous_slice(index, index + n - 1)]; return out; } template typename std::enable_if< types::is_iterable::value, types::list::value>>>>::type array_split(types::ndarray const &a, I const &split_mask) { long sz = std::distance(a.begin(), a.end()); types::list< types::ndarray::value>>> out(1 + split_mask.flat_size()); long index = 0; auto inserter = out.begin(); for (auto next_index : split_mask) { *inserter++ = a[types::contiguous_slice(index, next_index)]; index = next_index; } *inserter = a[types::contiguous_slice(index, sz)]; return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(array_split); } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/array_str.hpp000066400000000000000000000002541416264035500231610ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARRAYSTR_HPP #define PYTHONIC_NUMPY_ARRAYSTR_HPP #include "pythonic/include/numpy/array_str.hpp" #include "pythonic/numpy/array2string.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/asarray.hpp000066400000000000000000000026031416264035500226150ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ASARRAY_HPP #define PYTHONIC_NUMPY_ASARRAY_HPP #include "pythonic/include/numpy/asarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/array.hpp" PYTHONIC_NS_BEGIN namespace numpy { template template auto _asarray::operator()(Types &&... args) -> decltype(array(std::forward(args)...)) { return array(std::forward(args)...); } template template F &&_asarray, T>::operator()(F &&a, dtype) { return std::forward(a); } template auto asarray(E &&e, types::none_type d) -> decltype( _asarray::type, typename types::dtype_of::type>::type>{}( std::forward(e))) { return _asarray< typename std::decay::type, typename types::dtype_of::type>::type>{}( std::forward(e)); } template auto asarray(E &&e, dtype d) -> decltype(_asarray::type, typename dtype::type>{}(std::forward(e), d)) { return _asarray::type, typename dtype::type>{}( std::forward(e), d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/asarray_chkfinite.hpp000066400000000000000000000013501416264035500246370ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ASARRAYCHKFINITE_HPP #define PYTHONIC_NUMPY_ASARRAYCHKFINITE_HPP #include "pythonic/include/numpy/asarray_chkfinite.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/isfinite.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template I asarray_chkfinite(I const &a) { if (!functor::isfinite()(a)) throw 
types::ValueError("array must ! contain infs || NaNs"); return a; } } #define NUMPY_NARY_FUNC_NAME asarray_chkfinite #define NUMPY_NARY_FUNC_SYM wrapper::asarray_chkfinite #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ascontiguousarray.hpp000066400000000000000000000003011416264035500247260ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ASCONTIGUOUSARRAY_HPP #define PYTHONIC_NUMPY_ASCONTIGUOUSARRAY_HPP #include "pythonic/include/numpy/ascontiguousarray.hpp" #include "pythonic/numpy/asarray.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/asfarray.hpp000066400000000000000000000007731416264035500227710ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ASFARRAY_HPP #define PYTHONIC_NUMPY_ASFARRAY_HPP #include "pythonic/include/numpy/asfarray.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto asfarray(E &&e, dtype d) -> decltype(asarray(std::forward(e), d)) { static_assert(std::is_floating_point::value, "expected a floating point type"); return asarray(std::forward(e), d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/asscalar.hpp000066400000000000000000000011361416264035500227440ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ASSCALAR_HPP #define PYTHONIC_NUMPY_ASSCALAR_HPP #include "pythonic/include/numpy/asscalar.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { template asscalar_result_type asscalar(E const &expr) { if (expr.flat_size() != 1) throw types::ValueError( "can only convert an array of size 1 to a Python scalar"); return *asarray(expr).fbegin(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/atleast_1d.hpp000066400000000000000000000012531416264035500231740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ATLEAST1D_HPP #define 
PYTHONIC_NUMPY_ATLEAST1D_HPP #include "pythonic/include/numpy/atleast_1d.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< types::is_dtype::value, types::ndarray>>>::type atleast_1d(T t) { return {types::pshape>(), t}; } template auto atleast_1d(T const &t) -> typename std::enable_if::value), decltype(asarray(t))>::type { return asarray(t); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/atleast_2d.hpp000066400000000000000000000030521416264035500231740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ATLEAST2D_HPP #define PYTHONIC_NUMPY_ATLEAST2D_HPP #include "pythonic/include/numpy/atleast_2d.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< types::is_dtype::value, types::ndarray, std::integral_constant>>>::type atleast_2d(T t) { return {types::pshape, std::integral_constant>(), t}; } template auto atleast_2d(T const &t) -> typename std::enable_if < (!types::is_dtype::value) && T::value<2, types::ndarray< typename T::dtype, types::pshape, typename std::tuple_element< 0, typename T::shape_t>::type>>>::type { return t.reshape(types::pshape< std::integral_constant, typename std::tuple_element<0, typename T::shape_t>::type>( std::integral_constant(), t.template shape<0>())); } template auto atleast_2d(T &&t) -> typename std::enable_if< (!types::is_dtype::type>::type>::value) && std::decay::type::value >= 2, decltype(std::forward(t))>::type { return std::forward(t); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/atleast_3d.hpp000066400000000000000000000050271416264035500232010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ATLEAST3D_HPP #define PYTHONIC_NUMPY_ATLEAST3D_HPP #include "pythonic/include/numpy/atleast_3d.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< types::is_dtype::value, types::ndarray, std::integral_constant, 
std::integral_constant>>>::type atleast_3d(T t) { return {types::pshape, std::integral_constant, std::integral_constant>(), t}; } template auto atleast_3d(T const &t) -> typename std::enable_if< (!types::is_dtype::value) && (T::value == 1), types::ndarray, typename std::tuple_element< 0, typename T::shape_t>::type, std::integral_constant>>>::type { auto r = asarray(t); return r.reshape( types::pshape, typename std::tuple_element<0, typename T::shape_t>::type, std::integral_constant>( std::integral_constant(), r.template shape<0>(), std::integral_constant())); } template auto atleast_3d(T const &t) -> typename std::enable_if< (!types::is_dtype::value) && (T::value == 2), types::ndarray< typename T::dtype, types::pshape< typename std::tuple_element<0, typename T::shape_t>::type, typename std::tuple_element<1, typename T::shape_t>::type, std::integral_constant>>>::type { auto r = asarray(t); return r.reshape( types::pshape::type, typename std::tuple_element<1, typename T::shape_t>::type, std::integral_constant>( r.template shape<0>(), r.template shape<1>(), std::integral_constant())); } template auto atleast_3d(T const &t) -> typename std::enable_if<(!types::is_dtype::value) && T::value >= 3, decltype(asarray(t))>::type { return asarray(t); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/average.hpp000066400000000000000000000017051416264035500225670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_AVERAGE_HPP #define PYTHONIC_NUMPY_AVERAGE_HPP #include "pythonic/include/numpy/average.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/numpy/sum.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto average(E const &expr, types::none_type const &axis) -> decltype(sum(expr, axis) / 1.) { return sum(expr, axis) / double(expr.flat_size()); } template auto average(E const &expr, long axis) -> decltype(sum(expr, axis) / 1.) 
{ auto shape = sutils::getshape(expr); return sum(expr, axis) / double(shape[axis]); } template auto average(E const &expr, types::none_type const &axis, W const &weights) -> decltype(average(expr *asarray(weights) / average(asarray(weights)))) { auto aweights = asarray(weights); auto weighted_expr = expr * aweights / average(aweights); return average(weighted_expr); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/base_repr.hpp000066400000000000000000000022231416264035500231130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BASEREPR_HPP #define PYTHONIC_NUMPY_BASEREPR_HPP #include "pythonic/include/numpy/base_repr.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { types::str base_repr(long number, long base, long padding) { types::str res; // check that the base if valid if (base < 2 || base > 16) { return res; } int const ndigits = (number == 0 ? 1 : std::ceil(std::log(std::labs(number)) / std::log(base))); int const effective_padding = padding - ((number == 0) && (padding > 0) ? 1 : 0); res.resize(ndigits + effective_padding + (number < 0 ? 
1 : 0)); // Apply negative sign auto it = res.chars().begin(); if (number < 0) *it++ = '-'; // Apply padding std::fill(it, std::next(it, effective_padding), '0'); auto rit = res.chars().rbegin(); long quotient = std::labs(number); do { const long tmp = quotient / base; *rit++ = "0123456789ABCDEF"[quotient - (tmp * base)]; quotient = tmp; } while (quotient); return res; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/binary_repr.hpp000066400000000000000000000017441416264035500234740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BINARYREPR_HPP #define PYTHONIC_NUMPY_BINARYREPR_HPP #include "pythonic/include/numpy/binary_repr.hpp" #include "pythonic/numpy/base_repr.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace details { char *int2bin(long a, char *buffer, int buf_size) { buffer += (buf_size - 1); buffer[1] = 0; for (int i = 0; i < buf_size; ++i) { *buffer-- = (a & 1) + '0'; a >>= 1; } return buffer; } } types::str binary_repr(long number, types::none_type width) { return base_repr(number, 2); } types::str binary_repr(long number, long width) { types::str out = binary_repr(std::abs(number)); if (number >= 0) return base_repr(number, 2, width - out.size()); else { std::unique_ptr mem{new char[width + 1]}; details::int2bin(number, mem.get(), width); auto res = types::str(mem.get()); return res; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bincount.hpp000066400000000000000000000036221416264035500227760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BINCOUNT_HPP #define PYTHONIC_NUMPY_BINCOUNT_HPP #include "pythonic/include/numpy/bincount.hpp" #include "pythonic/numpy/max.hpp" #include "pythonic/utils/numpy_conversion.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value == 1, types::ndarray>>::type bincount(types::ndarray const &expr, types::none_type weights, types::none minlength) { long length = 0; if (minlength) length = (long)minlength; length = std::max(length, 1 + 
max(expr)); types::ndarray> out(types::pshape(length), 0L); for (auto iter = expr.fbegin(), end = expr.fend(); iter != end; ++iter) ++out[*iter]; return out; } template typename std::enable_if< std::tuple_size::value == 1, types::ndarray() * std::declval()), types::pshape>>::type bincount(types::ndarray const &expr, E const &weights, types::none minlength) { long length = 0; if (minlength) length = (long)minlength; length = std::max(length, 1 + max(expr)); typename std::enable_if< std::tuple_size::value == 1, types::ndarray() * std::declval()), types::pshape>>::type out(types::pshape(length), 0L); auto iweight = weights.begin(); for (auto iter = expr.fbegin(), end = expr.fend(); iter != end; ++iter, ++iweight) out[*iter] += *iweight; return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(bincount); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_and.hpp000066400000000000000000000010341416264035500234400ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_AND_HPP #define PYTHONIC_NUMPY_BITWISE_AND_HPP #include "pythonic/include/numpy/bitwise_and.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/and_.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME bitwise_and #define NUMPY_NARY_FUNC_SYM pythonic::operator_::and_ #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_and/000077500000000000000000000000001416264035500227315ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_and/accumulate.hpp000066400000000000000000000002731416264035500255670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_AND_ACCUMULATE_HPP #define PYTHONIC_NUMPY_BITWISE_AND_ACCUMULATE_HPP #define UFUNC_NAME bitwise_and #include "pythonic/numpy/ufunc_accumulate.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_and/reduce.hpp000066400000000000000000000004461416264035500247150ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_AND_REDUCE_HPP #define PYTHONIC_NUMPY_BITWISE_AND_REDUCE_HPP #define UFUNC_NAME bitwise_and #define UFUNC_INAME iand #include "pythonic/include/numpy/bitwise_and/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_not.hpp000066400000000000000000000014141416264035500235000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISENOT_HPP #define PYTHONIC_NUMPY_BITWISENOT_HPP #include "pythonic/include/numpy/bitwise_not.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template decltype(~std::declval()) bitwise_not(A const &a) { return ~a; } bool bitwise_not(bool t0) { return !t0; } } #define NUMPY_NARY_FUNC_NAME bitwise_not #define NUMPY_NARY_FUNC_SYM wrapper::bitwise_not #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END // ndarray have to be include after as bitwise_not is used as a numpy_operator #include "pythonic/types/ndarray.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_or.hpp000066400000000000000000000010261416264035500233170ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_OR_HPP #define PYTHONIC_NUMPY_BITWISE_OR_HPP #include "pythonic/include/numpy/bitwise_or.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/operator_/or_.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME bitwise_or #define NUMPY_NARY_FUNC_SYM pythonic::operator_::or_ #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_or/000077500000000000000000000000001416264035500226075ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_or/accumulate.hpp000066400000000000000000000002701416264035500254420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_OR_ACCUMULATE_HPP #define PYTHONIC_NUMPY_BITWISE_OR_ACCUMULATE_HPP #define UFUNC_NAME bitwise_or #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_or/reduce.hpp000066400000000000000000000004411416264035500245660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_OR_REDUCE_HPP #define PYTHONIC_NUMPY_BITWISE_OR_REDUCE_HPP #define UFUNC_NAME bitwise_or #define UFUNC_INAME ior #include "pythonic/include/numpy/bitwise_or/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_xor.hpp000066400000000000000000000010341416264035500235060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_XOR_HPP #define PYTHONIC_NUMPY_BITWISE_XOR_HPP #include "pythonic/include/numpy/bitwise_xor.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/xor_.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME bitwise_xor #define NUMPY_NARY_FUNC_SYM pythonic::operator_::xor_ #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_xor/000077500000000000000000000000001416264035500227775ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_xor/accumulate.hpp000066400000000000000000000002731416264035500256350ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_XOR_ACCUMULATE_HPP #define PYTHONIC_NUMPY_BITWISE_XOR_ACCUMULATE_HPP #define UFUNC_NAME bitwise_xor #include 
"pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bitwise_xor/reduce.hpp000066400000000000000000000004461416264035500247630ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BITWISE_XOR_REDUCE_HPP #define PYTHONIC_NUMPY_BITWISE_XOR_REDUCE_HPP #define UFUNC_NAME bitwise_xor #define UFUNC_INAME ixor #include "pythonic/include/numpy/bitwise_xor/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/bool_.hpp000066400000000000000000000011451416264035500222450ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BOOL_HPP #define PYTHONIC_NUMPY_BOOL_HPP #include "pythonic/include/numpy/bool_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { bool bool_() { return bool(); } template bool bool_(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME bool_ #define NUMPY_NARY_FUNC_SYM details::bool_ #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/broadcast_to.hpp000066400000000000000000000022131416264035500236140ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BROADCAST_TO_HPP #define PYTHONIC_NUMPY_BROADCAST_TO_HPP #include "pythonic/include/numpy/broadcast_to.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/empty.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto broadcast_to(E const &expr, pS shape) -> decltype(numpy::functor::empty{}( shape, typename types::dtype_t::type>{})) { using dtype = typename types::dtype_of::type; using BExpr = typename std::conditional::value, types::broadcast, E const &>::type; auto out = numpy::functor::empty{}(shape, typename types::dtype_t{}); using array_type = decltype(out); BExpr bexpr = expr; utils::broadcast_copy::value, 
std::remove_reference::type::is_vectorizable>( out, bexpr); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/byte.hpp000066400000000000000000000011341416264035500221140ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_BYTE_HPP #define PYTHONIC_NUMPY_BYTE_HPP #include "pythonic/include/numpy/byte.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { char byte() { return {}; } template char byte(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME byte #define NUMPY_NARY_FUNC_SYM details::byte #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/cbrt.hpp000066400000000000000000000006341416264035500221070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CBRT_HPP #define PYTHONIC_NUMPY_CBRT_HPP #include "pythonic/include/numpy/cbrt.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME cbrt #define NUMPY_NARY_FUNC_SYM xsimd::cbrt #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ceil.hpp000066400000000000000000000006341416264035500220710ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CEIL_HPP #define PYTHONIC_NUMPY_CEIL_HPP #include "pythonic/include/numpy/ceil.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME ceil #define NUMPY_NARY_FUNC_SYM xsimd::ceil #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/clip.hpp000066400000000000000000000015031416264035500221000ustar00rootroot00000000000000#ifndef 
PYTHONIC_NUMPY_CLIP_HPP #define PYTHONIC_NUMPY_CLIP_HPP #include "pythonic/include/numpy/clip.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template typename __combined::type clip(T const &v, Mi a_min, Ma a_max) { if (v < a_min) return a_min; else if (v > a_max) return a_max; else return v; } template typename __combined::type clip(T const &v, Mi a_min) { if (v < a_min) return a_min; else return v; } } #define NUMPY_NARY_FUNC_NAME clip #define NUMPY_NARY_FUNC_SYM wrapper::clip #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/complex.hpp000066400000000000000000000007701416264035500226250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COMPLEX_HPP #define PYTHONIC_NUMPY_COMPLEX_HPP #include "pythonic/include/numpy/complex.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/complex.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex(double v, double v2) { return {v, v2}; } } #define NUMPY_NARY_FUNC_NAME complex #define NUMPY_NARY_FUNC_SYM details::complex #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/complex128.hpp000066400000000000000000000012451416264035500230560ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COMPLEX128_HPP #define PYTHONIC_NUMPY_COMPLEX128_HPP #include "pythonic/include/numpy/complex128.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex128() { return {}; } template std::complex complex128(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME complex128 #define NUMPY_NARY_FUNC_SYM details::complex128 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/complex256.hpp000066400000000000000000000012571416264035500230630ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COMPLEX256_HPP #define PYTHONIC_NUMPY_COMPLEX256_HPP #include "pythonic/include/numpy/complex256.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex256() { return {}; } template std::complex complex256(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME complex256 #define NUMPY_NARY_FUNC_SYM details::complex256 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/complex64.hpp000066400000000000000000000015141416264035500227740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COMPLEX64_HPP #define PYTHONIC_NUMPY_COMPLEX64_HPP #include "pythonic/include/numpy/complex64.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/complex.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { std::complex complex64() { return {}; } template std::complex complex64(V v) { return v; } template std::complex complex64(std::complex v) { return {(float)v.real(), (float)v.imag()}; } } #define NUMPY_NARY_FUNC_NAME complex64 #define NUMPY_NARY_FUNC_SYM details::complex64 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/concatenate.hpp000066400000000000000000000151701416264035500234420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CONCATENATE_HPP #define PYTHONIC_NUMPY_CONCATENATE_HPP #include "pythonic/include/numpy/concatenate.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/sum.hpp" #include 
"pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template struct concatenate_helper { // list version template void operator()(Out &&out, A const &from, long axis) const { if (axis == 0) { auto out_iter = out.begin(); for (auto &&ifrom : from) out_iter = std::copy(ifrom.begin(), ifrom.end(), out_iter); } else { std::vector::type> ifroms; for (auto &&ifrom : from) ifroms.emplace_back(ifrom.begin()); std::vector::type> difroms; for (auto &&iout : out) { difroms.clear(); for (auto &&ifrom : ifroms) difroms.emplace_back(*ifrom); concatenate_helper()(iout, difroms, axis - 1); for (auto &ifrom : ifroms) ++ifrom; } } } // array version template void operator()(Out &&out, A const &from, long axis, utils::index_sequence) const { if (axis == 0) { auto out_iter = out.begin(); (void)std::initializer_list{ (out_iter = std::copy(std::get(from).begin(), std::get(from).end(), out_iter), 1)...}; } else { types::array ifroms = {std::get(from).begin()...}; for (auto &&iout : out) { types::array< typename std::iterator_traits< typename A::value_type::const_iterator>::value_type, sizeof...(I)> difroms = {*std::get(ifroms)...}; concatenate_helper()(iout, difroms, axis - 1, utils::index_sequence{}); (void)std::initializer_list{(++std::get(ifroms), 0)...}; } } } // tuple version template void operator()(Out &&out, std::tuple const &from, long axis, utils::index_sequence) const { if (axis == 0) { auto out_iter = out.begin(); (void)std::initializer_list{ (out_iter = std::copy(std::get(from).begin(), std::get(from).end(), out_iter), 1)...}; } else { auto ifroms = std::make_tuple(std::get(from).begin()...); for (auto &&iout : out) { auto difroms = std::make_tuple(*std::get(ifroms)...); concatenate_helper()(iout, difroms, axis - 1, utils::index_sequence{}); (void)std::initializer_list{(++std::get(ifroms), 0)...}; } } } }; template <> struct concatenate_helper<0> { // list version - sentinel template void operator()(Out &&buffer, A const &from, long 
axis) const { } // array version template void operator()(Out &&, E const &, long, utils::index_sequence) const { } // tuple version - sentinel template void operator()(Out &&, std::tuple const &, long, utils::index_sequence) const { } }; template long concatenate_axis_size(A const &from, long axis, utils::index_sequence) { long sizes[] = {sutils::getshape(std::get(from))[axis]...}; return std::accumulate(std::begin(sizes), std::end(sizes), 0L, std::plus()); } } template auto concatenate(std::tuple const &args, long axis) -> types::ndarray< typename __combined::type::dtype...>::type, types::array< long, std::tuple_element<0, std::tuple>::type::value>> { using T = typename __combined::type::dtype...>::type; auto constexpr N = std::decay(args))>::type::value; auto shape = sutils::getshape(std::get<0>(args)); shape[axis] = details::concatenate_axis_size( args, axis, utils::make_index_sequence{}); types::ndarray< typename __combined::type::dtype...>::type, types::array< long, std::decay(args))>::type::value>> result{ shape, types::none_type{}}; details::concatenate_helper()( result, args, axis, utils::make_index_sequence{}); return result; } template types::ndarray> concatenate(types::array_base const &args, long axis) { using T = typename E::dtype; auto constexpr N = E::value; auto shape = sutils::getshape(std::get<0>(args)); shape[axis] = details::concatenate_axis_size( args, axis, utils::make_index_sequence{}); types::ndarray> out( shape, types::none_type{}); details::concatenate_helper()(out, args, axis, utils::make_index_sequence{}); return out; } template types::ndarray> concatenate(types::list const &ai, long axis) { using return_type = types::ndarray>; using T = typename return_type::dtype; auto constexpr N = return_type::value; auto shape = sutils::getshape(ai[0]); shape[axis] = std::accumulate(ai.begin(), ai.end(), 0L, [axis](long v, E const &from) { return v + sutils::getshape(from)[axis]; }); return_type out{shape, types::none_type{}}; 
details::concatenate_helper()(out, ai, axis); return out; ; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/conj.hpp000066400000000000000000000002341416264035500221020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CONJ_HPP #define PYTHONIC_NUMPY_CONJ_HPP #include "pythonic/include/numpy/conj.hpp" #include "pythonic/numpy/conjugate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/conjugate.hpp000066400000000000000000000006211416264035500231300ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CONJUGATE_HPP #define PYTHONIC_NUMPY_CONJUGATE_HPP #include "pythonic/include/numpy/conjugate.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME conjugate #define NUMPY_NARY_FUNC_SYM wrapper::conjugate #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/convolve.hpp000066400000000000000000000020541416264035500230060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CONVOLVE_HPP #define PYTHONIC_NUMPY_CONVOLVE_HPP #include "pythonic/include/numpy/convolve.hpp" #include "pythonic/numpy/correlate.hpp" #include "pythonic/numpy/flip.hpp" #include "pythonic/numpy/conjugate.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> convolve(A const &inA, B const &inB, U type) { auto inB_flipped = functor::flip{}(inB, 0); auto inB_flip_conj = functor::conjugate{}(inB_flipped); return functor::correlate{}(inA, inB_flip_conj, type); } template types::ndarray> convolve(A const &inA, B const &inB) { auto inB_flipped = functor::flip{}(inB, 0); auto inB_flip_conj = functor::conjugate{}(inB_flipped); return functor::correlate{}(inA, inB_flip_conj, "full"); } NUMPY_EXPR_TO_NDARRAY0_IMPL(convolve) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/copy.hpp000066400000000000000000000024121416264035500221230ustar00rootroot00000000000000#ifndef 
PYTHONIC_NUMPY_COPY_HPP #define PYTHONIC_NUMPY_COPY_HPP #include "pythonic/include/numpy/copy.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { // list case template typename std::enable_if< !types::is_array::value && !types::is_dtype::value, types::ndarray>>::type copy(E const &v) { return {v}; } // scalar / complex case template auto copy(E const &v) -> typename std::enable_if::value, E>::type { return v; } // No copy is required for numpy_expr template auto copy(E &&v) -> typename std::enable_if::value, decltype(std::forward(v))>::type { return std::forward(v); } // ndarray case template types::ndarray copy(types::ndarray const &a) { return a.copy(); } // transposed ndarray case template types::numpy_texpr> copy(types::numpy_texpr> const &a) { return a.arg.copy(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/copysign.hpp000066400000000000000000000007361416264035500230130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COPYSIGN_HPP #define PYTHONIC_NUMPY_COPYSIGN_HPP #include "pythonic/include/numpy/copysign.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME copysign #define NUMPY_NARY_FUNC_SYM xsimd::copysign #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/copysign/000077500000000000000000000000001416264035500222745ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/copysign/accumulate.hpp000066400000000000000000000002621416264035500251300ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COPYSIGN_ACCUMULATE_HPP #define PYTHONIC_NUMPY_COPYSIGN_ACCUMULATE_HPP #define UFUNC_NAME copysign #include "pythonic/numpy/ufunc_accumulate.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/copyto.hpp000066400000000000000000000007101416264035500224650ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COPYTO_HPP #define PYTHONIC_NUMPY_COPYTO_HPP #include "pythonic/include/numpy/copyto.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray copyto(types::ndarray &out, E const &expr) { out[types::contiguous_slice(0, types::none_type{})] = expr; return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/correlate.hpp000066400000000000000000000076641416264035500231470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CORRELATE_HPP #define PYTHONIC_NUMPY_CORRELATE_HPP #include "pythonic/include/numpy/correlate.hpp" #include "pythonic/numpy/dot.hpp" #include "pythonic/numpy/conjugate.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> do_correlate(A const &inA, B const &inB, types::str const &type, int out_inc) // out_inc is used to indicate the inputs were swapped, which means that the // output must be time reversed and conjugated { auto shapeA = sutils::getshape(inA); auto shapeB = sutils::getshape(inB); long NA = shapeA[0]; long NB = shapeB[0]; using out_type = typename __combined::type; // At this point, handling views would slow things down tremendously auto inA_ = functor::asarray{}(inA); auto inB_ = functor::asarray{}(inB); auto outN = 0; int iLeft; if (type == "full") { outN = NA + NB - 1; iLeft = -NB + 1; } else if (type == "valid") { outN = NA - NB + 1; iLeft = 0; } else { assert(type == "same" && "valid type"); outN = NA; iLeft = -NB + 1 + (NB - 1) / 2; } // We need outN output values, no matter what. int iRight = iLeft + outN; // Allocate output array types::ndarray> out = {outN, out_type()}; out_type *out_ptr = (out_type *)out.buffer; // if out_inc is -1, we reverse the output. 
if (out_inc == -1) out_ptr += outN - 1; // For small correlations, numpy uses small_correlate, far more efficient. // see numpy/core/src/multiarray/arraytypes.c.src if (out_inc == 1) { // Incomplete overlap left for (int i = iLeft; i < 0; i++, out_ptr++) { *out_ptr = numpy::dot(inA_(types::contiguous_slice(0, NB + i)), inB_(types::contiguous_slice(-i, NB))); } // Complete overlap middle for (int i = 0; i <= NA - NB; i++, out_ptr++) { *out_ptr = numpy::dot(inA_(types::contiguous_slice(i, i + NB)), inB_(types::contiguous_slice(0, NB))); } // Incomplete overlap right. for (int i = NA - NB + 1; i < iRight; i++, out_ptr++) { *out_ptr = numpy::dot(inA_(types::contiguous_slice(i, NA)), inB_(types::contiguous_slice(0, NA - i))); } } else { // Incomplete overlap left for (int i = iLeft; i < 0; i++, out_ptr += out_inc) { *out_ptr = wrapper::conjugate( numpy::dot(inA_(types::contiguous_slice(0, NB + i)), inB_(types::contiguous_slice(-i, NB)))); } // Complete overlap middle for (int i = 0; i <= NA - NB; i++, out_ptr += out_inc) { *out_ptr = wrapper::conjugate( numpy::dot(inA_(types::contiguous_slice(i, i + NB)), inB_(types::contiguous_slice(0, NB)))); } // Incomplete overlap right. 
for (int i = NA - NB + 1; i < iRight; i++, out_ptr += out_inc) { *out_ptr = wrapper::conjugate( numpy::dot(inA_(types::contiguous_slice(i, NA)), inB_(types::contiguous_slice(0, NA - i)))); } } return out; } template types::ndarray> correlate(A const &inA, B const &inB, types::str const &type) { long NA = inA.template shape<0>(); long NB = inB.template shape<0>(); // If inB is longer than inA, swap them, but time-reverse and conjugate the // output (-1 flag) if (NA > NB) { auto inB_conj = functor::conjugate{}(inB); return do_correlate(inA, inB_conj, type, 1); } else { auto inA_conj = functor::conjugate{}(inA); return do_correlate(inB, inA_conj, type, -1); } } NUMPY_EXPR_TO_NDARRAY0_IMPL(correlate) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/cos.hpp000066400000000000000000000006301416264035500217350ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COS_HPP #define PYTHONIC_NUMPY_COS_HPP #include "pythonic/include/numpy/cos.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME cos #define NUMPY_NARY_FUNC_SYM xsimd::cos #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/cosh.hpp000066400000000000000000000006701416264035500221110ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COSH_HPP #define PYTHONIC_NUMPY_COSH_HPP #include "pythonic/include/numpy/cosh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME cosh #define NUMPY_NARY_FUNC_SYM xsimd::cosh #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/count_nonzero.hpp000066400000000000000000000025101416264035500240520ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_COUNT_NONZERO_HPP #define 
PYTHONIC_NUMPY_COUNT_NONZERO_HPP #include "pythonic/include/numpy/count_nonzero.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto _count_nonzero(E begin, E end, long &count, utils::int_<1>) -> typename std::enable_if::value>::type { for (; begin != end; ++begin) // Behaviour defined in the standard count += *begin; } template auto _count_nonzero(E begin, E end, long &count, utils::int_<1>) -> typename std::enable_if::value>::type { for (; begin != end; ++begin) if (*begin != static_cast(0)) ++count; } template void _count_nonzero(E begin, E end, long &count, utils::int_) { for (; begin != end; ++begin) _count_nonzero((*begin).begin(), (*begin).end(), count, utils::int_()); } template long count_nonzero(E const &array) { long count(0); _count_nonzero(array.begin(), array.end(), count, utils::int_()); return count; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/cross.hpp000066400000000000000000000100341416264035500223010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CROSS_HPP #define PYTHONIC_NUMPY_CROSS_HPP #include "pythonic/include/numpy/cross.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template struct _cross { template void operator()(Out obegin, Out oend, E ebegin, F fbegin) { while (obegin != oend) { _cross{}((*obegin).begin(), (*obegin).end(), (*ebegin).begin(), (*fbegin).begin()); ++obegin, ++ebegin, ++fbegin; } } }; template <> struct _cross<1, 2, 2> { template void operator()(Out obegin, Out oend, E ebegin, F fbegin) { auto e0 = *ebegin; ++ebegin; auto e1 = *ebegin; auto f0 = *fbegin; ++fbegin; auto f1 = *fbegin; *obegin = e0 *f1 - e1 *f0; } }; template <> struct _cross<1, 2, 3> { template void operator()(Out obegin, Out oend, E ebegin, F fbegin) { auto e0 = *ebegin; ++ebegin; auto e1 = *ebegin; decltype(e1) e2 = 0; auto f0 = *fbegin; ++fbegin; auto f1 = *fbegin; ++fbegin; auto 
f2 = *fbegin; *obegin = e1 *f2 - e2 *f1; ++obegin; *obegin = e2 *f0 - e0 *f2; ++obegin; *obegin = e0 *f1 - e1 *f0; } }; template <> struct _cross<1, 3, 3> { template void operator()(Out obegin, Out oend, E ebegin, F fbegin) { auto e0 = *ebegin; ++ebegin; auto e1 = *ebegin; ++ebegin; auto e2 = *ebegin; auto f0 = *fbegin; ++fbegin; auto f1 = *fbegin; ++fbegin; auto f2 = *fbegin; *obegin = e1 *f2 - e2 *f1; ++obegin; *obegin = e2 *f0 - e0 *f2; ++obegin; *obegin = e0 *f1 - e1 *f0; } }; template <> struct _cross<1, 3, 2> { template void operator()(Out obegin, Out oend, E ebegin, F fbegin) { auto e0 = *ebegin; ++ebegin; auto e1 = *ebegin; ++ebegin; auto e2 = *ebegin; auto f0 = *fbegin; ++fbegin; auto f1 = *fbegin; decltype(f1) f2 = 0; *obegin = e1 *f2 - e2 *f1; ++obegin; *obegin = e2 *f0 - e0 *f2; ++obegin; *obegin = e0 *f1 - e1 *f0; } }; template types::ndarray< typename __combined::type, types::array> cross(E const &e, F const &f) { using dtype = typename __combined::type; types::array out_shape; sutils::copy_shape<0, 0>(out_shape, e, utils::make_index_sequence()); if (e.template shape() == 2) { if (f.template shape() == 2) { out_shape[E::value - 1] = 1; types::ndarray> out{ out_shape, types::none_type{}}; _cross{}(out.begin(), out.end(), e.begin(), f.begin()); return out; } else { out_shape[E::value - 1] = 3; types::ndarray> out{ out_shape, types::none_type{}}; _cross{}(out.begin(), out.end(), e.begin(), f.begin()); return out; } } else { if (f.template shape() == 2) { out_shape[E::value - 1] = 3; types::ndarray> out{ out_shape, types::none_type{}}; _cross{}(out.begin(), out.end(), e.begin(), f.begin()); return out; } else { out_shape[E::value - 1] = 3; types::ndarray> out{ out_shape, types::none_type{}}; _cross{}(out.begin(), out.end(), e.begin(), f.begin()); return out; } } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/ctypeslib/000077500000000000000000000000001416264035500224375ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/ctypeslib/as_array.hpp000066400000000000000000000014571416264035500247600ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CTYPESLIB_AS_ARRAY_HPP #define PYTHONIC_NUMPY_CTYPESLIB_AS_ARRAY_HPP #include "pythonic/include/numpy/ctypeslib/as_array.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/pointer.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ctypeslib { template typename std::enable_if::value, types::ndarray>::type as_array(types::pointer ptr, pS shape) { return {ptr.data, shape, types::ownership::external}; } template types::ndarray> as_array(types::pointer ptr, long size) { return as_array(ptr, types::pshape{size}); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/cumprod.hpp000066400000000000000000000012631416264035500226250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CUMPROD_HPP #define PYTHONIC_NUMPY_CUMPROD_HPP #include "pythonic/include/numpy/cumprod.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/partial_sum.hpp" #include "pythonic/operator_/imul.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto cumprod(E &&e, Opts &&... 
opts) -> decltype(partial_sum( std::forward(e), std::forward(opts)...)) { return partial_sum(std::forward(e), std::forward(opts)...); } NUMPY_EXPR_TO_NDARRAY0_IMPL(cumprod); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/cumproduct.hpp000066400000000000000000000002541416264035500233400ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CUMPRODUCT_HPP #define PYTHONIC_NUMPY_CUMPRODUCT_HPP #include "pythonic/include/numpy/cumproduct.hpp" #include "pythonic/numpy/cumprod.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/cumsum.hpp000066400000000000000000000012031416264035500224570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_CUMSUM_HPP #define PYTHONIC_NUMPY_CUMSUM_HPP #include "pythonic/include/numpy/cumsum.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/partial_sum.hpp" #include "pythonic/operator_/iadd.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto cumsum(E &&e, Opts &&... opts) -> decltype(partial_sum( std::forward(e), std::forward(opts)...)) { return partial_sum(std::forward(e), std::forward(opts)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/deg2rad.hpp000066400000000000000000000007161416264035500224660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DEG2RAD_HPP #define PYTHONIC_NUMPY_DEG2RAD_HPP #include "pythonic/include/numpy/deg2rad.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/pi.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME deg2rad #define NUMPY_NARY_FUNC_SYM wrapper::deg2rad #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/degrees.hpp000066400000000000000000000002431416264035500225670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DEGREES_HPP #define PYTHONIC_NUMPY_DEGREES_HPP #include "pythonic/include/numpy/degrees.hpp" #include "pythonic/numpy/rad2deg.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/delete_.hpp000066400000000000000000000026671416264035500225660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DELETE_HPP #define PYTHONIC_NUMPY_DELETE_HPP #include "pythonic/include/numpy/delete_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> delete_(types::ndarray const &a, long index, types::none_type axis) { types::ndarray> out( types::pshape(long(a.flat_size()) - 1), builtins::None); long n = a.flat_size(); index = std::min(n, index); std::copy(a.buffer + index + 1, a.buffer + n, std::copy(a.buffer, a.buffer + index, out.buffer)); return out; } template typename std::enable_if::value, types::ndarray>>::type delete_(types::ndarray const &in, I const &indices, types::none_type axis) { types::ndarray> out( types::pshape(long(in.flat_size()) - indices.flat_size()), builtins::None); auto out_iter = out.buffer; auto in_iter = in.buffer; for (long index : indices) { out_iter = std::copy(in_iter, in.buffer + index, out_iter); in_iter = in.buffer + index + 1; } std::copy(in_iter, in.buffer + in.flat_size(), out_iter); return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(delete_); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/diag.hpp000066400000000000000000000035131416264035500220600ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DIAG_HPP #define PYTHONIC_NUMPY_DIAG_HPP #include "pythonic/include/numpy/diag.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value == 2, types::ndarray>>::type diag(types::ndarray const &a, long k) { auto &&a_shape = a._shape; utils::shared_ref> buffer( std::max(std::get<0>(a_shape), std::get<1>(a_shape))); types::pshape shape = 0; auto iter = buffer->data; if (k >= 0) for (int i = 0, j = k; i < 
std::get<0>(a_shape) && j < std::get<1>(a_shape); ++i, ++j, ++std::get<0>(shape)) *iter++ = a[i][j]; else for (int i = -k, j = 0; i < std::get<0>(a_shape) && j < std::get<1>(a_shape); ++i, ++j, ++std::get<0>(shape)) *iter++ = a[i][j]; return {buffer, shape}; } template typename std::enable_if::value == 1, types::ndarray>>::type diag(types::ndarray const &a, long k) { long n = a.flat_size() + std::abs(k); types::ndarray> out(types::make_tuple(n, n), 0); if (k >= 0) for (long i = 0, j = k; i < n && j < n; ++i, ++j) out[i][j] = a[i]; else for (long i = -k, j = 0; i < n && j < n; ++i, ++j) out[i][j] = a[j]; return out; } template auto diag(types::list const &a, long k) -> decltype(diag(asarray(a), k)) { return diag(asarray(a), k); } NUMPY_EXPR_TO_NDARRAY0_IMPL(diag); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/diagflat.hpp000066400000000000000000000002431416264035500227240ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DIAGFLAT_HPP #define PYTHONIC_NUMPY_DIAGFLAT_HPP #include "pythonic/include/numpy/diagflat.hpp" #include "pythonic/numpy/diag.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/diagonal.hpp000066400000000000000000000002431416264035500227270ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DIAGONAL_HPP #define PYTHONIC_NUMPY_DIAGONAL_HPP #include "pythonic/include/numpy/diagonal.hpp" #include "pythonic/numpy/diag.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/diff.hpp000066400000000000000000000034341416264035500220660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DIFF_HPP #define PYTHONIC_NUMPY_DIFF_HPP #include "pythonic/include/numpy/diff.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template types::ndarray> diff(E const &arr, long n, long axis) { auto shape = sutils::getshape(arr); auto stride = (axis == E::value - 1) ? 
arr.template shape() : std::accumulate(shape.begin() + axis + 1, shape.end(), 1L, std::multiplies()); --shape[axis]; // this does not leak, but uses slightly too much memory auto out = arr.reshape(shape); auto iter = arr.fbegin(); auto out_iter = out.fbegin(); if (axis == E::value - 1) { for (long i = 0, sz = arr.flat_size(); i < sz; i += stride) { auto prev = *(iter + i); for (long k = 1; k < stride; ++k, ++out_iter) { auto nprev = *(iter + i + k); *(out_iter) = nprev - prev; prev = nprev; } } } else { iter += stride; for (auto out_end = out.fend(); out_iter != out_end; ++out_iter) { *out_iter = *iter++ - *out_iter; } } if (n == 1) return out; else return diff(out, n - 1, axis); } } template types::ndarray> diff(E const &expr, long n, long axis) { if (axis < 0) axis += E::value; // that's the only allocation that should happen return details::diff(array(expr), n, axis); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/digitize.hpp000066400000000000000000000032161416264035500227640ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DIGITIZE_HPP #define PYTHONIC_NUMPY_DIGITIZE_HPP #include "pythonic/include/numpy/digitize.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/operator_/gt.hpp" #include "pythonic/operator_/lt.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _digitize(I begin, I end, O &out, B &bins, Op const &op, utils::int_<1>) { for (; begin != end; ++begin, ++out) *out = std::lower_bound(bins.begin(), bins.end(), *begin, op) - bins.begin(); } template void _digitize(I begin, I end, O &out, B &bins, Op const &op, utils::int_) { for (; begin != end; ++begin) _digitize((*begin).begin(), (*begin).end(), out, bins, op, utils::int_()); } } template types::ndarray> digitize(E const &expr, F const &b) { auto bins = asarray(b); bool is_increasing = bins.flat_size() > 1 && *bins.fbegin() < *(bins.fbegin() + 1); types::ndarray> out( types::make_tuple(long(expr.flat_size())), 
builtins::None); auto out_iter = out.fbegin(); if (is_increasing) _digitize(expr.begin(), expr.end(), out_iter, bins, operator_::functor::lt(), utils::int_()); else _digitize(expr.begin(), expr.end(), out_iter, bins, operator_::functor::gt(), utils::int_()); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/divide.hpp000066400000000000000000000010061416264035500224130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DIVIDE_HPP #define PYTHONIC_NUMPY_DIVIDE_HPP #include "pythonic/include/numpy/divide.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/div.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME divide #define NUMPY_NARY_FUNC_SYM pythonic::operator_::div #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/divide/000077500000000000000000000000001416264035500217055ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/divide/accumulate.hpp000066400000000000000000000002541416264035500245420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DIVIDE_ACCUMULATE_HPP #define PYTHONIC_NUMPY_DIVIDE_ACCUMULATE_HPP #define UFUNC_NAME divide #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/dot.hpp000066400000000000000000000533311416264035500217450ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DOT_HPP #define PYTHONIC_NUMPY_DOT_HPP #include "pythonic/include/numpy/dot.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/sum.hpp" #include "pythonic/numpy/multiply.hpp" #include "pythonic/types/traits.hpp" #ifdef PYTHRAN_BLAS_NONE #error pythran configured without BLAS but BLAS seem needed #endif #if defined(PYTHRAN_BLAS_ATLAS) || defined(PYTHRAN_BLAS_SATLAS) extern "C" { #endif #include #if defined(PYTHRAN_BLAS_ATLAS) || 
defined(PYTHRAN_BLAS_SATLAS) } #endif PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value && types::is_dtype::value, decltype(std::declval() * std::declval())>::type dot(E const &e, F const &f) { return e * f; } template struct blas_buffer_t { typename E::dtype const *operator()(E const &e) const { return e.buffer; } }; template struct blas_buffer_t> { T const *operator()(types::list const &e) const { return &e.fast(0); } }; template struct blas_buffer_t> { T const *operator()(types::array const &e) const { return e.data(); } }; template auto blas_buffer(E const &e) -> decltype(blas_buffer_t{}(e)) { return blas_buffer_t{}(e); } template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value // Arguments are array_like && E::value == 1 && F::value == 1 // It is a two vectors. && (!is_blas_array::value || !is_blas_array::value || !std::is_same::value), typename __combined::type>::type dot(E const &e, F const &f) { return sum(functor::multiply{}(e, f)); } template typename std::enable_if::value && std::is_same::value && is_blas_array::value && is_blas_array::value, float>::type dot(E const &e, F const &f) { return cblas_sdot(e.size(), blas_buffer(e), 1, blas_buffer(f), 1); } template typename std::enable_if::value && std::is_same::value && is_blas_array::value && is_blas_array::value, double>::type dot(E const &e, F const &f) { return cblas_ddot(e.size(), blas_buffer(e), 1, blas_buffer(f), 1); } template typename std::enable_if< E::value == 1 && F::value == 1 && std::is_same>::value && std::is_same>::value && is_blas_array::value && is_blas_array::value, std::complex>::type dot(E const &e, F const &f) { std::complex out; cblas_cdotu_sub(e.size(), blas_buffer(e), 1, blas_buffer(f), 1, &out); return out; } template typename std::enable_if< E::value == 1 && F::value == 1 && std::is_same>::value && std::is_same>::value && is_blas_array::value && is_blas_array::value, std::complex>::type dot(E const &e, F const &f) { 
std::complex out; cblas_zdotu_sub(e.size(), blas_buffer(e), 1, blas_buffer(f), 1, &out); return out; } /// Matrice / Vector multiplication #define MV_DEF(T, L) \ void mv(int m, int n, T *A, T *B, T *C) \ { \ cblas_##L##gemv(CblasRowMajor, CblasNoTrans, n, m, 1, A, m, B, 1, 0, C, \ 1); \ } MV_DEF(double, d) MV_DEF(float, s) #undef MV_DEF #define MV_DEF(T, K, L) \ void mv(int m, int n, T *A, T *B, T *C) \ { \ T alpha = 1, beta = 0; \ cblas_##L##gemv(CblasRowMajor, CblasNoTrans, n, m, (K *)&alpha, (K *)A, m, \ (K *)B, 1, (K *)&beta, (K *)C, 1); \ } MV_DEF(std::complex, float, c) MV_DEF(std::complex, double, z) #undef MV_DEF template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 1, types::ndarray>>::type dot(types::ndarray const &f, types::ndarray const &e) { types::ndarray> out( types::pshape{f.template shape<0>()}, builtins::None); const int m = f.template shape<1>(), n = f.template shape<0>(); mv(m, n, f.buffer, e.buffer, out.buffer); return out; } // The trick is to not transpose the matrix so that MV become VM #define VM_DEF(T, L) \ void vm(int m, int n, T *A, T *B, T *C) \ { \ cblas_##L##gemv(CblasRowMajor, CblasTrans, n, m, 1, A, m, B, 1, 0, C, 1); \ } VM_DEF(double, d) VM_DEF(float, s) #undef VM_DEF #define VM_DEF(T, K, L) \ void vm(int m, int n, T *A, T *B, T *C) \ { \ T alpha = 1, beta = 0; \ cblas_##L##gemv(CblasRowMajor, CblasTrans, n, m, (K *)&alpha, (K *)A, m, \ (K *)B, 1, (K *)&beta, (K *)C, 1); \ } VM_DEF(std::complex, float, c) VM_DEF(std::complex, double, z) #undef VM_DEF template typename std::enable_if::value && std::tuple_size::value == 1 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::ndarray const &e, types::ndarray const &f) { types::ndarray> out( types::pshape{f.template shape<1>()}, builtins::None); const int m = f.template shape<1>(), n = f.template shape<0>(); vm(m, n, f.buffer, e.buffer, out.buffer); return out; } // If arguments could be use with blas, we evaluate them as we 
need pointer // on array for blas template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value // It is an array_like && (!(types::is_ndarray::value && types::is_ndarray::value) || !std::is_same::value) && is_blas_type::value && is_blas_type::value // With dtype compatible with // blas && E::value == 2 && F::value == 1, // And it is matrix / vect types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f) { types::ndarray< typename __combined::type, typename E::shape_t> e_ = e; types::ndarray< typename __combined::type, typename F::shape_t> f_ = f; return dot(e_, f_); } // If arguments could be use with blas, we evaluate them as we need pointer // on array for blas template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value // It is an array_like && (!(types::is_ndarray::value && types::is_ndarray::value) || !std::is_same::value) && is_blas_type::value && is_blas_type::value // With dtype compatible with // blas && E::value == 1 && F::value == 2, // And it is vect / matrix types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f) { types::ndarray< typename __combined::type, typename E::shape_t> e_ = e; types::ndarray< typename __combined::type, typename F::shape_t> f_ = f; return dot(e_, f_); } // If one of the arg doesn't have a "blas compatible type", we use a slow // matrix vector multiplication. 
template typename std::enable_if< (!is_blas_type::value || !is_blas_type::value) && E::value == 1 && F::value == 2, // And it is vect / matrix types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f) { types::ndarray< typename __combined::type, types::pshape> out(types::pshape{f.template shape<1>()}, 0); for (long i = 0; i < out.template shape<0>(); i++) for (long j = 0; j < f.template shape<0>(); j++) out[i] += e[j] * f[types::array{{j, i}}]; return out; } // If one of the arg doesn't have a "blas compatible type", we use a slow // matrix vector multiplication. template typename std::enable_if< (!is_blas_type::value || !is_blas_type::value) && E::value == 2 && F::value == 1, // And it is vect / matrix types::ndarray< typename __combined::type, types::pshape>>::type dot(E const &e, F const &f) { types::ndarray< typename __combined::type, types::pshape> out(types::pshape{e.template shape<0>()}, 0); for (long i = 0; i < out.template shape<0>(); i++) for (long j = 0; j < f.template shape<0>(); j++) out[i] += e[types::array{{i, j}}] * f[j]; return out; } /// Matrix / Matrix multiplication #define MM_DEF(T, L) \ void mm(int m, int n, int k, T *A, T *B, T *C) \ { \ cblas_##L##gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1, A, \ k, B, n, 0, C, n); \ } MM_DEF(double, d) MM_DEF(float, s) #undef MM_DEF #define MM_DEF(T, K, L) \ void mm(int m, int n, int k, T *A, T *B, T *C) \ { \ T alpha = 1, beta = 0; \ cblas_##L##gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, \ (K *)&alpha, (K *)A, k, (K *)B, n, (K *)&beta, (K *)C, n); \ } MM_DEF(std::complex, float, c) MM_DEF(std::complex, double, z) #undef MM_DEF template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::ndarray const &a, types::ndarray const &b) { int n = b.template shape<1>(), m = a.template shape<0>(), k = b.template shape<0>(); types::ndarray> out(types::array{{m, n}}, 
builtins::None); mm(m, n, k, a.buffer, b.buffer, out.buffer); return out; } template typename std::enable_if< is_blas_type::value && std::tuple_size::value == 2 && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>::type & dot(types::ndarray const &a, types::ndarray const &b, types::ndarray &c) { int n = b.template shape<1>(), m = a.template shape<0>(), k = b.template shape<0>(); mm(m, n, k, a.buffer, b.buffer, c.buffer); return c; } #define TM_DEF(T, L) \ void tm(int m, int n, int k, T *A, T *B, T *C) \ { \ cblas_##L##gemm(CblasRowMajor, CblasTrans, CblasNoTrans, m, n, k, 1, A, m, \ B, n, 0, C, n); \ } TM_DEF(double, d) TM_DEF(float, s) #undef TM_DEF #define TM_DEF(T, K, L) \ void tm(int m, int n, int k, T *A, T *B, T *C) \ { \ T alpha = 1, beta = 0; \ cblas_##L##gemm(CblasRowMajor, CblasTrans, CblasNoTrans, m, n, k, \ (K *)&alpha, (K *)A, m, (K *)B, n, (K *)&beta, (K *)C, n); \ } TM_DEF(std::complex, float, c) TM_DEF(std::complex, double, z) #undef TM_DEF template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::numpy_texpr> const &a, types::ndarray const &b) { int n = b.template shape<1>(), m = a.template shape<0>(), k = b.template shape<0>(); types::ndarray> out(types::array{{m, n}}, builtins::None); tm(m, n, k, a.arg.buffer, b.buffer, out.buffer); return out; } #define MT_DEF(T, L) \ void mt(int m, int n, int k, T *A, T *B, T *C) \ { \ cblas_##L##gemm(CblasRowMajor, CblasNoTrans, CblasTrans, m, n, k, 1, A, k, \ B, k, 0, C, n); \ } MT_DEF(double, d) MT_DEF(float, s) #undef MT_DEF #define MT_DEF(T, K, L) \ void mt(int m, int n, int k, T *A, T *B, T *C) \ { \ T alpha = 1, beta = 0; \ cblas_##L##gemm(CblasRowMajor, CblasNoTrans, CblasTrans, m, n, k, \ (K *)&alpha, (K *)A, k, (K *)B, k, (K *)&beta, (K *)C, n); \ } MT_DEF(std::complex, float, c) MT_DEF(std::complex, double, z) #undef MT_DEF template typename std::enable_if::value && std::tuple_size::value == 2 && 
std::tuple_size::value == 2, types::ndarray>>::type dot(types::ndarray const &a, types::numpy_texpr> const &b) { int n = b.template shape<1>(), m = a.template shape<0>(), k = b.template shape<0>(); types::ndarray> out(types::array{{m, n}}, builtins::None); mt(m, n, k, a.buffer, b.arg.buffer, out.buffer); return out; } #define TT_DEF(T, L) \ void tt(int m, int n, int k, T *A, T *B, T *C) \ { \ cblas_##L##gemm(CblasRowMajor, CblasTrans, CblasTrans, m, n, k, 1, A, m, \ B, k, 0, C, n); \ } TT_DEF(double, d) TT_DEF(float, s) #undef TT_DEF #define TT_DEF(T, K, L) \ void tt(int m, int n, int k, T *A, T *B, T *C) \ { \ T alpha = 1, beta = 0; \ cblas_##L##gemm(CblasRowMajor, CblasTrans, CblasTrans, m, n, k, \ (K *)&alpha, (K *)A, m, (K *)B, k, (K *)&beta, (K *)C, n); \ } TT_DEF(std::complex, float, c) TT_DEF(std::complex, double, z) #undef TT_DEF template typename std::enable_if::value && std::tuple_size::value == 2 && std::tuple_size::value == 2, types::ndarray>>::type dot(types::numpy_texpr> const &a, types::numpy_texpr> const &b) { int n = b.template shape<1>(), m = a.template shape<0>(), k = b.template shape<0>(); types::ndarray> out(types::array{{m, n}}, builtins::None); tt(m, n, k, a.arg.buffer, b.arg.buffer, out.buffer); return out; } // If arguments could be use with blas, we evaluate them as we need pointer // on array for blas template typename std::enable_if< types::is_numexpr_arg::value && types::is_numexpr_arg::value // It is an array_like && (!(types::is_ndarray::value && types::is_ndarray::value) || !std::is_same::value) && is_blas_type::value && is_blas_type::value // With dtype compatible with // blas && E::value == 2 && F::value == 2, // And both are matrix types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f) { types::ndarray< typename __combined::type, typename E::shape_t> e_ = e; types::ndarray< typename __combined::type, typename F::shape_t> f_ = f; return dot(e_, f_); } // If one of the arg doesn't have a "blas 
compatible type", we use a slow // matrix multiplication. template typename std::enable_if< (!is_blas_type::value || !is_blas_type::value) && E::value == 2 && F::value == 2, // And it is matrix / matrix types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f) { types::ndarray< typename __combined::type, types::array> out(types::array{{e.template shape<0>(), f.template shape<1>()}}, 0); for (long i = 0; i < out.template shape<0>(); i++) for (long j = 0; j < out.template shape<1>(); j++) for (long k = 0; k < e.template shape<1>(); k++) out[types::array{{i, j}}] += e[types::array{{i, k}}] * f[types::array{{k, j}}]; return out; } template typename std::enable_if< (E::value >= 3 && F::value == 1), // And it is matrix / matrix types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f) { auto out = dot( e.reshape(types::array{{sutils::prod_head(e), f.size()}}), f); types::array out_shape; auto tmp = sutils::getshape(e); std::copy(tmp.begin(), tmp.end() - 1, out_shape.begin()); return out.reshape(out_shape); } template typename std::enable_if< (E::value >= 3 && F::value >= 2), types::ndarray< typename __combined::type, types::array>>::type dot(E const &e, F const &f) { static_assert(E::value == 0, "not implemented yet"); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/double_.hpp000066400000000000000000000005421416264035500225640ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DOUBLE_HPP #define PYTHONIC_NUMPY_DOUBLE_HPP #include "pythonic/include/numpy/double_.hpp" #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME double_ #define NUMPY_NARY_FUNC_SYM details::float64 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/dtype/000077500000000000000000000000001416264035500215665ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/dtype/type.hpp000066400000000000000000000005661416264035500232670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_DTYPE_TYPE_HPP #define PYTHONIC_NUMPY_DTYPE_TYPE_HPP #include "pythonic/include/numpy/dtype/type.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace dtype { template auto type(T const &t, V const &v) -> decltype(t(v)) { return t(v); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/e.hpp000066400000000000000000000001631416264035500213760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_E_HPP #define PYTHONIC_NUMPY_E_HPP #include "pythran/pythonic/include/numpy/e.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ediff1d.hpp000066400000000000000000000016061416264035500224570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EDIFF1D_HPP #define PYTHONIC_NUMPY_EDIFF1D_HPP #include "pythonic/include/numpy/ediff1d.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ediff1d(E const &expr) { auto arr = asarray(expr); long n = arr.flat_size() - 1; types::ndarray> out( types::pshape(n), builtins::None); // Compute adjacent difference except for the first element std::adjacent_difference(arr.fbegin() + 1, arr.fend(), out.fbegin()); // First element can be done now (*out.fbegin()) = *(arr.fbegin() + 1) - *(arr.fbegin()); return out; } template auto ediff1d(types::list const &expr) -> decltype(ediff1d(asarray(expr))) { return ediff1d(asarray(expr)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/empty.hpp000066400000000000000000000016631416264035500223160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EMPTY_HPP #define PYTHONIC_NUMPY_EMPTY_HPP #include "pythonic/include/numpy/empty.hpp" #include "pythonic/utils/functor.hpp" #include 
"pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> empty(pS const &shape, dtype) { return {(sutils::shape_t)shape, builtins::None}; } template types::ndarray> empty(long size, dtype d) { return empty(types::pshape(size), d); } template types::ndarray>> empty(std::integral_constant, dtype d) { return empty(types::pshape>({}), d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/empty_like.hpp000066400000000000000000000013121416264035500233110ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EMPTYLIKE_HPP #define PYTHONIC_NUMPY_EMPTYLIKE_HPP #include "pythonic/include/numpy/empty_like.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/empty.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto empty_like(E const &expr, dtype d) -> decltype(empty(sutils::getshape(expr), d)) { return empty(sutils::getshape(expr), d); } template auto empty_like(E const &expr, types::none_type) -> decltype(empty(sutils::getshape(expr), types::dtype_t())) { return empty(sutils::getshape(expr), types::dtype_t()); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/equal.hpp000066400000000000000000000010001416264035500222500ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EQUAL_HPP #define PYTHONIC_NUMPY_EQUAL_HPP #include "pythonic/include/numpy/equal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/eq.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME equal #define NUMPY_NARY_FUNC_SYM pythonic::operator_::eq #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/equal/000077500000000000000000000000001416264035500215505ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/equal/accumulate.hpp000066400000000000000000000002511416264035500244020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EQUAL_ACCUMULATE_HPP #define PYTHONIC_NUMPY_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME equal #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/exp.hpp000066400000000000000000000006271416264035500217530ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EXP_HPP #define PYTHONIC_NUMPY_EXP_HPP #include "pythonic/include/numpy/exp.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME exp #define NUMPY_NARY_FUNC_SYM xsimd::exp #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/expand_dims.hpp000066400000000000000000000015361416264035500234520ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EXPAND_DIMS_HPP #define PYTHONIC_NUMPY_EXPAND_DIMS_HPP #include "pythonic/include/numpy/expand_dims.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> expand_dims(T const &input, int axis) { const long N = T::value; if (axis == -1) axis += N + 1; types::array dim_array; auto in_shape = sutils::getshape(input); long ii, jj; for (ii = jj = 0; ii < N + 1; ii++) { if (ii == axis) { dim_array[ii] = 1; } else { dim_array[ii] = in_shape[jj++]; } } return numpy::functor::asarray{}(input).reshape(dim_array); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/expm1.hpp000066400000000000000000000006411416264035500222050ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EXPM1_HPP #define PYTHONIC_NUMPY_EXPM1_HPP 
#include "pythonic/include/numpy/expm1.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME expm1 #define NUMPY_NARY_FUNC_SYM xsimd::expm1 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/eye.hpp000066400000000000000000000016151416264035500217370ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_EYE_HPP #define PYTHONIC_NUMPY_EYE_HPP #include "pythonic/include/numpy/eye.hpp" #include "pythonic/numpy/zeros.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> eye(long N, long M, long k, dtype d) { types::ndarray> out = zeros(types::make_tuple(N, M), d); if (k >= 0) for (int i = 0, j = k; i < N && j < M; ++i, ++j) out[i][j] = typename dtype::type(1); else for (int i = -k, j = 0; i < N && j < M; ++i, ++j) out[i][j] = typename dtype::type(1); return out; } template types::ndarray> eye(long N, types::none_type M, long k, dtype d) { return eye(N, N, k, d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fabs.hpp000066400000000000000000000002261416264035500220650ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FABS_HPP #define PYTHONIC_NUMPY_FABS_HPP #include "pythonic/include/numpy/fabs.hpp" #include "pythonic/numpy/abs.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/000077500000000000000000000000001416264035500212205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/c2c.hpp000066400000000000000000000260301416264035500224010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FFT_C2C_HPP #define PYTHONIC_NUMPY_FFT_C2C_HPP #include "pythonic/include/numpy/fft/c2c.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include 
"pythonic/numpy/concatenate.hpp" #include "pythonic/numpy/zeros.hpp" #include "pythonic/numpy/empty.hpp" #include #include #include #include "pythonic/numpy/fft/pocketfft.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { using pocketfft::stride_t; using pocketfft::shape_t; using ldbl_t = typename std::conditional::type; template types::ndarray< typename std::enable_if::value, double>::type, types::array::value>> _copy_to_double(types::ndarray const &in_array) { auto out_shape = sutils::getshape(in_array); size_t l = in_array.flat_size(); auto out_array = numpy::empty(out_shape, types::dtype_t()); std::copy(in_array.buffer, in_array.buffer + l, out_array.buffer); return out_array; } template types::ndarray::value, std::complex>::type, types::array::value>> _copy_to_complex(types::ndarray const &in_array) { auto out_shape = sutils::getshape(in_array); size_t l = in_array.flat_size(); auto out_array = numpy::empty(out_shape, types::dtype_t>()); std::copy(in_array.buffer, in_array.buffer + l, out_array.buffer); return out_array; } template types::ndarray::value, std::complex>::type, types::array::value>> _copy_to_complex(types::ndarray const &in_array) { auto out_shape = sutils::getshape(in_array); size_t l = in_array.flat_size(); auto out_array = numpy::empty( out_shape, types::dtype_t>()); std::copy(in_array.buffer, in_array.buffer + l, out_array.buffer); return out_array; } enum class Inorm : int { forward, ortho, backward, }; Inorm _get_inorm(types::str const &norm, bool forward) { if (norm == "ortho") { return Inorm::ortho; } else if (!forward) { return Inorm::backward; } else { return Inorm::forward; } } template T norm_fct(Inorm inorm, size_t N) { switch (inorm) { case Inorm::ortho: return T(1 / sqrt(ldbl_t(N))); case Inorm::backward: return T(1 / ldbl_t(N)); case Inorm::forward: return T(1); default: assert(false && "unreachable"); return T(0); } } template T norm_fct(Inorm inorm, const shape_t &shape, const shape_t &axes, size_t fct = 1, int delta = 0) 
{ if (inorm == Inorm::forward) return T(1); size_t N(1); for (auto a : axes) N *= fct * size_t(int64_t(shape[a]) + delta); return norm_fct(inorm, N); } template stride_t create_strides(types::ndarray const &in_array) { auto constexpr N = std::tuple_size::value; auto shape = sutils::getshape(in_array); stride_t strides = stride_t(N); strides[N - 1] = sizeof(T); std::transform(strides.rbegin(), strides.rend() - 1, shape.rbegin(), strides.rbegin() + 1, std::multiplies()); return strides; } template types::ndarray::value>> _pad_in_array(types::ndarray const &in_array, long axis, long n) { types::ndarray::value>> extended_array; auto tmp_shape = sutils::getshape(in_array); tmp_shape[axis] = n; auto tmp_array = zeros(tmp_shape, types::dtype_t()); types::list::value>>> bi(0); bi.push_back(in_array); bi.push_back(tmp_array); extended_array = concatenate(bi, axis); return extended_array; } template types::ndarray::value>> c2r(types::ndarray, pS> const &in_array, long n, long axis, types::str const &norm, bool forward) { auto constexpr N = std::tuple_size::value; Inorm inorm = _get_inorm(norm, forward); if (axis < 0) axis = N + axis; auto in_shape = sutils::getshape(in_array); long npts = in_shape[axis]; if (n == -1) n = 2 * npts - 2; auto out_shape = sutils::getshape(in_array); out_shape[axis] = n; // Create output array. 
types::ndarray::value>> out_array(out_shape, builtins::None); std::complex *d_in; types::ndarray, types::array::value>> extended_array; stride_t in_strides; auto out_strides = create_strides(out_array); if (n > 2 * npts - 2) { // extend array with zeros along axis direction extended_array = _pad_in_array(in_array, axis, n - (2 * npts - 2)); in_strides = create_strides(extended_array); d_in = reinterpret_cast *>(extended_array.buffer); } else { in_strides = create_strides( in_array); // for cropped arrays we need to use different strides d_in = reinterpret_cast *>(in_array.buffer); } auto d_out = reinterpret_cast(out_array.buffer); // axes calculation is for 1D transform shape_t axes = shape_t(1); axes[0] = axis; shape_t shapes = shape_t(size_t(N)); std::copy(out_shape.begin(), out_shape.begin() + N, shapes.begin()); auto fct = norm_fct(inorm, shapes, axes); pocketfft::c2r(shapes, in_strides, out_strides, axes, forward, d_in, d_out, fct, size_t(0)); return out_array; } template types::ndarray, types::array::value>> c2c(types::ndarray, pS> const &in_array, long n, long axis, types::str const &norm, bool forward) { auto constexpr N = std::tuple_size::value; Inorm inorm = _get_inorm(norm, forward); if (axis < 0) axis = N + axis; auto in_shape = sutils::getshape(in_array); long npts = in_shape[axis]; if (n == -1) n = npts; auto out_shape = sutils::getshape(in_array); out_shape[axis] = n; // Create output array. 
types::ndarray, types::array::value>> out_array(out_shape, builtins::None); std::complex *d_in; types::ndarray, types::array::value>> extended_array; stride_t in_strides; if (n > npts) { // extend array with zeros along axis direction extended_array = _pad_in_array(in_array, axis, n - npts); d_in = reinterpret_cast *>(extended_array.buffer); in_strides = create_strides(extended_array); // } else { d_in = reinterpret_cast *>(in_array.buffer); in_strides = create_strides( in_array); // for cropped arrays we need to use different strides } auto d_out = reinterpret_cast *>(out_array.buffer); // axes calculation is for 1D transform shape_t axes = shape_t(1); axes[0] = axis; auto out_strides = create_strides(out_array); shape_t shapes = shape_t(size_t(N)); for (size_t i = 0; i < N; ++i) shapes[i] = size_t(out_shape[i]); auto fct = norm_fct(inorm, shapes, axes); pocketfft::c2c(shapes, in_strides, out_strides, axes, forward, d_in, d_out, fct, size_t(0)); return out_array; } template types::ndarray::value, std::complex>::type, types::array::value>> r2c(types::ndarray const &in_array, long n, long axis, types::str const &norm, bool forward, bool extend = true) { auto constexpr N = std::tuple_size::value; Inorm inorm = _get_inorm(norm, forward); if (axis < 0) axis = N + axis; auto in_shape = sutils::getshape(in_array); long npts = in_shape[axis]; if (n == -1) n = npts; auto out_shape = sutils::getshape(in_array); if (extend) { out_shape[axis] = n; } else { out_shape[axis] = n / 2 + 1; } // Create output array. 
types::ndarray, types::array::value>> out_array(out_shape, builtins::None); T *d_in; types::ndarray::value>> extended_array; shape_t shapes = shape_t(size_t(N)); stride_t in_strides; if (n > npts) { // extend array with zeros along axis direction extended_array = _pad_in_array(in_array, axis, n - npts); auto ext_shape = sutils::getshape(extended_array); std::copy(ext_shape.begin(), ext_shape.begin() + N, shapes.begin()); d_in = reinterpret_cast(extended_array.buffer); in_strides = create_strides(extended_array); } else { d_in = reinterpret_cast(in_array.buffer); in_shape[axis] = n; std::copy(in_shape.begin(), in_shape.begin() + N, shapes.begin()); in_strides = create_strides( in_array); // for cropped arrays we need to use different strides } auto d_out = reinterpret_cast *>(out_array.buffer); // axes calculation is for 1D transform shape_t axes = shape_t(1); axes[0] = axis; auto out_strides = create_strides(out_array); auto fct = norm_fct(inorm, shapes, axes); pocketfft::r2c(shapes, in_strides, out_strides, axes, forward, d_in, d_out, fct, size_t(0)); if (extend) { using namespace pocketfft::detail; ndarr> ares(out_array.buffer, shapes, out_strides); rev_iter iter(ares, axes); while (iter.remaining() > 0) { auto v = ares[iter.ofs()]; ares[iter.rev_ofs()] = conj(v); iter.advance(); } } return out_array; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/fft.hpp000066400000000000000000000123431416264035500225130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FFT_FFT_HPP #define PYTHONIC_NUMPY_FFT_FFT_HPP #include "pythonic/include/numpy/fft/fft.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/fft/c2c.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &in_array, types::none_type 
n, long axis, types::str const &norm) { return c2c(in_array, -1, axis, norm, true); } template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { return c2c(in_array, -1, axis, "", true); } template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { return c2c(in_array, n, axis, "", true); } template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> fft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { return c2c(in_array, n, axis, norm, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { return r2c(in_array, -1, axis, norm, true, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { return r2c(in_array, -1, axis, "", true, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { return r2c(in_array, n, axis, "", true, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { return r2c(in_array, n, axis, norm, true, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, norm, true, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, types::none_type n, long axis, 
types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, "", true, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, "", true, true); } template types::ndarray::value, std::complex>::type, types::array::value>> fft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, norm, true, true); } NUMPY_EXPR_TO_NDARRAY0_IMPL(fft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/hfft.hpp000066400000000000000000000074171416264035500226710ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FFT_HFFT_HPP #define PYTHONIC_NUMPY_FFT_HFFT_HPP #include "pythonic/include/numpy/fft/hfft.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/fft/c2c.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value>> hfft(types::ndarray, pS> const &in_array, types::none_type n, long axis, types::str const &norm) { return c2r(in_array, -1, axis, norm, true); } template types::ndarray::value>> hfft(types::ndarray, pS> const &in_array, types::none_type n, long axis, types::none_type norm) { return c2r(in_array, -1, axis, "", true); } template types::ndarray::value>> hfft(types::ndarray, pS> const &in_array, long n, long axis, types::none_type norm) { return c2r(in_array, n, axis, "", true); } template types::ndarray::value>> hfft(types::ndarray, pS> const &in_array, long n, long axis, types::str const &norm) { return c2r(in_array, n, axis, norm, true); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> 
hfft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, -1, axis, norm, true); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> hfft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, -1, axis, "", true); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> hfft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, n, axis, "", true); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> hfft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, n, axis, norm, true); } NUMPY_EXPR_TO_NDARRAY0_IMPL(hfft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/ifft.hpp000066400000000000000000000124051416264035500226630ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FFT_IFFT_HPP #define PYTHONIC_NUMPY_FFT_IFFT_HPP #include "pythonic/include/numpy/fft/ifft.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/fft/c2c.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { return c2c(in_array, -1, axis, norm, false); } template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &in_array, types::none_type n, 
long axis, types::none_type norm) { return c2c(in_array, -1, axis, "", false); } template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { return c2c(in_array, n, axis, "", false); } template types::ndarray< typename std::enable_if::value, T>::type, types::array::value>> ifft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { return c2c(in_array, n, axis, norm, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { return r2c(in_array, -1, axis, norm, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { return r2c(in_array, -1, axis, "", false, true); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { return r2c(in_array, n, axis, "", false, true); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { return r2c(in_array, n, axis, norm, false, true); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, norm, false, true); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, "", false, true); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const 
&in_array, long n, long axis, types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, "", false, true); } template types::ndarray::value, std::complex>::type, types::array::value>> ifft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, norm, false, true); } NUMPY_EXPR_TO_NDARRAY0_IMPL(ifft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/ihfft.hpp000066400000000000000000000076661416264035500230500ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FFT_IHFFT_HPP #define PYTHONIC_NUMPY_FFT_IHFFT_HPP #include "pythonic/include/numpy/fft/ihfft.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/fft/c2c.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { return r2c(in_array, -1, axis, norm, false, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { return r2c(in_array, -1, axis, "", false, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { return r2c(in_array, n, axis, "", false, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { return r2c(in_array, n, axis, norm, false, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, types::none_type n, long axis, types::str 
const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, norm, false, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, "", false, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, "", false, false); } template types::ndarray::value, std::complex>::type, types::array::value>> ihfft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, norm, false, false); } NUMPY_EXPR_TO_NDARRAY0_IMPL(ihfft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/irfft.hpp000066400000000000000000000074531416264035500230540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FFT_IRFFT_HPP #define PYTHONIC_NUMPY_FFT_IRFFT_HPP #include "pythonic/include/numpy/fft/irfft.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/fft/c2c.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value>> irfft(types::ndarray, pS> const &in_array, types::none_type n, long axis, types::str const &norm) { return c2r(in_array, -1, axis, norm, false); } template types::ndarray::value>> irfft(types::ndarray, pS> const &in_array, types::none_type n, long axis, types::none_type norm) { return c2r(in_array, -1, axis, "", false); } template types::ndarray::value>> irfft(types::ndarray, pS> const &in_array, long n, long axis, types::none_type norm) { return c2r(in_array, n, axis, "", false); } template 
types::ndarray::value>> irfft(types::ndarray, pS> const &in_array, long n, long axis, types::str const &norm) { return c2r(in_array, n, axis, norm, false); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, -1, axis, norm, false); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, -1, axis, "", false); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, n, axis, "", false); } template types::ndarray::value, typename std::conditional::value, double, T>::type>::type, types::array::value>> irfft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { auto tmp_array = _copy_to_complex(in_array); return c2r(tmp_array, n, axis, norm, false); } NUMPY_EXPR_TO_NDARRAY0_IMPL(irfft); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/pocketfft.hpp000066400000000000000000004115721416264035500237300ustar00rootroot00000000000000/* This file is part of pocketfft. Copyright (C) 2010-2019 Max-Planck-Society Copyright (C) 2019 Peter Bell For the odd-sized DCT-IV transforms: Copyright (C) 2003, 2007-14 Matteo Frigo Copyright (C) 2003, 2007-14 Massachusetts Institute of Technology Authors: Martin Reinecke, Peter Bell All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PYTHONIC_INCLUDE_NUMPY_FFT_POCKETFFT_HPP #define PYTHONIC_INCLUDE_NUMPY_FFT_POCKETFFT_HPP #ifndef POCKETFFT_HDRONLY_H #define POCKETFFT_HDRONLY_H #ifndef __cplusplus #error This file is C++ and requires a C++ compiler. #endif #if !(__cplusplus >= 201103L || _MSVC_LANG + 0L >= 201103L) #error This file requires at least C++11 support. 
#endif #ifndef POCKETFFT_CACHE_SIZE #define POCKETFFT_CACHE_SIZE 16 #endif #include #include #include #include #include #include #include #if POCKETFFT_CACHE_SIZE != 0 #include #include #endif #ifndef POCKETFFT_NO_MULTITHREADING #include #include #include #include #include #include #ifdef POCKETFFT_PTHREADS #include #endif #endif #if defined(__GNUC__) #define POCKETFFT_NOINLINE __attribute__((noinline)) #define POCKETFFT_RESTRICT __restrict__ #elif defined(_MSC_VER) #define POCKETFFT_NOINLINE __declspec(noinline) #define POCKETFFT_RESTRICT __restrict #else #define POCKETFFT_NOINLINE #define POCKETFFT_RESTRICT #endif namespace pocketfft { namespace detail { using std::size_t; using std::ptrdiff_t; // Always use std:: for functions template T cos(T) = delete; template T sin(T) = delete; template T sqrt(T) = delete; using shape_t = std::vector; using stride_t = std::vector; constexpr bool FORWARD = true, BACKWARD = false; // only enable vector support for gcc>=5.0 and clang>=5.0 #ifndef POCKETFFT_NO_VECTORS #define POCKETFFT_NO_VECTORS #if defined(__INTEL_COMPILER) // do nothing. This is necessary because this compiler also sets __GNUC__. 
#elif defined(__clang__) // AppleClang has their own version numbering #ifdef __apple_build_version__ #if (__clang_major__ > 9) || (__clang_major__ == 9 && __clang_minor__ >= 1) #undef POCKETFFT_NO_VECTORS #endif #elif __clang_major__ >= 5 #undef POCKETFFT_NO_VECTORS #endif #elif defined(__GNUC__) #if __GNUC__ >= 5 #undef POCKETFFT_NO_VECTORS #endif #endif #endif template struct VLEN { static constexpr size_t val = 1; }; #ifndef POCKETFFT_NO_VECTORS #if (defined(__AVX512F__)) template <> struct VLEN { static constexpr size_t val = 16; }; template <> struct VLEN { static constexpr size_t val = 8; }; #elif(defined(__AVX__)) template <> struct VLEN { static constexpr size_t val = 8; }; template <> struct VLEN { static constexpr size_t val = 4; }; #elif(defined(__SSE2__)) template <> struct VLEN { static constexpr size_t val = 4; }; template <> struct VLEN { static constexpr size_t val = 2; }; #elif(defined(__VSX__)) template <> struct VLEN { static constexpr size_t val = 4; }; template <> struct VLEN { static constexpr size_t val = 2; }; #else #define POCKETFFT_NO_VECTORS #endif #endif template class arr { private: T *p; size_t sz; #if defined(POCKETFFT_NO_VECTORS) static T *ralloc(size_t num) { if (num == 0) return nullptr; void *res = malloc(num * sizeof(T)); if (!res) throw std::bad_alloc(); return reinterpret_cast(res); } static void dealloc(T *ptr) { free(ptr); } // C++17 in principle has "aligned_alloc", but unfortunately not everywhere ... 
#elif(__cplusplus >= 201703L) && \ ((!defined(__MINGW32__)) || defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) && \ (!defined(__APPLE__)) static T *ralloc(size_t num) { if (num == 0) return nullptr; void *res = aligned_alloc(64, num * sizeof(T)); if (!res) throw std::bad_alloc(); return reinterpret_cast(res); } static void dealloc(T *ptr) { free(ptr); } #else // portable emulation static T *ralloc(size_t num) { if (num == 0) return nullptr; void *ptr = malloc(num * sizeof(T) + 64); if (!ptr) throw std::bad_alloc(); T *res = reinterpret_cast( (reinterpret_cast(ptr) & ~(size_t(63))) + 64); (reinterpret_cast(res))[-1] = ptr; return res; } static void dealloc(T *ptr) { if (ptr) free((reinterpret_cast(ptr))[-1]); } #endif public: arr() : p(0), sz(0) { } arr(size_t n) : p(ralloc(n)), sz(n) { } arr(arr &&other) : p(other.p), sz(other.sz) { other.p = nullptr; other.sz = 0; } ~arr() { dealloc(p); } void resize(size_t n) { if (n == sz) return; dealloc(p); p = ralloc(n); sz = n; } T &operator[](size_t idx) { return p[idx]; } const T &operator[](size_t idx) const { return p[idx]; } T *data() { return p; } const T *data() const { return p; } size_t size() const { return sz; } }; template struct cmplx { T r, i; cmplx() { } cmplx(T r_, T i_) : r(r_), i(i_) { } void Set(T r_, T i_) { r = r_; i = i_; } void Set(T r_) { r = r_; i = T(0); } cmplx &operator+=(const cmplx &other) { r += other.r; i += other.i; return *this; } template cmplx &operator*=(T2 other) { r *= other; i *= other; return *this; } template cmplx &operator*=(const cmplx &other) { T tmp = r * other.r - i * other.i; i = r * other.i + i * other.r; r = tmp; return *this; } template cmplx &operator+=(const cmplx &other) { r += other.r; i += other.i; return *this; } template cmplx &operator-=(const cmplx &other) { r -= other.r; i -= other.i; return *this; } template auto operator*(const T2 &other) const -> cmplx { return {r * other, i * other}; } template auto operator+(const cmplx &other) const -> cmplx { return {r + other.r, i + 
other.i}; } template auto operator-(const cmplx &other) const -> cmplx { return {r - other.r, i - other.i}; } template auto operator*(const cmplx &other) const -> cmplx { return {r * other.r - i * other.i, r * other.i + i * other.r}; } template auto special_mul(const cmplx &other) const -> cmplx { using Tres = cmplx; return fwd ? Tres(r * other.r + i * other.i, i * other.r - r * other.i) : Tres(r * other.r - i * other.i, r * other.i + i * other.r); } }; template inline void PM(T &a, T &b, T c, T d) { a = c + d; b = c - d; } template inline void PMINPLACE(T &a, T &b) { T t = a; a += b; b = t - b; } template inline void MPINPLACE(T &a, T &b) { T t = a; a -= b; b = t + b; } template cmplx conj(const cmplx &a) { return {a.r, -a.i}; } template void special_mul(const cmplx &v1, const cmplx &v2, cmplx &res) { res = fwd ? cmplx(v1.r * v2.r + v1.i * v2.i, v1.i * v2.r - v1.r * v2.i) : cmplx(v1.r * v2.r - v1.i * v2.i, v1.r * v2.i + v1.i * v2.r); } template void ROT90(cmplx &a) { auto tmp_ = a.r; a.r = -a.i; a.i = tmp_; } template void ROTX90(cmplx &a) { auto tmp_ = fwd ? -a.r : a.r; a.r = fwd ? 
a.i : -a.i; a.i = tmp_; } // // twiddle factor section // template class sincos_2pibyn { private: using Thigh = typename std::conditional<(sizeof(T) > sizeof(double)), T, double>::type; size_t N, mask, shift; arr> v1, v2; static cmplx calc(size_t x, size_t n, Thigh ang) { x <<= 3; if (x < 4 * n) // first half { if (x < 2 * n) // first quadrant { if (x < n) return cmplx(std::cos(Thigh(x) * ang), std::sin(Thigh(x) * ang)); return cmplx(std::sin(Thigh(2 * n - x) * ang), std::cos(Thigh(2 * n - x) * ang)); } else // second quadrant { x -= 2 * n; if (x < n) return cmplx(-std::sin(Thigh(x) * ang), std::cos(Thigh(x) * ang)); return cmplx(-std::cos(Thigh(2 * n - x) * ang), std::sin(Thigh(2 * n - x) * ang)); } } else { x = 8 * n - x; if (x < 2 * n) // third quadrant { if (x < n) return cmplx(std::cos(Thigh(x) * ang), -std::sin(Thigh(x) * ang)); return cmplx(std::sin(Thigh(2 * n - x) * ang), -std::cos(Thigh(2 * n - x) * ang)); } else // fourth quadrant { x -= 2 * n; if (x < n) return cmplx(-std::sin(Thigh(x) * ang), -std::cos(Thigh(x) * ang)); return cmplx(-std::cos(Thigh(2 * n - x) * ang), -std::sin(Thigh(2 * n - x) * ang)); } } } public: POCKETFFT_NOINLINE sincos_2pibyn(size_t n) : N(n) { constexpr auto pi = 3.141592653589793238462643383279502884197L; Thigh ang = Thigh(0.25L * pi / n); size_t nval = (n + 2) / 2; shift = 1; while ((size_t(1) << shift) * (size_t(1) << shift) < nval) ++shift; mask = (size_t(1) << shift) - 1; v1.resize(mask + 1); v1[0].Set(Thigh(1), Thigh(0)); for (size_t i = 1; i < v1.size(); ++i) v1[i] = calc(i, n, ang); v2.resize((nval + mask) / (mask + 1)); v2[0].Set(Thigh(1), Thigh(0)); for (size_t i = 1; i < v2.size(); ++i) v2[i] = calc(i * (mask + 1), n, ang); } cmplx operator[](size_t idx) const { if (2 * idx <= N) { auto x1 = v1[idx & mask], x2 = v2[idx >> shift]; return cmplx(T(x1.r * x2.r - x1.i * x2.i), T(x1.r * x2.i + x1.i * x2.r)); } idx = N - idx; auto x1 = v1[idx & mask], x2 = v2[idx >> shift]; return cmplx(T(x1.r * x2.r - x1.i * x2.i), -T(x1.r 
* x2.i + x1.i * x2.r)); } }; struct util // hack to avoid duplicate symbols { static POCKETFFT_NOINLINE size_t largest_prime_factor(size_t n) { size_t res = 1; while ((n & 1) == 0) { res = 2; n >>= 1; } for (size_t x = 3; x * x <= n; x += 2) while ((n % x) == 0) { res = x; n /= x; } if (n > 1) res = n; return res; } static POCKETFFT_NOINLINE double cost_guess(size_t n) { constexpr double lfp = 1.1; // penalty for non-hardcoded larger factors size_t ni = n; double result = 0.; while ((n & 1) == 0) { result += 2; n >>= 1; } for (size_t x = 3; x * x <= n; x += 2) while ((n % x) == 0) { result += (x <= 5) ? double(x) : lfp * double(x); // penalize larger prime factors n /= x; } if (n > 1) result += (n <= 5) ? double(n) : lfp * double(n); return result * double(ni); } /* returns the smallest composite of 2, 3, 5, 7 and 11 which is >= n */ static POCKETFFT_NOINLINE size_t good_size_cmplx(size_t n) { if (n <= 12) return n; size_t bestfac = 2 * n; for (size_t f11 = 1; f11 < bestfac; f11 *= 11) for (size_t f117 = f11; f117 < bestfac; f117 *= 7) for (size_t f1175 = f117; f1175 < bestfac; f1175 *= 5) { size_t x = f1175; while (x < n) x *= 2; for (;;) { if (x < n) x *= 3; else if (x > n) { if (x < bestfac) bestfac = x; if (x & 1) break; x >>= 1; } else return n; } } return bestfac; } /* returns the smallest composite of 2, 3, 5 which is >= n */ static POCKETFFT_NOINLINE size_t good_size_real(size_t n) { if (n <= 6) return n; size_t bestfac = 2 * n; for (size_t f5 = 1; f5 < bestfac; f5 *= 5) { size_t x = f5; while (x < n) x *= 2; for (;;) { if (x < n) x *= 3; else if (x > n) { if (x < bestfac) bestfac = x; if (x & 1) break; x >>= 1; } else return n; } } return bestfac; } static size_t prod(const shape_t &shape) { size_t res = 1; for (auto sz : shape) res *= sz; return res; } static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, bool inplace) { auto ndim = shape.size(); if (ndim < 1) throw 
std::runtime_error("ndim must be >= 1"); if ((stride_in.size() != ndim) || (stride_out.size() != ndim)) throw std::runtime_error("stride dimension mismatch"); if (inplace && (stride_in != stride_out)) throw std::runtime_error("stride mismatch"); } static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, bool inplace, const shape_t &axes) { sanity_check(shape, stride_in, stride_out, inplace); auto ndim = shape.size(); shape_t tmp(ndim, 0); for (auto ax : axes) { if (ax >= ndim) throw std::invalid_argument("bad axis number"); if (++tmp[ax] > 1) throw std::invalid_argument("axis specified repeatedly"); } } static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, bool inplace, size_t axis) { sanity_check(shape, stride_in, stride_out, inplace); if (axis >= shape.size()) throw std::invalid_argument("bad axis number"); } #ifdef POCKETFFT_NO_MULTITHREADING static size_t thread_count(size_t /*nthreads*/, const shape_t & /*shape*/, size_t /*axis*/, size_t /*vlen*/) { return 1; } #else static size_t thread_count(size_t nthreads, const shape_t &shape, size_t axis, size_t vlen) { if (nthreads == 1) return 1; size_t size = prod(shape); size_t parallel = size / (shape[axis] * vlen); if (shape[axis] < 1000) parallel /= 4; size_t max_threads = nthreads == 0 ? 
std::thread::hardware_concurrency() : nthreads; return std::max(size_t(1), std::min(parallel, max_threads)); } #endif }; namespace threading { #ifdef POCKETFFT_NO_MULTITHREADING constexpr inline size_t thread_id() { return 0; } constexpr inline size_t num_threads() { return 1; } template void thread_map(size_t /* nthreads */, Func f) { f(); } #else inline size_t &thread_id() { static thread_local size_t thread_id_ = 0; return thread_id_; } inline size_t &num_threads() { static thread_local size_t num_threads_ = 1; return num_threads_; } static const size_t max_threads = std::max(1u, std::thread::hardware_concurrency()); class latch { std::atomic num_left_; std::mutex mut_; std::condition_variable completed_; using lock_t = std::unique_lock; public: latch(size_t n) : num_left_(n) { } void count_down() { lock_t lock(mut_); if (--num_left_) return; completed_.notify_all(); } void wait() { lock_t lock(mut_); completed_.wait(lock, [this] { return is_ready(); }); } bool is_ready() { return num_left_ == 0; } }; template class concurrent_queue { std::queue q_; std::mutex mut_; std::condition_variable item_added_; bool shutdown_; using lock_t = std::unique_lock; public: concurrent_queue() : shutdown_(false) { } void push(T val) { { lock_t lock(mut_); if (shutdown_) throw std::runtime_error("Item added to queue after shutdown"); q_.push(move(val)); } item_added_.notify_one(); } bool pop(T &val) { lock_t lock(mut_); item_added_.wait(lock, [this] { return (!q_.empty() || shutdown_); }); if (q_.empty()) return false; // We are shutting down val = std::move(q_.front()); q_.pop(); return true; } void shutdown() { { lock_t lock(mut_); shutdown_ = true; } item_added_.notify_all(); } void restart() { shutdown_ = false; } }; class thread_pool { concurrent_queue> work_queue_; std::vector threads_; void worker_main() { std::function work; while (work_queue_.pop(work)) work(); } void create_threads() { size_t nthreads = threads_.size(); for (size_t i = 0; i < nthreads; ++i) { try { 
threads_[i] = std::thread([this] { worker_main(); }); } catch (...) { shutdown(); throw; } } } public: explicit thread_pool(size_t nthreads) : threads_(nthreads) { create_threads(); } thread_pool() : thread_pool(max_threads) { } ~thread_pool() { shutdown(); } void submit(std::function work) { work_queue_.push(move(work)); } void shutdown() { work_queue_.shutdown(); for (auto &thread : threads_) if (thread.joinable()) thread.join(); } void restart() { work_queue_.restart(); create_threads(); } }; inline thread_pool &get_pool() { static thread_pool pool; #ifdef POCKETFFT_PTHREADS static std::once_flag f; std::call_once(f, [] { pthread_atfork(+[] { get_pool().shutdown(); }, // prepare +[] { get_pool().restart(); }, // parent +[] { get_pool().restart(); } // child ); }); #endif return pool; } /** Map a function f over nthreads */ template void thread_map(size_t nthreads, Func f) { if (nthreads == 0) nthreads = max_threads; if (nthreads == 1) { f(); return; } auto &pool = get_pool(); latch counter(nthreads); std::exception_ptr ex; std::mutex ex_mut; for (size_t i = 0; i < nthreads; ++i) { pool.submit([&f, &counter, &ex, &ex_mut, i, nthreads] { thread_id() = i; num_threads() = nthreads; try { f(); } catch (...) 
{ std::lock_guard lock(ex_mut); ex = std::current_exception(); } counter.count_down(); }); } counter.wait(); if (ex) std::rethrow_exception(ex); } #endif } // // complex FFTPACK transforms // template class cfftp { private: struct fctdata { size_t fct; cmplx *tw, *tws; }; size_t length; arr> mem; std::vector fact; void add_factor(size_t factor) { fact.push_back({factor, nullptr, nullptr}); } template void pass2(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa) const { auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 2 * c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i - 1 + x * (ido - 1)]; }; if (ido == 1) for (size_t k = 0; k < l1; ++k) { CH(0, k, 0) = CC(0, 0, k) + CC(0, 1, k); CH(0, k, 1) = CC(0, 0, k) - CC(0, 1, k); } else for (size_t k = 0; k < l1; ++k) { CH(0, k, 0) = CC(0, 0, k) + CC(0, 1, k); CH(0, k, 1) = CC(0, 0, k) - CC(0, 1, k); for (size_t i = 1; i < ido; ++i) { CH(i, k, 0) = CC(i, 0, k) + CC(i, 1, k); special_mul(CC(i, 0, k) - CC(i, 1, k), WA(0, i), CH(i, k, 1)); } } } #define POCKETFFT_PREP3(idx) \ T t0 = CC(idx, 0, k), t1, t2; \ PM(t1, t2, CC(idx, 1, k), CC(idx, 2, k)); \ CH(idx, k, 0) = t0 + t1; #define POCKETFFT_PARTSTEP3a(u1, u2, twr, twi) \ { \ T ca = t0 + t1 * twr; \ T cb{-t2.i * twi, t2.r * twi}; \ PM(CH(0, k, u1), CH(0, k, u2), ca, cb); \ } #define POCKETFFT_PARTSTEP3b(u1, u2, twr, twi) \ { \ T ca = t0 + t1 * twr; \ T cb{-t2.i * twi, t2.r * twi}; \ special_mul(ca + cb, WA(u1 - 1, i), CH(i, k, u1)); \ special_mul(ca - cb, WA(u2 - 1, i), CH(i, k, u2)); \ } template void pass3(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa) const { constexpr T0 tw1r = -0.5, tw1i = (fwd ? 
-1 : 1) * T0(0.8660254037844386467637231707529362L); auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 3 * c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i - 1 + x * (ido - 1)]; }; if (ido == 1) for (size_t k = 0; k < l1; ++k) { POCKETFFT_PREP3(0) POCKETFFT_PARTSTEP3a(1, 2, tw1r, tw1i) } else for (size_t k = 0; k < l1; ++k) { { POCKETFFT_PREP3(0) POCKETFFT_PARTSTEP3a(1, 2, tw1r, tw1i) } for (size_t i = 1; i < ido; ++i) { POCKETFFT_PREP3(i) POCKETFFT_PARTSTEP3b(1, 2, tw1r, tw1i) } } } #undef POCKETFFT_PARTSTEP3b #undef POCKETFFT_PARTSTEP3a #undef POCKETFFT_PREP3 template void pass4(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa) const { auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 4 * c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i - 1 + x * (ido - 1)]; }; if (ido == 1) for (size_t k = 0; k < l1; ++k) { T t1, t2, t3, t4; PM(t2, t1, CC(0, 0, k), CC(0, 2, k)); PM(t3, t4, CC(0, 1, k), CC(0, 3, k)); ROTX90(t4); PM(CH(0, k, 0), CH(0, k, 2), t2, t3); PM(CH(0, k, 1), CH(0, k, 3), t1, t4); } else for (size_t k = 0; k < l1; ++k) { { T t1, t2, t3, t4; PM(t2, t1, CC(0, 0, k), CC(0, 2, k)); PM(t3, t4, CC(0, 1, k), CC(0, 3, k)); ROTX90(t4); PM(CH(0, k, 0), CH(0, k, 2), t2, t3); PM(CH(0, k, 1), CH(0, k, 3), t1, t4); } for (size_t i = 1; i < ido; ++i) { T t1, t2, t3, t4; T cc0 = CC(i, 0, k), cc1 = CC(i, 1, k), cc2 = CC(i, 2, k), cc3 = CC(i, 3, k); PM(t2, t1, cc0, cc2); PM(t3, t4, cc1, cc3); ROTX90(t4); CH(i, k, 0) = t2 + t3; special_mul(t1 + t4, WA(0, i), CH(i, k, 1)); special_mul(t2 - t3, WA(1, i), CH(i, k, 2)); special_mul(t1 - t4, WA(2, i), CH(i, k, 3)); } } } #define POCKETFFT_PREP5(idx) \ T t0 = CC(idx, 
0, k), t1, t2, t3, t4; \ PM(t1, t4, CC(idx, 1, k), CC(idx, 4, k)); \ PM(t2, t3, CC(idx, 2, k), CC(idx, 3, k)); \ CH(idx, k, 0).r = t0.r + t1.r + t2.r; \ CH(idx, k, 0).i = t0.i + t1.i + t2.i; #define POCKETFFT_PARTSTEP5a(u1, u2, twar, twbr, twai, twbi) \ { \ T ca, cb; \ ca.r = t0.r + twar * t1.r + twbr * t2.r; \ ca.i = t0.i + twar * t1.i + twbr * t2.i; \ cb.i = twai * t4.r twbi * t3.r; \ cb.r = -(twai * t4.i twbi * t3.i); \ PM(CH(0, k, u1), CH(0, k, u2), ca, cb); \ } #define POCKETFFT_PARTSTEP5b(u1, u2, twar, twbr, twai, twbi) \ { \ T ca, cb, da, db; \ ca.r = t0.r + twar * t1.r + twbr * t2.r; \ ca.i = t0.i + twar * t1.i + twbr * t2.i; \ cb.i = twai * t4.r twbi * t3.r; \ cb.r = -(twai * t4.i twbi * t3.i); \ special_mul(ca + cb, WA(u1 - 1, i), CH(i, k, u1)); \ special_mul(ca - cb, WA(u2 - 1, i), CH(i, k, u2)); \ } template void pass5(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa) const { constexpr T0 tw1r = T0(0.3090169943749474241022934171828191L), tw1i = (fwd ? -1 : 1) * T0(0.9510565162951535721164393333793821L), tw2r = T0(-0.8090169943749474241022934171828191L), tw2i = (fwd ? 
-1 : 1) * T0(0.5877852522924731291687059546390728L); auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 5 * c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i - 1 + x * (ido - 1)]; }; if (ido == 1) for (size_t k = 0; k < l1; ++k) { POCKETFFT_PREP5(0) POCKETFFT_PARTSTEP5a(1, 4, tw1r, tw2r, +tw1i, +tw2i) POCKETFFT_PARTSTEP5a(2, 3, tw2r, tw1r, +tw2i, -tw1i) } else for (size_t k = 0; k < l1; ++k) { { POCKETFFT_PREP5(0) POCKETFFT_PARTSTEP5a(1, 4, tw1r, tw2r, +tw1i, +tw2i) POCKETFFT_PARTSTEP5a(2, 3, tw2r, tw1r, +tw2i, -tw1i) } for (size_t i = 1; i < ido; ++i) { POCKETFFT_PREP5(i) POCKETFFT_PARTSTEP5b(1, 4, tw1r, tw2r, +tw1i, +tw2i) POCKETFFT_PARTSTEP5b(2, 3, tw2r, tw1r, +tw2i, -tw1i) } } } #undef POCKETFFT_PARTSTEP5b #undef POCKETFFT_PARTSTEP5a #undef POCKETFFT_PREP5 #define POCKETFFT_PREP7(idx) \ T t1 = CC(idx, 0, k), t2, t3, t4, t5, t6, t7; \ PM(t2, t7, CC(idx, 1, k), CC(idx, 6, k)); \ PM(t3, t6, CC(idx, 2, k), CC(idx, 5, k)); \ PM(t4, t5, CC(idx, 3, k), CC(idx, 4, k)); \ CH(idx, k, 0).r = t1.r + t2.r + t3.r + t4.r; \ CH(idx, k, 0).i = t1.i + t2.i + t3.i + t4.i; #define POCKETFFT_PARTSTEP7a0(u1, u2, x1, x2, x3, y1, y2, y3, out1, out2) \ { \ T ca, cb; \ ca.r = t1.r + x1 * t2.r + x2 * t3.r + x3 * t4.r; \ ca.i = t1.i + x1 * t2.i + x2 * t3.i + x3 * t4.i; \ cb.i = y1 * t7.r y2 * t6.r y3 * t5.r; \ cb.r = -(y1 * t7.i y2 * t6.i y3 * t5.i); \ PM(out1, out2, ca, cb); \ } #define POCKETFFT_PARTSTEP7a(u1, u2, x1, x2, x3, y1, y2, y3) \ POCKETFFT_PARTSTEP7a0(u1, u2, x1, x2, x3, y1, y2, y3, CH(0, k, u1), \ CH(0, k, u2)) #define POCKETFFT_PARTSTEP7(u1, u2, x1, x2, x3, y1, y2, y3) \ { \ T da, db; \ POCKETFFT_PARTSTEP7a0(u1, u2, x1, x2, x3, y1, y2, y3, da, db) \ special_mul(da, WA(u1 - 1, i), CH(i, k, u1)); \ special_mul(db, WA(u2 - 1, i), CH(i, k, u2)); \ } template void pass7(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T 
*POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa) const { constexpr T0 tw1r = T0(0.6234898018587335305250048840042398L), tw1i = (fwd ? -1 : 1) * T0(0.7818314824680298087084445266740578L), tw2r = T0(-0.2225209339563144042889025644967948L), tw2i = (fwd ? -1 : 1) * T0(0.9749279121818236070181316829939312L), tw3r = T0(-0.9009688679024191262361023195074451L), tw3i = (fwd ? -1 : 1) * T0(0.433883739117558120475768332848359L); auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 7 * c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i - 1 + x * (ido - 1)]; }; if (ido == 1) for (size_t k = 0; k < l1; ++k) { POCKETFFT_PREP7(0) POCKETFFT_PARTSTEP7a(1, 6, tw1r, tw2r, tw3r, +tw1i, +tw2i, +tw3i) POCKETFFT_PARTSTEP7a(2, 5, tw2r, tw3r, tw1r, +tw2i, -tw3i, -tw1i) POCKETFFT_PARTSTEP7a(3, 4, tw3r, tw1r, tw2r, +tw3i, -tw1i, +tw2i) } else for (size_t k = 0; k < l1; ++k) { { POCKETFFT_PREP7(0) POCKETFFT_PARTSTEP7a(1, 6, tw1r, tw2r, tw3r, +tw1i, +tw2i, +tw3i) POCKETFFT_PARTSTEP7a(2, 5, tw2r, tw3r, tw1r, +tw2i, -tw3i, -tw1i) POCKETFFT_PARTSTEP7a(3, 4, tw3r, tw1r, tw2r, +tw3i, -tw1i, +tw2i) } for (size_t i = 1; i < ido; ++i) { POCKETFFT_PREP7(i) POCKETFFT_PARTSTEP7(1, 6, tw1r, tw2r, tw3r, +tw1i, +tw2i, +tw3i) POCKETFFT_PARTSTEP7(2, 5, tw2r, tw3r, tw1r, +tw2i, -tw3i, -tw1i) POCKETFFT_PARTSTEP7(3, 4, tw3r, tw1r, tw2r, +tw3i, -tw1i, +tw2i) } } } #undef POCKETFFT_PARTSTEP7 #undef POCKETFFT_PARTSTEP7a0 #undef POCKETFFT_PARTSTEP7a #undef POCKETFFT_PREP7 template void ROTX45(T &a) const { constexpr T0 hsqt2 = T0(0.707106781186547524400844362104849L); if (fwd) { auto tmp_ = a.r; a.r = hsqt2 * (a.r + a.i); a.i = hsqt2 * (a.i - tmp_); } else { auto tmp_ = a.r; a.r = hsqt2 * (a.r - a.i); a.i = hsqt2 * (a.i + tmp_); } } template void ROTX135(T &a) const { constexpr T0 hsqt2 = T0(0.707106781186547524400844362104849L); if (fwd) { auto tmp_ = a.r; a.r 
= hsqt2 * (a.i - a.r); a.i = hsqt2 * (-tmp_ - a.i); } else { auto tmp_ = a.r; a.r = hsqt2 * (-a.r - a.i); a.i = hsqt2 * (tmp_ - a.i); } } template void pass8(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa) const { auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 8 * c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i - 1 + x * (ido - 1)]; }; if (ido == 1) for (size_t k = 0; k < l1; ++k) { T a0, a1, a2, a3, a4, a5, a6, a7; PM(a1, a5, CC(0, 1, k), CC(0, 5, k)); PM(a3, a7, CC(0, 3, k), CC(0, 7, k)); PMINPLACE(a1, a3); ROTX90(a3); ROTX90(a7); PMINPLACE(a5, a7); ROTX45(a5); ROTX135(a7); PM(a0, a4, CC(0, 0, k), CC(0, 4, k)); PM(a2, a6, CC(0, 2, k), CC(0, 6, k)); PM(CH(0, k, 0), CH(0, k, 4), a0 + a2, a1); PM(CH(0, k, 2), CH(0, k, 6), a0 - a2, a3); ROTX90(a6); PM(CH(0, k, 1), CH(0, k, 5), a4 + a6, a5); PM(CH(0, k, 3), CH(0, k, 7), a4 - a6, a7); } else for (size_t k = 0; k < l1; ++k) { { T a0, a1, a2, a3, a4, a5, a6, a7; PM(a1, a5, CC(0, 1, k), CC(0, 5, k)); PM(a3, a7, CC(0, 3, k), CC(0, 7, k)); PMINPLACE(a1, a3); ROTX90(a3); ROTX90(a7); PMINPLACE(a5, a7); ROTX45(a5); ROTX135(a7); PM(a0, a4, CC(0, 0, k), CC(0, 4, k)); PM(a2, a6, CC(0, 2, k), CC(0, 6, k)); PM(CH(0, k, 0), CH(0, k, 4), a0 + a2, a1); PM(CH(0, k, 2), CH(0, k, 6), a0 - a2, a3); ROTX90(a6); PM(CH(0, k, 1), CH(0, k, 5), a4 + a6, a5); PM(CH(0, k, 3), CH(0, k, 7), a4 - a6, a7); } for (size_t i = 1; i < ido; ++i) { T a0, a1, a2, a3, a4, a5, a6, a7; PM(a1, a5, CC(i, 1, k), CC(i, 5, k)); PM(a3, a7, CC(i, 3, k), CC(i, 7, k)); ROTX90(a7); PMINPLACE(a1, a3); ROTX90(a3); PMINPLACE(a5, a7); ROTX45(a5); ROTX135(a7); PM(a0, a4, CC(i, 0, k), CC(i, 4, k)); PM(a2, a6, CC(i, 2, k), CC(i, 6, k)); PMINPLACE(a0, a2); CH(i, k, 0) = a0 + a1; special_mul(a0 - a1, WA(3, i), CH(i, k, 4)); special_mul(a2 + a3, WA(1, i), 
CH(i, k, 2)); special_mul(a2 - a3, WA(5, i), CH(i, k, 6)); ROTX90(a6); PMINPLACE(a4, a6); special_mul(a4 + a5, WA(0, i), CH(i, k, 1)); special_mul(a4 - a5, WA(4, i), CH(i, k, 5)); special_mul(a6 + a7, WA(2, i), CH(i, k, 3)); special_mul(a6 - a7, WA(6, i), CH(i, k, 7)); } } } #define POCKETFFT_PREP11(idx) \ T t1 = CC(idx, 0, k), t2, t3, t4, t5, t6, t7, t8, t9, t10, t11; \ PM(t2, t11, CC(idx, 1, k), CC(idx, 10, k)); \ PM(t3, t10, CC(idx, 2, k), CC(idx, 9, k)); \ PM(t4, t9, CC(idx, 3, k), CC(idx, 8, k)); \ PM(t5, t8, CC(idx, 4, k), CC(idx, 7, k)); \ PM(t6, t7, CC(idx, 5, k), CC(idx, 6, k)); \ CH(idx, k, 0).r = t1.r + t2.r + t3.r + t4.r + t5.r + t6.r; \ CH(idx, k, 0).i = t1.i + t2.i + t3.i + t4.i + t5.i + t6.i; #define POCKETFFT_PARTSTEP11a0(u1, u2, x1, x2, x3, x4, x5, y1, y2, y3, y4, y5, \ out1, out2) \ { \ T ca = t1 + t2 * x1 + t3 * x2 + t4 * x3 + t5 * x4 + t6 * x5, cb; \ cb.i = y1 * t11.r y2 * t10.r y3 * t9.r y4 * t8.r y5 * t7.r; \ cb.r = -(y1 * t11.i y2 * t10.i y3 * t9.i y4 * t8.i y5 * t7.i); \ PM(out1, out2, ca, cb); \ } #define POCKETFFT_PARTSTEP11a(u1, u2, x1, x2, x3, x4, x5, y1, y2, y3, y4, y5) \ POCKETFFT_PARTSTEP11a0(u1, u2, x1, x2, x3, x4, x5, y1, y2, y3, y4, y5, \ CH(0, k, u1), CH(0, k, u2)) #define POCKETFFT_PARTSTEP11(u1, u2, x1, x2, x3, x4, x5, y1, y2, y3, y4, y5) \ { \ T da, db; \ POCKETFFT_PARTSTEP11a0(u1, u2, x1, x2, x3, x4, x5, y1, y2, y3, y4, y5, da, \ db) \ special_mul(da, WA(u1 - 1, i), CH(i, k, u1)); \ special_mul(db, WA(u2 - 1, i), CH(i, k, u2)); \ } template void pass11(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa) const { constexpr T0 tw1r = T0(0.8412535328311811688618116489193677L), tw1i = (fwd ? -1 : 1) * T0(0.5406408174555975821076359543186917L), tw2r = T0(0.4154150130018864255292741492296232L), tw2i = (fwd ? -1 : 1) * T0(0.9096319953545183714117153830790285L), tw3r = T0(-0.1423148382732851404437926686163697L), tw3i = (fwd ? 
-1 : 1) * T0(0.9898214418809327323760920377767188L), tw4r = T0(-0.6548607339452850640569250724662936L), tw4i = (fwd ? -1 : 1) * T0(0.7557495743542582837740358439723444L), tw5r = T0(-0.9594929736144973898903680570663277L), tw5i = (fwd ? -1 : 1) * T0(0.2817325568414296977114179153466169L); auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 11 * c)]; }; auto WA = [wa, ido](size_t x, size_t i) { return wa[i - 1 + x * (ido - 1)]; }; if (ido == 1) for (size_t k = 0; k < l1; ++k) { POCKETFFT_PREP11(0) POCKETFFT_PARTSTEP11a(1, 10, tw1r, tw2r, tw3r, tw4r, tw5r, +tw1i, +tw2i, +tw3i, +tw4i, +tw5i) POCKETFFT_PARTSTEP11a(2, 9, tw2r, tw4r, tw5r, tw3r, tw1r, +tw2i, +tw4i, -tw5i, -tw3i, -tw1i) POCKETFFT_PARTSTEP11a(3, 8, tw3r, tw5r, tw2r, tw1r, tw4r, +tw3i, -tw5i, -tw2i, +tw1i, +tw4i) POCKETFFT_PARTSTEP11a(4, 7, tw4r, tw3r, tw1r, tw5r, tw2r, +tw4i, -tw3i, +tw1i, +tw5i, -tw2i) POCKETFFT_PARTSTEP11a(5, 6, tw5r, tw1r, tw4r, tw2r, tw3r, +tw5i, -tw1i, +tw4i, -tw2i, +tw3i) } else for (size_t k = 0; k < l1; ++k) { { POCKETFFT_PREP11(0) POCKETFFT_PARTSTEP11a(1, 10, tw1r, tw2r, tw3r, tw4r, tw5r, +tw1i, +tw2i, +tw3i, +tw4i, +tw5i) POCKETFFT_PARTSTEP11a(2, 9, tw2r, tw4r, tw5r, tw3r, tw1r, +tw2i, +tw4i, -tw5i, -tw3i, -tw1i) POCKETFFT_PARTSTEP11a(3, 8, tw3r, tw5r, tw2r, tw1r, tw4r, +tw3i, -tw5i, -tw2i, +tw1i, +tw4i) POCKETFFT_PARTSTEP11a(4, 7, tw4r, tw3r, tw1r, tw5r, tw2r, +tw4i, -tw3i, +tw1i, +tw5i, -tw2i) POCKETFFT_PARTSTEP11a(5, 6, tw5r, tw1r, tw4r, tw2r, tw3r, +tw5i, -tw1i, +tw4i, -tw2i, +tw3i) } for (size_t i = 1; i < ido; ++i) { POCKETFFT_PREP11(i) POCKETFFT_PARTSTEP11(1, 10, tw1r, tw2r, tw3r, tw4r, tw5r, +tw1i, +tw2i, +tw3i, +tw4i, +tw5i) POCKETFFT_PARTSTEP11(2, 9, tw2r, tw4r, tw5r, tw3r, tw1r, +tw2i, +tw4i, -tw5i, -tw3i, -tw1i) POCKETFFT_PARTSTEP11(3, 8, tw3r, tw5r, tw2r, tw1r, tw4r, +tw3i, -tw5i, -tw2i, +tw1i, +tw4i) POCKETFFT_PARTSTEP11(4, 7, tw4r, 
tw3r, tw1r, tw5r, tw2r, +tw4i, -tw3i, +tw1i, +tw5i, -tw2i) POCKETFFT_PARTSTEP11(5, 6, tw5r, tw1r, tw4r, tw2r, tw3r, +tw5i, -tw1i, +tw4i, -tw2i, +tw3i) } } } #undef PARTSTEP11 #undef PARTSTEP11a0 #undef PARTSTEP11a #undef POCKETFFT_PREP11 template void passg(size_t ido, size_t ip, size_t l1, T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const cmplx *POCKETFFT_RESTRICT wa, const cmplx *POCKETFFT_RESTRICT csarr) const { const size_t cdim = ip; size_t ipph = (ip + 1) / 2; size_t idl1 = ido * l1; auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto CC = [cc, ido, cdim](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + cdim * c)]; }; auto CX = [cc, ido, l1](size_t a, size_t b, size_t c) -> T &{ return cc[a + ido * (b + l1 * c)]; }; auto CX2 = [cc, idl1](size_t a, size_t b) -> T &{ return cc[a + idl1 * b]; }; auto CH2 = [ch, idl1](size_t a, size_t b) -> const T &{ return ch[a + idl1 * b]; }; arr> wal(ip); wal[0] = cmplx(1., 0.); for (size_t i = 1; i < ip; ++i) wal[i] = cmplx(csarr[i].r, fwd ? 
-csarr[i].i : csarr[i].i); for (size_t k = 0; k < l1; ++k) for (size_t i = 0; i < ido; ++i) CH(i, k, 0) = CC(i, 0, k); for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) for (size_t k = 0; k < l1; ++k) for (size_t i = 0; i < ido; ++i) PM(CH(i, k, j), CH(i, k, jc), CC(i, j, k), CC(i, jc, k)); for (size_t k = 0; k < l1; ++k) for (size_t i = 0; i < ido; ++i) { T tmp = CH(i, k, 0); for (size_t j = 1; j < ipph; ++j) tmp += CH(i, k, j); CX(i, k, 0) = tmp; } for (size_t l = 1, lc = ip - 1; l < ipph; ++l, --lc) { // j=0 for (size_t ik = 0; ik < idl1; ++ik) { CX2(ik, l).r = CH2(ik, 0).r + wal[l].r * CH2(ik, 1).r + wal[2 * l].r * CH2(ik, 2).r; CX2(ik, l).i = CH2(ik, 0).i + wal[l].r * CH2(ik, 1).i + wal[2 * l].r * CH2(ik, 2).i; CX2(ik, lc).r = -wal[l].i * CH2(ik, ip - 1).i - wal[2 * l].i * CH2(ik, ip - 2).i; CX2(ik, lc).i = wal[l].i * CH2(ik, ip - 1).r + wal[2 * l].i * CH2(ik, ip - 2).r; } size_t iwal = 2 * l; size_t j = 3, jc = ip - 3; for (; j < ipph - 1; j += 2, jc -= 2) { iwal += l; if (iwal > ip) iwal -= ip; cmplx xwal = wal[iwal]; iwal += l; if (iwal > ip) iwal -= ip; cmplx xwal2 = wal[iwal]; for (size_t ik = 0; ik < idl1; ++ik) { CX2(ik, l).r += CH2(ik, j).r * xwal.r + CH2(ik, j + 1).r * xwal2.r; CX2(ik, l).i += CH2(ik, j).i * xwal.r + CH2(ik, j + 1).i * xwal2.r; CX2(ik, lc).r -= CH2(ik, jc).i * xwal.i + CH2(ik, jc - 1).i * xwal2.i; CX2(ik, lc).i += CH2(ik, jc).r * xwal.i + CH2(ik, jc - 1).r * xwal2.i; } } for (; j < ipph; ++j, --jc) { iwal += l; if (iwal > ip) iwal -= ip; cmplx xwal = wal[iwal]; for (size_t ik = 0; ik < idl1; ++ik) { CX2(ik, l).r += CH2(ik, j).r * xwal.r; CX2(ik, l).i += CH2(ik, j).i * xwal.r; CX2(ik, lc).r -= CH2(ik, jc).i * xwal.i; CX2(ik, lc).i += CH2(ik, jc).r * xwal.i; } } } // shuffling and twiddling if (ido == 1) for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) for (size_t ik = 0; ik < idl1; ++ik) { T t1 = CX2(ik, j), t2 = CX2(ik, jc); PM(CX2(ik, j), CX2(ik, jc), t1, t2); } else { for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) for 
(size_t k = 0; k < l1; ++k) { T t1 = CX(0, k, j), t2 = CX(0, k, jc); PM(CX(0, k, j), CX(0, k, jc), t1, t2); for (size_t i = 1; i < ido; ++i) { T x1, x2; PM(x1, x2, CX(i, k, j), CX(i, k, jc)); size_t idij = (j - 1) * (ido - 1) + i - 1; special_mul(x1, wa[idij], CX(i, k, j)); idij = (jc - 1) * (ido - 1) + i - 1; special_mul(x2, wa[idij], CX(i, k, jc)); } } } } template void pass_all(T c[], T0 fct) const { if (length == 1) { c[0] *= fct; return; } size_t l1 = 1; arr ch(length); T *p1 = c, *p2 = ch.data(); for (size_t k1 = 0; k1 < fact.size(); k1++) { size_t ip = fact[k1].fct; size_t l2 = ip * l1; size_t ido = length / l2; if (ip == 4) pass4(ido, l1, p1, p2, fact[k1].tw); else if (ip == 8) pass8(ido, l1, p1, p2, fact[k1].tw); else if (ip == 2) pass2(ido, l1, p1, p2, fact[k1].tw); else if (ip == 3) pass3(ido, l1, p1, p2, fact[k1].tw); else if (ip == 5) pass5(ido, l1, p1, p2, fact[k1].tw); else if (ip == 7) pass7(ido, l1, p1, p2, fact[k1].tw); else if (ip == 11) pass11(ido, l1, p1, p2, fact[k1].tw); else { passg(ido, ip, l1, p1, p2, fact[k1].tw, fact[k1].tws); std::swap(p1, p2); } std::swap(p1, p2); l1 = l2; } if (p1 != c) { if (fct != 1.) for (size_t i = 0; i < length; ++i) c[i] = ch[i] * fct; else memcpy(c, p1, length * sizeof(T)); } else if (fct != 1.) for (size_t i = 0; i < length; ++i) c[i] *= fct; } public: template void exec(T c[], T0 fct, bool fwd) const { fwd ? 
pass_all(c, fct) : pass_all(c, fct); } private: POCKETFFT_NOINLINE void factorize() { size_t len = length; while ((len & 7) == 0) { add_factor(8); len >>= 3; } while ((len & 3) == 0) { add_factor(4); len >>= 2; } if ((len & 1) == 0) { len >>= 1; // factor 2 should be at the front of the factor list add_factor(2); std::swap(fact[0].fct, fact.back().fct); } for (size_t divisor = 3; divisor * divisor <= len; divisor += 2) while ((len % divisor) == 0) { add_factor(divisor); len /= divisor; } if (len > 1) add_factor(len); } size_t twsize() const { size_t twsize = 0, l1 = 1; for (size_t k = 0; k < fact.size(); ++k) { size_t ip = fact[k].fct, ido = length / (l1 * ip); twsize += (ip - 1) * (ido - 1); if (ip > 11) twsize += ip; l1 *= ip; } return twsize; } void comp_twiddle() { sincos_2pibyn twiddle(length); size_t l1 = 1; size_t memofs = 0; for (size_t k = 0; k < fact.size(); ++k) { size_t ip = fact[k].fct, ido = length / (l1 * ip); fact[k].tw = mem.data() + memofs; memofs += (ip - 1) * (ido - 1); for (size_t j = 1; j < ip; ++j) for (size_t i = 1; i < ido; ++i) fact[k].tw[(j - 1) * (ido - 1) + i - 1] = twiddle[j * l1 * i]; if (ip > 11) { fact[k].tws = mem.data() + memofs; memofs += ip; for (size_t j = 0; j < ip; ++j) fact[k].tws[j] = twiddle[j * l1 * ido]; } l1 *= ip; } } public: POCKETFFT_NOINLINE cfftp(size_t length_) : length(length_) { if (length == 0) throw std::runtime_error("zero-length FFT requested"); if (length == 1) return; factorize(); mem.resize(twsize()); comp_twiddle(); } }; // // real-valued FFTPACK transforms // template class rfftp { private: struct fctdata { size_t fct; T0 *tw, *tws; }; size_t length; arr mem; std::vector fact; void add_factor(size_t factor) { fact.push_back({factor, nullptr, nullptr}); } /* (a+ib) = conj(c+id) * (e+if) */ template inline void MULPM(T1 &a, T1 &b, T2 c, T2 d, T3 e, T3 f) const { a = c * e + d * f; b = c * f - d * e; } template void radf2(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, 
const T0 *POCKETFFT_RESTRICT wa) const { auto WA = [wa, ido](size_t x, size_t i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido, l1](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + l1 * c)]; }; auto CH = [ch, ido](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + 2 * c)]; }; for (size_t k = 0; k < l1; k++) PM(CH(0, 0, k), CH(ido - 1, 1, k), CC(0, k, 0), CC(0, k, 1)); if ((ido & 1) == 0) for (size_t k = 0; k < l1; k++) { CH(0, 1, k) = -CC(ido - 1, k, 1); CH(ido - 1, 0, k) = CC(ido - 1, k, 0); } if (ido <= 2) return; for (size_t k = 0; k < l1; k++) for (size_t i = 2; i < ido; i += 2) { size_t ic = ido - i; T tr2, ti2; MULPM(tr2, ti2, WA(0, i - 2), WA(0, i - 1), CC(i - 1, k, 1), CC(i, k, 1)); PM(CH(i - 1, 0, k), CH(ic - 1, 1, k), CC(i - 1, k, 0), tr2); PM(CH(i, 0, k), CH(ic, 1, k), ti2, CC(i, k, 0)); } } // a2=a+b; b2=i*(b-a); #define POCKETFFT_REARRANGE(rx, ix, ry, iy) \ { \ auto t1 = rx + ry, t2 = ry - rx, t3 = ix + iy, t4 = ix - iy; \ rx = t1; \ ix = t3; \ ry = t4; \ iy = t2; \ } template void radf3(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa) const { constexpr T0 taur = -0.5, taui = T0(0.8660254037844386467637231707529362L); auto WA = [wa, ido](size_t x, size_t i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido, l1](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + l1 * c)]; }; auto CH = [ch, ido](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + 3 * c)]; }; for (size_t k = 0; k < l1; k++) { T cr2 = CC(0, k, 1) + CC(0, k, 2); CH(0, 0, k) = CC(0, k, 0) + cr2; CH(0, 2, k) = taui * (CC(0, k, 2) - CC(0, k, 1)); CH(ido - 1, 1, k) = CC(0, k, 0) + taur * cr2; } if (ido == 1) return; for (size_t k = 0; k < l1; k++) for (size_t i = 2; i < ido; i += 2) { size_t ic = ido - i; T di2, di3, dr2, dr3; MULPM(dr2, di2, WA(0, i - 2), WA(0, i - 1), CC(i - 1, k, 1), CC(i, k, 1)); // d2=conj(WA0)*CC1 MULPM(dr3, di3, WA(1, i - 2), WA(1, i - 
1), CC(i - 1, k, 2), CC(i, k, 2)); // d3=conj(WA1)*CC2 POCKETFFT_REARRANGE(dr2, di2, dr3, di3); CH(i - 1, 0, k) = CC(i - 1, k, 0) + dr2; // c add CH(i, 0, k) = CC(i, k, 0) + di2; T tr2 = CC(i - 1, k, 0) + taur * dr2; // c add T ti2 = CC(i, k, 0) + taur * di2; T tr3 = taui * dr3; // t3 = taui*i*(d3-d2)? T ti3 = taui * di3; PM(CH(i - 1, 2, k), CH(ic - 1, 1, k), tr2, tr3); // PM(i) = t2+t3 PM(CH(i, 2, k), CH(ic, 1, k), ti3, ti2); // PM(ic) = conj(t2-t3) } } template void radf4(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa) const { constexpr T0 hsqt2 = T0(0.707106781186547524400844362104849L); auto WA = [wa, ido](size_t x, size_t i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido, l1](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + l1 * c)]; }; auto CH = [ch, ido](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + 4 * c)]; }; for (size_t k = 0; k < l1; k++) { T tr1, tr2; PM(tr1, CH(0, 2, k), CC(0, k, 3), CC(0, k, 1)); PM(tr2, CH(ido - 1, 1, k), CC(0, k, 0), CC(0, k, 2)); PM(CH(0, 0, k), CH(ido - 1, 3, k), tr2, tr1); } if ((ido & 1) == 0) for (size_t k = 0; k < l1; k++) { T ti1 = -hsqt2 * (CC(ido - 1, k, 1) + CC(ido - 1, k, 3)); T tr1 = hsqt2 * (CC(ido - 1, k, 1) - CC(ido - 1, k, 3)); PM(CH(ido - 1, 0, k), CH(ido - 1, 2, k), CC(ido - 1, k, 0), tr1); PM(CH(0, 3, k), CH(0, 1, k), ti1, CC(ido - 1, k, 2)); } if (ido <= 2) return; for (size_t k = 0; k < l1; k++) for (size_t i = 2; i < ido; i += 2) { size_t ic = ido - i; T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; MULPM(cr2, ci2, WA(0, i - 2), WA(0, i - 1), CC(i - 1, k, 1), CC(i, k, 1)); MULPM(cr3, ci3, WA(1, i - 2), WA(1, i - 1), CC(i - 1, k, 2), CC(i, k, 2)); MULPM(cr4, ci4, WA(2, i - 2), WA(2, i - 1), CC(i - 1, k, 3), CC(i, k, 3)); PM(tr1, tr4, cr4, cr2); PM(ti1, ti4, ci2, ci4); PM(tr2, tr3, CC(i - 1, k, 0), cr3); PM(ti2, ti3, CC(i, k, 0), ci3); PM(CH(i - 1, 0, k), CH(ic - 1, 3, k), tr2, 
tr1); PM(CH(i, 0, k), CH(ic, 3, k), ti1, ti2); PM(CH(i - 1, 2, k), CH(ic - 1, 1, k), tr3, ti4); PM(CH(i, 2, k), CH(ic, 1, k), tr4, ti3); } } template void radf5(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa) const { constexpr T0 tr11 = T0(0.3090169943749474241022934171828191L), ti11 = T0(0.9510565162951535721164393333793821L), tr12 = T0(-0.8090169943749474241022934171828191L), ti12 = T0(0.5877852522924731291687059546390728L); auto WA = [wa, ido](size_t x, size_t i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido, l1](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + l1 * c)]; }; auto CH = [ch, ido](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + 5 * c)]; }; for (size_t k = 0; k < l1; k++) { T cr2, cr3, ci4, ci5; PM(cr2, ci5, CC(0, k, 4), CC(0, k, 1)); PM(cr3, ci4, CC(0, k, 3), CC(0, k, 2)); CH(0, 0, k) = CC(0, k, 0) + cr2 + cr3; CH(ido - 1, 1, k) = CC(0, k, 0) + tr11 * cr2 + tr12 * cr3; CH(0, 2, k) = ti11 * ci5 + ti12 * ci4; CH(ido - 1, 3, k) = CC(0, k, 0) + tr12 * cr2 + tr11 * cr3; CH(0, 4, k) = ti12 * ci5 - ti11 * ci4; } if (ido == 1) return; for (size_t k = 0; k < l1; ++k) for (size_t i = 2, ic = ido - 2; i < ido; i += 2, ic -= 2) { T di2, di3, di4, di5, dr2, dr3, dr4, dr5; MULPM(dr2, di2, WA(0, i - 2), WA(0, i - 1), CC(i - 1, k, 1), CC(i, k, 1)); MULPM(dr3, di3, WA(1, i - 2), WA(1, i - 1), CC(i - 1, k, 2), CC(i, k, 2)); MULPM(dr4, di4, WA(2, i - 2), WA(2, i - 1), CC(i - 1, k, 3), CC(i, k, 3)); MULPM(dr5, di5, WA(3, i - 2), WA(3, i - 1), CC(i - 1, k, 4), CC(i, k, 4)); POCKETFFT_REARRANGE(dr2, di2, dr5, di5); POCKETFFT_REARRANGE(dr3, di3, dr4, di4); CH(i - 1, 0, k) = CC(i - 1, k, 0) + dr2 + dr3; CH(i, 0, k) = CC(i, k, 0) + di2 + di3; T tr2 = CC(i - 1, k, 0) + tr11 * dr2 + tr12 * dr3; T ti2 = CC(i, k, 0) + tr11 * di2 + tr12 * di3; T tr3 = CC(i - 1, k, 0) + tr12 * dr2 + tr11 * dr3; T ti3 = CC(i, k, 0) + tr12 * di2 + tr11 * di3; T tr5 = ti11 * dr5 + ti12 * dr4; 
T ti5 = ti11 * di5 + ti12 * di4; T tr4 = ti12 * dr5 - ti11 * dr4; T ti4 = ti12 * di5 - ti11 * di4; PM(CH(i - 1, 2, k), CH(ic - 1, 1, k), tr2, tr5); PM(CH(i, 2, k), CH(ic, 1, k), ti5, ti2); PM(CH(i - 1, 4, k), CH(ic - 1, 3, k), tr3, tr4); PM(CH(i, 4, k), CH(ic, 3, k), ti4, ti3); } } #undef POCKETFFT_REARRANGE template void radfg(size_t ido, size_t ip, size_t l1, T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa, const T0 *POCKETFFT_RESTRICT csarr) const { const size_t cdim = ip; size_t ipph = (ip + 1) / 2; size_t idl1 = ido * l1; auto CC = [cc, ido, cdim](size_t a, size_t b, size_t c) -> T &{ return cc[a + ido * (b + cdim * c)]; }; auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> const T &{ return ch[a + ido * (b + l1 * c)]; }; auto C1 = [cc, ido, l1](size_t a, size_t b, size_t c) -> T &{ return cc[a + ido * (b + l1 * c)]; }; auto C2 = [cc, idl1](size_t a, size_t b) -> T &{ return cc[a + idl1 * b]; }; auto CH2 = [ch, idl1](size_t a, size_t b) -> T &{ return ch[a + idl1 * b]; }; if (ido > 1) { for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 114 { size_t is = (j - 1) * (ido - 1), is2 = (jc - 1) * (ido - 1); for (size_t k = 0; k < l1; ++k) // 113 { size_t idij = is; size_t idij2 = is2; for (size_t i = 1; i <= ido - 2; i += 2) // 112 { T t1 = C1(i, k, j), t2 = C1(i + 1, k, j), t3 = C1(i, k, jc), t4 = C1(i + 1, k, jc); T x1 = wa[idij] * t1 + wa[idij + 1] * t2, x2 = wa[idij] * t2 - wa[idij + 1] * t1, x3 = wa[idij2] * t3 + wa[idij2 + 1] * t4, x4 = wa[idij2] * t4 - wa[idij2 + 1] * t3; PM(C1(i, k, j), C1(i + 1, k, jc), x3, x1); PM(C1(i + 1, k, j), C1(i, k, jc), x2, x4); idij += 2; idij2 += 2; } } } } for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 123 for (size_t k = 0; k < l1; ++k) // 122 MPINPLACE(C1(0, k, jc), C1(0, k, j)); // everything in C // memset(ch,0,ip*l1*ido*sizeof(double)); for (size_t l = 1, lc = ip - 1; l < ipph; ++l, --lc) // 127 { for (size_t ik = 0; ik < idl1; ++ik) // 124 { CH2(ik, l) = C2(ik, 0) + 
csarr[2 * l] * C2(ik, 1) + csarr[4 * l] * C2(ik, 2); CH2(ik, lc) = csarr[2 * l + 1] * C2(ik, ip - 1) + csarr[4 * l + 1] * C2(ik, ip - 2); } size_t iang = 2 * l; size_t j = 3, jc = ip - 3; for (; j < ipph - 3; j += 4, jc -= 4) // 126 { iang += l; if (iang >= ip) iang -= ip; T0 ar1 = csarr[2 * iang], ai1 = csarr[2 * iang + 1]; iang += l; if (iang >= ip) iang -= ip; T0 ar2 = csarr[2 * iang], ai2 = csarr[2 * iang + 1]; iang += l; if (iang >= ip) iang -= ip; T0 ar3 = csarr[2 * iang], ai3 = csarr[2 * iang + 1]; iang += l; if (iang >= ip) iang -= ip; T0 ar4 = csarr[2 * iang], ai4 = csarr[2 * iang + 1]; for (size_t ik = 0; ik < idl1; ++ik) // 125 { CH2(ik, l) += ar1 * C2(ik, j) + ar2 * C2(ik, j + 1) + ar3 * C2(ik, j + 2) + ar4 * C2(ik, j + 3); CH2(ik, lc) += ai1 * C2(ik, jc) + ai2 * C2(ik, jc - 1) + ai3 * C2(ik, jc - 2) + ai4 * C2(ik, jc - 3); } } for (; j < ipph - 1; j += 2, jc -= 2) // 126 { iang += l; if (iang >= ip) iang -= ip; T0 ar1 = csarr[2 * iang], ai1 = csarr[2 * iang + 1]; iang += l; if (iang >= ip) iang -= ip; T0 ar2 = csarr[2 * iang], ai2 = csarr[2 * iang + 1]; for (size_t ik = 0; ik < idl1; ++ik) // 125 { CH2(ik, l) += ar1 * C2(ik, j) + ar2 * C2(ik, j + 1); CH2(ik, lc) += ai1 * C2(ik, jc) + ai2 * C2(ik, jc - 1); } } for (; j < ipph; ++j, --jc) // 126 { iang += l; if (iang >= ip) iang -= ip; T0 ar = csarr[2 * iang], ai = csarr[2 * iang + 1]; for (size_t ik = 0; ik < idl1; ++ik) // 125 { CH2(ik, l) += ar * C2(ik, j); CH2(ik, lc) += ai * C2(ik, jc); } } } for (size_t ik = 0; ik < idl1; ++ik) // 101 CH2(ik, 0) = C2(ik, 0); for (size_t j = 1; j < ipph; ++j) // 129 for (size_t ik = 0; ik < idl1; ++ik) // 128 CH2(ik, 0) += C2(ik, j); // everything in CH at this point! 
// memset(cc,0,ip*l1*ido*sizeof(double)); for (size_t k = 0; k < l1; ++k) // 131 for (size_t i = 0; i < ido; ++i) // 130 CC(i, 0, k) = CH(i, k, 0); for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 137 { size_t j2 = 2 * j - 1; for (size_t k = 0; k < l1; ++k) // 136 { CC(ido - 1, j2, k) = CH(0, k, j); CC(0, j2 + 1, k) = CH(0, k, jc); } } if (ido == 1) return; for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 140 { size_t j2 = 2 * j - 1; for (size_t k = 0; k < l1; ++k) // 139 for (size_t i = 1, ic = ido - i - 2; i <= ido - 2; i += 2, ic -= 2) // 138 { CC(i, j2 + 1, k) = CH(i, k, j) + CH(i, k, jc); CC(ic, j2, k) = CH(i, k, j) - CH(i, k, jc); CC(i + 1, j2 + 1, k) = CH(i + 1, k, j) + CH(i + 1, k, jc); CC(ic + 1, j2, k) = CH(i + 1, k, jc) - CH(i + 1, k, j); } } } template void radb2(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa) const { auto WA = [wa, ido](size_t x, size_t i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 2 * c)]; }; auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; for (size_t k = 0; k < l1; k++) PM(CH(0, k, 0), CH(0, k, 1), CC(0, 0, k), CC(ido - 1, 1, k)); if ((ido & 1) == 0) for (size_t k = 0; k < l1; k++) { CH(ido - 1, k, 0) = 2 * CC(ido - 1, 0, k); CH(ido - 1, k, 1) = -2 * CC(0, 1, k); } if (ido <= 2) return; for (size_t k = 0; k < l1; ++k) for (size_t i = 2; i < ido; i += 2) { size_t ic = ido - i; T ti2, tr2; PM(CH(i - 1, k, 0), tr2, CC(i - 1, 0, k), CC(ic - 1, 1, k)); PM(ti2, CH(i, k, 0), CC(i, 0, k), CC(ic, 1, k)); MULPM(CH(i, k, 1), CH(i - 1, k, 1), WA(0, i - 2), WA(0, i - 1), ti2, tr2); } } template void radb3(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa) const { constexpr T0 taur = -0.5, taui = T0(0.8660254037844386467637231707529362L); auto WA = [wa, ido](size_t x, size_t 
i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 3 * c)]; }; auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; for (size_t k = 0; k < l1; k++) { T tr2 = 2 * CC(ido - 1, 1, k); T cr2 = CC(0, 0, k) + taur * tr2; CH(0, k, 0) = CC(0, 0, k) + tr2; T ci3 = 2 * taui * CC(0, 2, k); PM(CH(0, k, 2), CH(0, k, 1), cr2, ci3); } if (ido == 1) return; for (size_t k = 0; k < l1; k++) for (size_t i = 2, ic = ido - 2; i < ido; i += 2, ic -= 2) { T tr2 = CC(i - 1, 2, k) + CC(ic - 1, 1, k); // t2=CC(I) + conj(CC(ic)) T ti2 = CC(i, 2, k) - CC(ic, 1, k); T cr2 = CC(i - 1, 0, k) + taur * tr2; // c2=CC +taur*t2 T ci2 = CC(i, 0, k) + taur * ti2; CH(i - 1, k, 0) = CC(i - 1, 0, k) + tr2; // CH=CC+t2 CH(i, k, 0) = CC(i, 0, k) + ti2; T cr3 = taui * (CC(i - 1, 2, k) - CC(ic - 1, 1, k)); // c3=taui*(CC(i)-conj(CC(ic))) T ci3 = taui * (CC(i, 2, k) + CC(ic, 1, k)); T di2, di3, dr2, dr3; PM(dr3, dr2, cr2, ci3); // d2= (cr2-ci3, ci2+cr3) = c2+i*c3 PM(di2, di3, ci2, cr3); // d3= (cr2+ci3, ci2-cr3) = c2-i*c3 MULPM(CH(i, k, 1), CH(i - 1, k, 1), WA(0, i - 2), WA(0, i - 1), di2, dr2); // ch = WA*d2 MULPM(CH(i, k, 2), CH(i - 1, k, 2), WA(1, i - 2), WA(1, i - 1), di3, dr3); } } template void radb4(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa) const { constexpr T0 sqrt2 = T0(1.414213562373095048801688724209698L); auto WA = [wa, ido](size_t x, size_t i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 4 * c)]; }; auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; for (size_t k = 0; k < l1; k++) { T tr1, tr2; PM(tr2, tr1, CC(0, 0, k), CC(ido - 1, 3, k)); T tr3 = 2 * CC(ido - 1, 1, k); T tr4 = 2 * CC(0, 2, k); PM(CH(0, k, 0), CH(0, k, 2), tr2, tr3); PM(CH(0, k, 3), CH(0, k, 1), tr1, tr4); } if 
((ido & 1) == 0) for (size_t k = 0; k < l1; k++) { T tr1, tr2, ti1, ti2; PM(ti1, ti2, CC(0, 3, k), CC(0, 1, k)); PM(tr2, tr1, CC(ido - 1, 0, k), CC(ido - 1, 2, k)); CH(ido - 1, k, 0) = tr2 + tr2; CH(ido - 1, k, 1) = sqrt2 * (tr1 - ti1); CH(ido - 1, k, 2) = ti2 + ti2; CH(ido - 1, k, 3) = -sqrt2 * (tr1 + ti1); } if (ido <= 2) return; for (size_t k = 0; k < l1; ++k) for (size_t i = 2; i < ido; i += 2) { T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; size_t ic = ido - i; PM(tr2, tr1, CC(i - 1, 0, k), CC(ic - 1, 3, k)); PM(ti1, ti2, CC(i, 0, k), CC(ic, 3, k)); PM(tr4, ti3, CC(i, 2, k), CC(ic, 1, k)); PM(tr3, ti4, CC(i - 1, 2, k), CC(ic - 1, 1, k)); PM(CH(i - 1, k, 0), cr3, tr2, tr3); PM(CH(i, k, 0), ci3, ti2, ti3); PM(cr4, cr2, tr1, tr4); PM(ci2, ci4, ti1, ti4); MULPM(CH(i, k, 1), CH(i - 1, k, 1), WA(0, i - 2), WA(0, i - 1), ci2, cr2); MULPM(CH(i, k, 2), CH(i - 1, k, 2), WA(1, i - 2), WA(1, i - 1), ci3, cr3); MULPM(CH(i, k, 3), CH(i - 1, k, 3), WA(2, i - 2), WA(2, i - 1), ci4, cr4); } } template void radb5(size_t ido, size_t l1, const T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa) const { constexpr T0 tr11 = T0(0.3090169943749474241022934171828191L), ti11 = T0(0.9510565162951535721164393333793821L), tr12 = T0(-0.8090169943749474241022934171828191L), ti12 = T0(0.5877852522924731291687059546390728L); auto WA = [wa, ido](size_t x, size_t i) { return wa[i + x * (ido - 1)]; }; auto CC = [cc, ido](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + 5 * c)]; }; auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; for (size_t k = 0; k < l1; k++) { T ti5 = CC(0, 2, k) + CC(0, 2, k); T ti4 = CC(0, 4, k) + CC(0, 4, k); T tr2 = CC(ido - 1, 1, k) + CC(ido - 1, 1, k); T tr3 = CC(ido - 1, 3, k) + CC(ido - 1, 3, k); CH(0, k, 0) = CC(0, 0, k) + tr2 + tr3; T cr2 = CC(0, 0, k) + tr11 * tr2 + tr12 * tr3; T cr3 = CC(0, 0, k) + tr12 * tr2 + tr11 * tr3; T ci4, ci5; 
MULPM(ci5, ci4, ti5, ti4, ti11, ti12); PM(CH(0, k, 4), CH(0, k, 1), cr2, ci5); PM(CH(0, k, 3), CH(0, k, 2), cr3, ci4); } if (ido == 1) return; for (size_t k = 0; k < l1; ++k) for (size_t i = 2, ic = ido - 2; i < ido; i += 2, ic -= 2) { T tr2, tr3, tr4, tr5, ti2, ti3, ti4, ti5; PM(tr2, tr5, CC(i - 1, 2, k), CC(ic - 1, 1, k)); PM(ti5, ti2, CC(i, 2, k), CC(ic, 1, k)); PM(tr3, tr4, CC(i - 1, 4, k), CC(ic - 1, 3, k)); PM(ti4, ti3, CC(i, 4, k), CC(ic, 3, k)); CH(i - 1, k, 0) = CC(i - 1, 0, k) + tr2 + tr3; CH(i, k, 0) = CC(i, 0, k) + ti2 + ti3; T cr2 = CC(i - 1, 0, k) + tr11 * tr2 + tr12 * tr3; T ci2 = CC(i, 0, k) + tr11 * ti2 + tr12 * ti3; T cr3 = CC(i - 1, 0, k) + tr12 * tr2 + tr11 * tr3; T ci3 = CC(i, 0, k) + tr12 * ti2 + tr11 * ti3; T ci4, ci5, cr5, cr4; MULPM(cr5, cr4, tr5, tr4, ti11, ti12); MULPM(ci5, ci4, ti5, ti4, ti11, ti12); T dr2, dr3, dr4, dr5, di2, di3, di4, di5; PM(dr4, dr3, cr3, ci4); PM(di3, di4, ci3, cr4); PM(dr5, dr2, cr2, ci5); PM(di2, di5, ci2, cr5); MULPM(CH(i, k, 1), CH(i - 1, k, 1), WA(0, i - 2), WA(0, i - 1), di2, dr2); MULPM(CH(i, k, 2), CH(i - 1, k, 2), WA(1, i - 2), WA(1, i - 1), di3, dr3); MULPM(CH(i, k, 3), CH(i - 1, k, 3), WA(2, i - 2), WA(2, i - 1), di4, dr4); MULPM(CH(i, k, 4), CH(i - 1, k, 4), WA(3, i - 2), WA(3, i - 1), di5, dr5); } } template void radbg(size_t ido, size_t ip, size_t l1, T *POCKETFFT_RESTRICT cc, T *POCKETFFT_RESTRICT ch, const T0 *POCKETFFT_RESTRICT wa, const T0 *POCKETFFT_RESTRICT csarr) const { const size_t cdim = ip; size_t ipph = (ip + 1) / 2; size_t idl1 = ido * l1; auto CC = [cc, ido, cdim](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + cdim * c)]; }; auto CH = [ch, ido, l1](size_t a, size_t b, size_t c) -> T &{ return ch[a + ido * (b + l1 * c)]; }; auto C1 = [cc, ido, l1](size_t a, size_t b, size_t c) -> const T &{ return cc[a + ido * (b + l1 * c)]; }; auto C2 = [cc, idl1](size_t a, size_t b) -> T &{ return cc[a + idl1 * b]; }; auto CH2 = [ch, idl1](size_t a, size_t b) -> T &{ return ch[a + 
idl1 * b]; }; for (size_t k = 0; k < l1; ++k) // 102 for (size_t i = 0; i < ido; ++i) // 101 CH(i, k, 0) = CC(i, 0, k); for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 108 { size_t j2 = 2 * j - 1; for (size_t k = 0; k < l1; ++k) { CH(0, k, j) = 2 * CC(ido - 1, j2, k); CH(0, k, jc) = 2 * CC(0, j2 + 1, k); } } if (ido != 1) { for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 111 { size_t j2 = 2 * j - 1; for (size_t k = 0; k < l1; ++k) for (size_t i = 1, ic = ido - i - 2; i <= ido - 2; i += 2, ic -= 2) // 109 { CH(i, k, j) = CC(i, j2 + 1, k) + CC(ic, j2, k); CH(i, k, jc) = CC(i, j2 + 1, k) - CC(ic, j2, k); CH(i + 1, k, j) = CC(i + 1, j2 + 1, k) - CC(ic + 1, j2, k); CH(i + 1, k, jc) = CC(i + 1, j2 + 1, k) + CC(ic + 1, j2, k); } } } for (size_t l = 1, lc = ip - 1; l < ipph; ++l, --lc) { for (size_t ik = 0; ik < idl1; ++ik) { C2(ik, l) = CH2(ik, 0) + csarr[2 * l] * CH2(ik, 1) + csarr[4 * l] * CH2(ik, 2); C2(ik, lc) = csarr[2 * l + 1] * CH2(ik, ip - 1) + csarr[4 * l + 1] * CH2(ik, ip - 2); } size_t iang = 2 * l; size_t j = 3, jc = ip - 3; for (; j < ipph - 3; j += 4, jc -= 4) { iang += l; if (iang > ip) iang -= ip; T0 ar1 = csarr[2 * iang], ai1 = csarr[2 * iang + 1]; iang += l; if (iang > ip) iang -= ip; T0 ar2 = csarr[2 * iang], ai2 = csarr[2 * iang + 1]; iang += l; if (iang > ip) iang -= ip; T0 ar3 = csarr[2 * iang], ai3 = csarr[2 * iang + 1]; iang += l; if (iang > ip) iang -= ip; T0 ar4 = csarr[2 * iang], ai4 = csarr[2 * iang + 1]; for (size_t ik = 0; ik < idl1; ++ik) { C2(ik, l) += ar1 * CH2(ik, j) + ar2 * CH2(ik, j + 1) + ar3 * CH2(ik, j + 2) + ar4 * CH2(ik, j + 3); C2(ik, lc) += ai1 * CH2(ik, jc) + ai2 * CH2(ik, jc - 1) + ai3 * CH2(ik, jc - 2) + ai4 * CH2(ik, jc - 3); } } for (; j < ipph - 1; j += 2, jc -= 2) { iang += l; if (iang > ip) iang -= ip; T0 ar1 = csarr[2 * iang], ai1 = csarr[2 * iang + 1]; iang += l; if (iang > ip) iang -= ip; T0 ar2 = csarr[2 * iang], ai2 = csarr[2 * iang + 1]; for (size_t ik = 0; ik < idl1; ++ik) { C2(ik, l) += ar1 * 
CH2(ik, j) + ar2 * CH2(ik, j + 1); C2(ik, lc) += ai1 * CH2(ik, jc) + ai2 * CH2(ik, jc - 1); } } for (; j < ipph; ++j, --jc) { iang += l; if (iang > ip) iang -= ip; T0 war = csarr[2 * iang], wai = csarr[2 * iang + 1]; for (size_t ik = 0; ik < idl1; ++ik) { C2(ik, l) += war * CH2(ik, j); C2(ik, lc) += wai * CH2(ik, jc); } } } for (size_t j = 1; j < ipph; ++j) for (size_t ik = 0; ik < idl1; ++ik) CH2(ik, 0) += CH2(ik, j); for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 124 for (size_t k = 0; k < l1; ++k) PM(CH(0, k, jc), CH(0, k, j), C1(0, k, j), C1(0, k, jc)); if (ido == 1) return; for (size_t j = 1, jc = ip - 1; j < ipph; ++j, --jc) // 127 for (size_t k = 0; k < l1; ++k) for (size_t i = 1; i <= ido - 2; i += 2) { CH(i, k, j) = C1(i, k, j) - C1(i + 1, k, jc); CH(i, k, jc) = C1(i, k, j) + C1(i + 1, k, jc); CH(i + 1, k, j) = C1(i + 1, k, j) + C1(i, k, jc); CH(i + 1, k, jc) = C1(i + 1, k, j) - C1(i, k, jc); } // All in CH for (size_t j = 1; j < ip; ++j) { size_t is = (j - 1) * (ido - 1); for (size_t k = 0; k < l1; ++k) { size_t idij = is; for (size_t i = 1; i <= ido - 2; i += 2) { T t1 = CH(i, k, j), t2 = CH(i + 1, k, j); CH(i, k, j) = wa[idij] * t1 - wa[idij + 1] * t2; CH(i + 1, k, j) = wa[idij] * t2 + wa[idij + 1] * t1; idij += 2; } } } } template void copy_and_norm(T *c, T *p1, size_t n, T0 fct) const { if (p1 != c) { if (fct != 1.) for (size_t i = 0; i < n; ++i) c[i] = fct * p1[i]; else memcpy(c, p1, n * sizeof(T)); } else if (fct != 1.) 
for (size_t i = 0; i < n; ++i) c[i] *= fct; } public: template void exec(T c[], T0 fct, bool r2hc) const { if (length == 1) { c[0] *= fct; return; } size_t n = length, nf = fact.size(); arr ch(n); T *p1 = c, *p2 = ch.data(); if (r2hc) for (size_t k1 = 0, l1 = n; k1 < nf; ++k1) { size_t k = nf - k1 - 1; size_t ip = fact[k].fct; size_t ido = n / l1; l1 /= ip; if (ip == 4) radf4(ido, l1, p1, p2, fact[k].tw); else if (ip == 2) radf2(ido, l1, p1, p2, fact[k].tw); else if (ip == 3) radf3(ido, l1, p1, p2, fact[k].tw); else if (ip == 5) radf5(ido, l1, p1, p2, fact[k].tw); else { radfg(ido, ip, l1, p1, p2, fact[k].tw, fact[k].tws); std::swap(p1, p2); } std::swap(p1, p2); } else for (size_t k = 0, l1 = 1; k < nf; k++) { size_t ip = fact[k].fct, ido = n / (ip * l1); if (ip == 4) radb4(ido, l1, p1, p2, fact[k].tw); else if (ip == 2) radb2(ido, l1, p1, p2, fact[k].tw); else if (ip == 3) radb3(ido, l1, p1, p2, fact[k].tw); else if (ip == 5) radb5(ido, l1, p1, p2, fact[k].tw); else radbg(ido, ip, l1, p1, p2, fact[k].tw, fact[k].tws); std::swap(p1, p2); l1 *= ip; } copy_and_norm(c, p1, n, fct); } private: void factorize() { size_t len = length; while ((len % 4) == 0) { add_factor(4); len >>= 2; } if ((len % 2) == 0) { len >>= 1; // factor 2 should be at the front of the factor list add_factor(2); std::swap(fact[0].fct, fact.back().fct); } for (size_t divisor = 3; divisor * divisor <= len; divisor += 2) while ((len % divisor) == 0) { add_factor(divisor); len /= divisor; } if (len > 1) add_factor(len); } size_t twsize() const { size_t twsz = 0, l1 = 1; for (size_t k = 0; k < fact.size(); ++k) { size_t ip = fact[k].fct, ido = length / (l1 * ip); twsz += (ip - 1) * (ido - 1); if (ip > 5) twsz += 2 * ip; l1 *= ip; } return twsz; } void comp_twiddle() { sincos_2pibyn twid(length); size_t l1 = 1; T0 *ptr = mem.data(); for (size_t k = 0; k < fact.size(); ++k) { size_t ip = fact[k].fct, ido = length / (l1 * ip); if (k < fact.size() - 1) // last factor doesn't need twiddles { fact[k].tw = 
ptr; ptr += (ip - 1) * (ido - 1); for (size_t j = 1; j < ip; ++j) for (size_t i = 1; i <= (ido - 1) / 2; ++i) { fact[k].tw[(j - 1) * (ido - 1) + 2 * i - 2] = twid[j * l1 * i].r; fact[k].tw[(j - 1) * (ido - 1) + 2 * i - 1] = twid[j * l1 * i].i; } } if (ip > 5) // special factors required by *g functions { fact[k].tws = ptr; ptr += 2 * ip; fact[k].tws[0] = 1.; fact[k].tws[1] = 0.; for (size_t i = 2, ic = 2 * ip - 2; i <= ic; i += 2, ic -= 2) { fact[k].tws[i] = twid[i / 2 * (length / ip)].r; fact[k].tws[i + 1] = twid[i / 2 * (length / ip)].i; fact[k].tws[ic] = twid[i / 2 * (length / ip)].r; fact[k].tws[ic + 1] = -twid[i / 2 * (length / ip)].i; } } l1 *= ip; } } public: POCKETFFT_NOINLINE rfftp(size_t length_) : length(length_) { if (length == 0) throw std::runtime_error("zero-length FFT requested"); if (length == 1) return; factorize(); mem.resize(twsize()); comp_twiddle(); } }; // // complex Bluestein transforms // template class fftblue { private: size_t n, n2; cfftp plan; arr> mem; cmplx *bk, *bkf; template void fft(cmplx c[], T0 fct) const { arr> akf(n2); /* initialize a_k and FFT it */ for (size_t m = 0; m < n; ++m) special_mul(c[m], bk[m], akf[m]); auto zero = akf[0] * T0(0); for (size_t m = n; m < n2; ++m) akf[m] = zero; plan.exec(akf.data(), 1., true); /* do the convolution */ akf[0] = akf[0].template special_mul(bkf[0]); for (size_t m = 1; m < (n2 + 1) / 2; ++m) { akf[m] = akf[m].template special_mul(bkf[m]); akf[n2 - m] = akf[n2 - m].template special_mul(bkf[m]); } if ((n2 & 1) == 0) akf[n2 / 2] = akf[n2 / 2].template special_mul(bkf[n2 / 2]); /* inverse FFT */ plan.exec(akf.data(), 1., false); /* multiply by b_k */ for (size_t m = 0; m < n; ++m) c[m] = akf[m].template special_mul(bk[m]) * fct; } public: POCKETFFT_NOINLINE fftblue(size_t length) : n(length), n2(util::good_size_cmplx(n * 2 - 1)), plan(n2), mem(n + n2 / 2 + 1), bk(mem.data()), bkf(mem.data() + n) { /* initialize b_k */ sincos_2pibyn tmp(2 * n); bk[0].Set(1, 0); size_t coeff = 0; for (size_t m 
= 1; m < n; ++m) { coeff += 2 * m - 1; if (coeff >= 2 * n) coeff -= 2 * n; bk[m] = tmp[coeff]; } /* initialize the zero-padded, Fourier transformed b_k. Add * normalisation. */ arr> tbkf(n2); T0 xn2 = T0(1) / T0(n2); tbkf[0] = bk[0] * xn2; for (size_t m = 1; m < n; ++m) tbkf[m] = tbkf[n2 - m] = bk[m] * xn2; for (size_t m = n; m <= (n2 - n); ++m) tbkf[m].Set(0., 0.); plan.exec(tbkf.data(), 1., true); for (size_t i = 0; i < n2 / 2 + 1; ++i) bkf[i] = tbkf[i]; } template void exec(cmplx c[], T0 fct, bool fwd) const { fwd ? fft(c, fct) : fft(c, fct); } template void exec_r(T c[], T0 fct, bool fwd) { arr> tmp(n); if (fwd) { auto zero = T0(0) * c[0]; for (size_t m = 0; m < n; ++m) tmp[m].Set(c[m], zero); fft(tmp.data(), fct); c[0] = tmp[0].r; memcpy(c + 1, tmp.data() + 1, (n - 1) * sizeof(T)); } else { tmp[0].Set(c[0], c[0] * 0); memcpy(reinterpret_cast(tmp.data() + 1), reinterpret_cast(c + 1), (n - 1) * sizeof(T)); if ((n & 1) == 0) tmp[n / 2].i = T0(0) * c[0]; for (size_t m = 1; 2 * m < n; ++m) tmp[n - m].Set(tmp[m].r, -tmp[m].i); fft(tmp.data(), fct); for (size_t m = 0; m < n; ++m) c[m] = tmp[m].r; } } }; // // flexible (FFTPACK/Bluestein) complex 1D transform // template class pocketfft_c { private: std::unique_ptr> packplan; std::unique_ptr> blueplan; size_t len; public: POCKETFFT_NOINLINE pocketfft_c(size_t length) : len(length) { if (length == 0) throw std::runtime_error("zero-length FFT requested"); size_t tmp = (length < 50) ? 
0 : util::largest_prime_factor(length); if (tmp * tmp <= length) { packplan = std::unique_ptr>(new cfftp(length)); return; } double comp1 = util::cost_guess(length); double comp2 = 2 * util::cost_guess(util::good_size_cmplx(2 * length - 1)); comp2 *= 1.5; /* fudge factor that appears to give good overall performance */ if (comp2 < comp1) // use Bluestein blueplan = std::unique_ptr>(new fftblue(length)); else packplan = std::unique_ptr>(new cfftp(length)); } template POCKETFFT_NOINLINE void exec(cmplx c[], T0 fct, bool fwd) const { packplan ? packplan->exec(c, fct, fwd) : blueplan->exec(c, fct, fwd); } size_t length() const { return len; } }; // // flexible (FFTPACK/Bluestein) real-valued 1D transform // template class pocketfft_r { private: std::unique_ptr> packplan; std::unique_ptr> blueplan; size_t len; public: POCKETFFT_NOINLINE pocketfft_r(size_t length) : len(length) { if (length == 0) throw std::runtime_error("zero-length FFT requested"); size_t tmp = (length < 50) ? 0 : util::largest_prime_factor(length); if (tmp * tmp <= length) { packplan = std::unique_ptr>(new rfftp(length)); return; } double comp1 = 0.5 * util::cost_guess(length); double comp2 = 2 * util::cost_guess(util::good_size_cmplx(2 * length - 1)); comp2 *= 1.5; /* fudge factor that appears to give good overall performance */ if (comp2 < comp1) // use Bluestein blueplan = std::unique_ptr>(new fftblue(length)); else packplan = std::unique_ptr>(new rfftp(length)); } template POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool fwd) const { packplan ? 
packplan->exec(c, fct, fwd) : blueplan->exec_r(c, fct, fwd); } size_t length() const { return len; } }; // // sine/cosine transforms // template class T_dct1 { private: pocketfft_r fftplan; public: POCKETFFT_NOINLINE T_dct1(size_t length) : fftplan(2 * (length - 1)) { } template POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho, int /*type*/, bool /*cosine*/) const { constexpr T0 sqrt2 = T0(1.414213562373095048801688724209698L); size_t N = fftplan.length(), n = N / 2 + 1; if (ortho) { c[0] *= sqrt2; c[n - 1] *= sqrt2; } arr tmp(N); tmp[0] = c[0]; for (size_t i = 1; i < n; ++i) tmp[i] = tmp[N - i] = c[i]; fftplan.exec(tmp.data(), fct, true); c[0] = tmp[0]; for (size_t i = 1; i < n; ++i) c[i] = tmp[2 * i - 1]; if (ortho) { c[0] *= sqrt2 * T0(0.5); c[n - 1] *= sqrt2 * T0(0.5); } } size_t length() const { return fftplan.length() / 2 + 1; } }; template class T_dst1 { private: pocketfft_r fftplan; public: POCKETFFT_NOINLINE T_dst1(size_t length) : fftplan(2 * (length + 1)) { } template POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool /*ortho*/, int /*type*/, bool /*cosine*/) const { size_t N = fftplan.length(), n = N / 2 - 1; arr tmp(N); tmp[0] = tmp[n + 1] = c[0] * 0; for (size_t i = 0; i < n; ++i) { tmp[i + 1] = c[i]; tmp[N - 1 - i] = -c[i]; } fftplan.exec(tmp.data(), fct, true); for (size_t i = 0; i < n; ++i) c[i] = -tmp[2 * i + 2]; } size_t length() const { return fftplan.length() / 2 - 1; } }; template class T_dcst23 { private: pocketfft_r fftplan; std::vector twiddle; public: POCKETFFT_NOINLINE T_dcst23(size_t length) : fftplan(length), twiddle(length) { sincos_2pibyn tw(4 * length); for (size_t i = 0; i < length; ++i) twiddle[i] = tw[i + 1].r; } template POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho, int type, bool cosine) const { constexpr T0 sqrt2 = T0(1.414213562373095048801688724209698L); size_t N = length(); size_t NS2 = (N + 1) / 2; if (type == 2) { if (!cosine) for (size_t k = 1; k < N; k += 2) c[k] = -c[k]; c[0] *= 2; if ((N & 1) == 0) c[N - 
1] *= 2; for (size_t k = 1; k < N - 1; k += 2) MPINPLACE(c[k + 1], c[k]); fftplan.exec(c, fct, false); for (size_t k = 1, kc = N - 1; k < NS2; ++k, --kc) { T t1 = twiddle[k - 1] * c[kc] + twiddle[kc - 1] * c[k]; T t2 = twiddle[k - 1] * c[k] - twiddle[kc - 1] * c[kc]; c[k] = T0(0.5) * (t1 + t2); c[kc] = T0(0.5) * (t1 - t2); } if ((N & 1) == 0) c[NS2] *= twiddle[NS2 - 1]; if (!cosine) for (size_t k = 0, kc = N - 1; k < kc; ++k, --kc) std::swap(c[k], c[kc]); if (ortho) c[0] *= sqrt2 * T0(0.5); } else { if (ortho) c[0] *= sqrt2; if (!cosine) for (size_t k = 0, kc = N - 1; k < NS2; ++k, --kc) std::swap(c[k], c[kc]); for (size_t k = 1, kc = N - 1; k < NS2; ++k, --kc) { T t1 = c[k] + c[kc], t2 = c[k] - c[kc]; c[k] = twiddle[k - 1] * t2 + twiddle[kc - 1] * t1; c[kc] = twiddle[k - 1] * t1 - twiddle[kc - 1] * t2; } if ((N & 1) == 0) c[NS2] *= 2 * twiddle[NS2 - 1]; fftplan.exec(c, fct, true); for (size_t k = 1; k < N - 1; k += 2) MPINPLACE(c[k], c[k + 1]); if (!cosine) for (size_t k = 1; k < N; k += 2) c[k] = -c[k]; } } size_t length() const { return fftplan.length(); } }; template class T_dcst4 { private: size_t N; std::unique_ptr> fft; std::unique_ptr> rfft; arr> C2; public: POCKETFFT_NOINLINE T_dcst4(size_t length) : N(length), fft((N & 1) ? nullptr : new pocketfft_c(N / 2)), rfft((N & 1) ? new pocketfft_r(N) : nullptr), C2((N & 1) ? 0 : N / 2) { if ((N & 1) == 0) { sincos_2pibyn tw(16 * N); for (size_t i = 0; i < N / 2; ++i) C2[i] = conj(tw[8 * i + 1]); } } template POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool /*ortho*/, int /*type*/, bool cosine) const { size_t n2 = N / 2; if (!cosine) for (size_t k = 0, kc = N - 1; k < n2; ++k, --kc) std::swap(c[k], c[kc]); if (N & 1) { // The following code is derived from the FFTW3 function apply_re11() // and is released under the 3-clause BSD license with friendly // permission of Matteo Frigo and Steven G. Johnson. 
arr y(N); { size_t i = 0, m = n2; for (; m < N; ++i, m += 4) y[i] = c[m]; for (; m < 2 * N; ++i, m += 4) y[i] = -c[2 * N - m - 1]; for (; m < 3 * N; ++i, m += 4) y[i] = -c[m - 2 * N]; for (; m < 4 * N; ++i, m += 4) y[i] = c[4 * N - m - 1]; for (; i < N; ++i, m += 4) y[i] = c[m - 4 * N]; } rfft->exec(y.data(), fct, true); { auto SGN = [](size_t i) { constexpr T0 sqrt2 = T0(1.414213562373095048801688724209698L); return (i & 2) ? -sqrt2 : sqrt2; }; c[n2] = y[0] * SGN(n2 + 1); size_t i = 0, i1 = 1, k = 1; for (; k < n2; ++i, ++i1, k += 2) { c[i] = y[2 * k - 1] * SGN(i1) + y[2 * k] * SGN(i); c[N - i1] = y[2 * k - 1] * SGN(N - i) - y[2 * k] * SGN(N - i1); c[n2 - i1] = y[2 * k + 1] * SGN(n2 - i) - y[2 * k + 2] * SGN(n2 - i1); c[n2 + i1] = y[2 * k + 1] * SGN(n2 + i + 2) + y[2 * k + 2] * SGN(n2 + i1); } if (k == n2) { c[i] = y[2 * k - 1] * SGN(i + 1) + y[2 * k] * SGN(i); c[N - i1] = y[2 * k - 1] * SGN(i + 2) + y[2 * k] * SGN(i1); } } // FFTW-derived code ends here } else { // even length algorithm from // https://www.appletonaudio.com/blog/2013/derivation-of-fast-dct-4-algorithm-based-on-dft/ arr> y(n2); for (size_t i = 0; i < n2; ++i) { y[i].Set(c[2 * i], c[N - 1 - 2 * i]); y[i] *= C2[i]; } fft->exec(y.data(), fct, true); for (size_t i = 0, ic = n2 - 1; i < n2; ++i, --ic) { c[2 * i] = 2 * (y[i].r * C2[i].r - y[i].i * C2[i].i); c[2 * i + 1] = -2 * (y[ic].i * C2[ic].r + y[ic].r * C2[ic].i); } } if (!cosine) for (size_t k = 1; k < N; k += 2) c[k] = -c[k]; } size_t length() const { return N; } }; // // multi-D infrastructure // template std::shared_ptr get_plan(size_t length) { #if POCKETFFT_CACHE_SIZE == 0 return std::make_shared(length); #else constexpr size_t nmax = POCKETFFT_CACHE_SIZE; static std::array, nmax> cache; static std::array last_access{{0}}; static size_t access_counter = 0; static std::mutex mut; auto find_in_cache = [&]() -> std::shared_ptr { for (size_t i = 0; i < nmax; ++i) if (cache[i] && (cache[i]->length() == length)) { // no need to update if this is 
already the most recent entry if (last_access[i] != access_counter) { last_access[i] = ++access_counter; // Guard against overflow if (access_counter == 0) last_access.fill(0); } return cache[i]; } return nullptr; }; { std::lock_guard lock(mut); auto p = find_in_cache(); if (p) return p; } auto plan = std::make_shared(length); { std::lock_guard lock(mut); auto p = find_in_cache(); if (p) return p; size_t lru = 0; for (size_t i = 1; i < nmax; ++i) if (last_access[i] < last_access[lru]) lru = i; cache[lru] = plan; last_access[lru] = ++access_counter; } return plan; #endif } class arr_info { protected: shape_t shp; stride_t str; public: arr_info(const shape_t &shape_, const stride_t &stride_) : shp(shape_), str(stride_) { } size_t ndim() const { return shp.size(); } size_t size() const { return util::prod(shp); } const shape_t &shape() const { return shp; } size_t shape(size_t i) const { return shp[i]; } const stride_t &stride() const { return str; } const ptrdiff_t &stride(size_t i) const { return str[i]; } }; template class cndarr : public arr_info { protected: const char *d; public: cndarr(const void *data_, const shape_t &shape_, const stride_t &stride_) : arr_info(shape_, stride_), d(reinterpret_cast(data_)) { } const T &operator[](ptrdiff_t ofs) const { return *reinterpret_cast(d + ofs); } }; template class ndarr : public cndarr { public: ndarr(void *data_, const shape_t &shape_, const stride_t &stride_) : cndarr::cndarr(const_cast(data_), shape_, stride_) { } T &operator[](ptrdiff_t ofs) { return *reinterpret_cast(const_cast(cndarr::d + ofs)); } }; template class multi_iter { private: shape_t pos; const arr_info &iarr, &oarr; ptrdiff_t p_ii, p_i[N], str_i, p_oi, p_o[N], str_o; size_t idim, rem; void advance_i() { for (int i_ = int(pos.size()) - 1; i_ >= 0; --i_) { auto i = size_t(i_); if (i == idim) continue; p_ii += iarr.stride(i); p_oi += oarr.stride(i); if (++pos[i] < iarr.shape(i)) return; pos[i] = 0; p_ii -= ptrdiff_t(iarr.shape(i)) * iarr.stride(i); p_oi 
-= ptrdiff_t(oarr.shape(i)) * oarr.stride(i); } } public: multi_iter(const arr_info &iarr_, const arr_info &oarr_, size_t idim_) : pos(iarr_.ndim(), 0), iarr(iarr_), oarr(oarr_), p_ii(0), str_i(iarr.stride(idim_)), p_oi(0), str_o(oarr.stride(idim_)), idim(idim_), rem(iarr.size() / iarr.shape(idim)) { auto nshares = threading::num_threads(); if (nshares == 1) return; if (nshares == 0) throw std::runtime_error("can't run with zero threads"); auto myshare = threading::thread_id(); if (myshare >= nshares) throw std::runtime_error("impossible share requested"); size_t nbase = rem / nshares; size_t additional = rem % nshares; size_t lo = myshare * nbase + ((myshare < additional) ? myshare : additional); size_t hi = lo + nbase + (myshare < additional); size_t todo = hi - lo; size_t chunk = rem; for (size_t i = 0; i < pos.size(); ++i) { if (i == idim) continue; chunk /= iarr.shape(i); size_t n_advance = lo / chunk; pos[i] += n_advance; p_ii += ptrdiff_t(n_advance) * iarr.stride(i); p_oi += ptrdiff_t(n_advance) * oarr.stride(i); lo -= n_advance * chunk; } rem = todo; } void advance(size_t n) { if (rem < n) throw std::runtime_error("underrun"); for (size_t i = 0; i < n; ++i) { p_i[i] = p_ii; p_o[i] = p_oi; advance_i(); } rem -= n; } ptrdiff_t iofs(size_t i) const { return p_i[0] + ptrdiff_t(i) * str_i; } ptrdiff_t iofs(size_t j, size_t i) const { return p_i[j] + ptrdiff_t(i) * str_i; } ptrdiff_t oofs(size_t i) const { return p_o[0] + ptrdiff_t(i) * str_o; } ptrdiff_t oofs(size_t j, size_t i) const { return p_o[j] + ptrdiff_t(i) * str_o; } size_t length_in() const { return iarr.shape(idim); } size_t length_out() const { return oarr.shape(idim); } ptrdiff_t stride_in() const { return str_i; } ptrdiff_t stride_out() const { return str_o; } size_t remaining() const { return rem; } }; class simple_iter { private: shape_t pos; const arr_info &arr; ptrdiff_t p; size_t rem; public: simple_iter(const arr_info &arr_) : pos(arr_.ndim(), 0), arr(arr_), p(0), rem(arr_.size()) { } void 
advance() { --rem; for (int i_ = int(pos.size()) - 1; i_ >= 0; --i_) { auto i = size_t(i_); p += arr.stride(i); if (++pos[i] < arr.shape(i)) return; pos[i] = 0; p -= ptrdiff_t(arr.shape(i)) * arr.stride(i); } } ptrdiff_t ofs() const { return p; } size_t remaining() const { return rem; } }; class rev_iter { private: shape_t pos; const arr_info &arr; std::vector rev_axis; std::vector rev_jump; size_t last_axis, last_size; shape_t shp; ptrdiff_t p, rp; size_t rem; public: rev_iter(const arr_info &arr_, const shape_t &axes) : pos(arr_.ndim(), 0), arr(arr_), rev_axis(arr_.ndim(), 0), rev_jump(arr_.ndim(), 1), p(0), rp(0) { for (auto ax : axes) rev_axis[ax] = 1; last_axis = axes.back(); last_size = arr.shape(last_axis) / 2 + 1; shp = arr.shape(); shp[last_axis] = last_size; rem = 1; for (auto i : shp) rem *= i; } void advance() { --rem; for (int i_ = int(pos.size()) - 1; i_ >= 0; --i_) { auto i = size_t(i_); p += arr.stride(i); if (!rev_axis[i]) rp += arr.stride(i); else { rp -= arr.stride(i); if (rev_jump[i]) { rp += ptrdiff_t(arr.shape(i)) * arr.stride(i); rev_jump[i] = 0; } } if (++pos[i] < shp[i]) return; pos[i] = 0; p -= ptrdiff_t(shp[i]) * arr.stride(i); if (rev_axis[i]) { rp -= ptrdiff_t(arr.shape(i) - shp[i]) * arr.stride(i); rev_jump[i] = 1; } else rp -= ptrdiff_t(shp[i]) * arr.stride(i); } } ptrdiff_t ofs() const { return p; } ptrdiff_t rev_ofs() const { return rp; } size_t remaining() const { return rem; } }; template struct VTYPE { }; template using vtype_t = typename VTYPE::type; #ifndef POCKETFFT_NO_VECTORS template <> struct VTYPE { using type = float __attribute__((vector_size(VLEN::val * sizeof(float)))); }; template <> struct VTYPE { using type = double __attribute__((vector_size(VLEN::val * sizeof(double)))); }; template <> struct VTYPE { using type = long double __attribute__((vector_size(VLEN::val * sizeof(long double)))); }; #endif template arr alloc_tmp(const shape_t &shape, size_t axsize, size_t elemsize) { auto othersize = util::prod(shape) / 
axsize; auto tmpsize = axsize * ((othersize >= VLEN::val) ? VLEN::val : 1); return arr(tmpsize * elemsize); } template arr alloc_tmp(const shape_t &shape, const shape_t &axes, size_t elemsize) { size_t fullsize = util::prod(shape); size_t tmpsize = 0; for (size_t i = 0; i < axes.size(); ++i) { auto axsize = shape[axes[i]]; auto othersize = fullsize / axsize; auto sz = axsize * ((othersize >= VLEN::val) ? VLEN::val : 1); if (sz > tmpsize) tmpsize = sz; } return arr(tmpsize * elemsize); } template void copy_input(const multi_iter &it, const cndarr> &src, cmplx> *POCKETFFT_RESTRICT dst) { for (size_t i = 0; i < it.length_in(); ++i) for (size_t j = 0; j < vlen; ++j) { dst[i].r[j] = src[it.iofs(j, i)].r; dst[i].i[j] = src[it.iofs(j, i)].i; } } template void copy_input(const multi_iter &it, const cndarr &src, vtype_t *POCKETFFT_RESTRICT dst) { for (size_t i = 0; i < it.length_in(); ++i) for (size_t j = 0; j < vlen; ++j) dst[i][j] = src[it.iofs(j, i)]; } template void copy_input(const multi_iter &it, const cndarr &src, T *POCKETFFT_RESTRICT dst) { if (dst == &src[it.iofs(0)]) return; // in-place for (size_t i = 0; i < it.length_in(); ++i) dst[i] = src[it.iofs(i)]; } template void copy_output(const multi_iter &it, const cmplx> *POCKETFFT_RESTRICT src, ndarr> &dst) { for (size_t i = 0; i < it.length_out(); ++i) for (size_t j = 0; j < vlen; ++j) dst[it.oofs(j, i)].Set(src[i].r[j], src[i].i[j]); } template void copy_output(const multi_iter &it, const vtype_t *POCKETFFT_RESTRICT src, ndarr &dst) { for (size_t i = 0; i < it.length_out(); ++i) for (size_t j = 0; j < vlen; ++j) dst[it.oofs(j, i)] = src[i][j]; } template void copy_output(const multi_iter &it, const T *POCKETFFT_RESTRICT src, ndarr &dst) { if (src == &dst[it.oofs(0)]) return; // in-place for (size_t i = 0; i < it.length_out(); ++i) dst[it.oofs(i)] = src[i]; } template struct add_vec { using type = vtype_t; }; template struct add_vec> { using type = cmplx>; }; template using add_vec_t = typename add_vec::type; 
template POCKETFFT_NOINLINE void general_nd(const cndarr &in, ndarr &out, const shape_t &axes, T0 fct, size_t nthreads, const Exec &exec, const bool allow_inplace = true) { std::shared_ptr plan; for (size_t iax = 0; iax < axes.size(); ++iax) { size_t len = in.shape(axes[iax]); if ((!plan) || (len != plan->length())) plan = get_plan(len); threading::thread_map( util::thread_count(nthreads, in.shape(), axes[iax], VLEN::val), [&] { constexpr auto vlen = VLEN::val; auto storage = alloc_tmp(in.shape(), len, sizeof(T)); const auto &tin(iax == 0 ? in : out); multi_iter it(tin, out, axes[iax]); #ifndef POCKETFFT_NO_VECTORS if (vlen > 1) while (it.remaining() >= vlen) { it.advance(vlen); auto tdatav = reinterpret_cast *>(storage.data()); exec(it, tin, out, tdatav, *plan, fct); } #endif while (it.remaining() > 0) { it.advance(1); auto buf = allow_inplace && it.stride_out() == sizeof(T) ? &out[it.oofs(0)] : reinterpret_cast(storage.data()); exec(it, tin, out, buf, *plan, fct); } }); // end of parallel region fct = T0(1); // factor has been applied, use 1 for remaining axes } } struct ExecC2C { bool forward; template void operator()(const multi_iter &it, const cndarr> &in, ndarr> &out, T *buf, const pocketfft_c &plan, T0 fct) const { copy_input(it, in, buf); plan.exec(buf, fct, forward); copy_output(it, buf, out); } }; template void copy_hartley(const multi_iter &it, const vtype_t *POCKETFFT_RESTRICT src, ndarr &dst) { for (size_t j = 0; j < vlen; ++j) dst[it.oofs(j, 0)] = src[0][j]; size_t i = 1, i1 = 1, i2 = it.length_out() - 1; for (i = 1; i < it.length_out() - 1; i += 2, ++i1, --i2) for (size_t j = 0; j < vlen; ++j) { dst[it.oofs(j, i1)] = src[i][j] + src[i + 1][j]; dst[it.oofs(j, i2)] = src[i][j] - src[i + 1][j]; } if (i < it.length_out()) for (size_t j = 0; j < vlen; ++j) dst[it.oofs(j, i1)] = src[i][j]; } template void copy_hartley(const multi_iter &it, const T *POCKETFFT_RESTRICT src, ndarr &dst) { dst[it.oofs(0)] = src[0]; size_t i = 1, i1 = 1, i2 = it.length_out() - 
1; for (i = 1; i < it.length_out() - 1; i += 2, ++i1, --i2) { dst[it.oofs(i1)] = src[i] + src[i + 1]; dst[it.oofs(i2)] = src[i] - src[i + 1]; } if (i < it.length_out()) dst[it.oofs(i1)] = src[i]; } struct ExecHartley { template void operator()(const multi_iter &it, const cndarr &in, ndarr &out, T *buf, const pocketfft_r &plan, T0 fct) const { copy_input(it, in, buf); plan.exec(buf, fct, true); copy_hartley(it, buf, out); } }; struct ExecDcst { bool ortho; int type; bool cosine; template void operator()(const multi_iter &it, const cndarr &in, ndarr &out, T *buf, const Tplan &plan, T0 fct) const { copy_input(it, in, buf); plan.exec(buf, fct, ortho, type, cosine); copy_output(it, buf, out); } }; template POCKETFFT_NOINLINE void general_r2c(const cndarr &in, ndarr> &out, size_t axis, bool forward, T fct, size_t nthreads) { auto plan = get_plan>(in.shape(axis)); size_t len = in.shape(axis); threading::thread_map( util::thread_count(nthreads, in.shape(), axis, VLEN::val), [&] { constexpr auto vlen = VLEN::val; auto storage = alloc_tmp(in.shape(), len, sizeof(T)); multi_iter it(in, out, axis); #ifndef POCKETFFT_NO_VECTORS if (vlen > 1) while (it.remaining() >= vlen) { it.advance(vlen); auto tdatav = reinterpret_cast *>(storage.data()); copy_input(it, in, tdatav); plan->exec(tdatav, fct, true); for (size_t j = 0; j < vlen; ++j) out[it.oofs(j, 0)].Set(tdatav[0][j]); size_t i = 1, ii = 1; if (forward) for (; i < len - 1; i += 2, ++ii) for (size_t j = 0; j < vlen; ++j) out[it.oofs(j, ii)].Set(tdatav[i][j], tdatav[i + 1][j]); else for (; i < len - 1; i += 2, ++ii) for (size_t j = 0; j < vlen; ++j) out[it.oofs(j, ii)].Set(tdatav[i][j], -tdatav[i + 1][j]); if (i < len) for (size_t j = 0; j < vlen; ++j) out[it.oofs(j, ii)].Set(tdatav[i][j]); } #endif while (it.remaining() > 0) { it.advance(1); auto tdata = reinterpret_cast(storage.data()); copy_input(it, in, tdata); plan->exec(tdata, fct, true); out[it.oofs(0)].Set(tdata[0]); size_t i = 1, ii = 1; if (forward) for (; i < len - 1; 
i += 2, ++ii) out[it.oofs(ii)].Set(tdata[i], tdata[i + 1]); else for (; i < len - 1; i += 2, ++ii) out[it.oofs(ii)].Set(tdata[i], -tdata[i + 1]); if (i < len) out[it.oofs(ii)].Set(tdata[i]); } }); // end of parallel region } template POCKETFFT_NOINLINE void general_c2r(const cndarr> &in, ndarr &out, size_t axis, bool forward, T fct, size_t nthreads) { auto plan = get_plan>(out.shape(axis)); size_t len = out.shape(axis); threading::thread_map( util::thread_count(nthreads, in.shape(), axis, VLEN::val), [&] { constexpr auto vlen = VLEN::val; auto storage = alloc_tmp(out.shape(), len, sizeof(T)); multi_iter it(in, out, axis); #ifndef POCKETFFT_NO_VECTORS if (vlen > 1) while (it.remaining() >= vlen) { it.advance(vlen); auto tdatav = reinterpret_cast *>(storage.data()); for (size_t j = 0; j < vlen; ++j) tdatav[0][j] = in[it.iofs(j, 0)].r; { size_t i = 1, ii = 1; if (forward) for (; i < len - 1; i += 2, ++ii) for (size_t j = 0; j < vlen; ++j) { tdatav[i][j] = in[it.iofs(j, ii)].r; tdatav[i + 1][j] = -in[it.iofs(j, ii)].i; } else for (; i < len - 1; i += 2, ++ii) for (size_t j = 0; j < vlen; ++j) { tdatav[i][j] = in[it.iofs(j, ii)].r; tdatav[i + 1][j] = in[it.iofs(j, ii)].i; } if (i < len) for (size_t j = 0; j < vlen; ++j) tdatav[i][j] = in[it.iofs(j, ii)].r; } plan->exec(tdatav, fct, false); copy_output(it, tdatav, out); } #endif while (it.remaining() > 0) { it.advance(1); auto tdata = reinterpret_cast(storage.data()); tdata[0] = in[it.iofs(0)].r; { size_t i = 1, ii = 1; if (forward) for (; i < len - 1; i += 2, ++ii) { tdata[i] = in[it.iofs(ii)].r; tdata[i + 1] = -in[it.iofs(ii)].i; } else for (; i < len - 1; i += 2, ++ii) { tdata[i] = in[it.iofs(ii)].r; tdata[i + 1] = in[it.iofs(ii)].i; } if (i < len) tdata[i] = in[it.iofs(ii)].r; } plan->exec(tdata, fct, false); copy_output(it, tdata, out); } }); // end of parallel region } struct ExecR2R { bool r2c, forward; template void operator()(const multi_iter &it, const cndarr &in, ndarr &out, T *buf, const pocketfft_r &plan, T0 
fct) const { copy_input(it, in, buf); if ((!r2c) && forward) for (size_t i = 2; i < it.length_out(); i += 2) buf[i] = -buf[i]; plan.exec(buf, fct, forward); if (r2c && (!forward)) for (size_t i = 2; i < it.length_out(); i += 2) buf[i] = -buf[i]; copy_output(it, buf, out); } }; template void c2c(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, bool forward, const std::complex *data_in, std::complex *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape) == 0) return; util::sanity_check(shape, stride_in, stride_out, data_in == data_out, axes); cndarr> ain(data_in, shape, stride_in); ndarr> aout(data_out, shape, stride_out); general_nd>(ain, aout, axes, fct, nthreads, ExecC2C{forward}); } template void dct(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, int type, const T *data_in, T *data_out, T fct, bool ortho, size_t nthreads = 1) { if ((type < 1) || (type > 4)) throw std::invalid_argument("invalid DCT type"); if (util::prod(shape) == 0) return; util::sanity_check(shape, stride_in, stride_out, data_in == data_out, axes); cndarr ain(data_in, shape, stride_in); ndarr aout(data_out, shape, stride_out); const ExecDcst exec{ortho, type, true}; if (type == 1) general_nd>(ain, aout, axes, fct, nthreads, exec); else if (type == 4) general_nd>(ain, aout, axes, fct, nthreads, exec); else general_nd>(ain, aout, axes, fct, nthreads, exec); } template void dst(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, int type, const T *data_in, T *data_out, T fct, bool ortho, size_t nthreads = 1) { if ((type < 1) || (type > 4)) throw std::invalid_argument("invalid DST type"); if (util::prod(shape) == 0) return; util::sanity_check(shape, stride_in, stride_out, data_in == data_out, axes); cndarr ain(data_in, shape, stride_in); ndarr aout(data_out, shape, stride_out); const ExecDcst exec{ortho, type, false}; if (type == 1) 
general_nd>(ain, aout, axes, fct, nthreads, exec); else if (type == 4) general_nd>(ain, aout, axes, fct, nthreads, exec); else general_nd>(ain, aout, axes, fct, nthreads, exec); } template void r2c(const shape_t &shape_in, const stride_t &stride_in, const stride_t &stride_out, size_t axis, bool forward, const T *data_in, std::complex *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape_in) == 0) return; util::sanity_check(shape_in, stride_in, stride_out, false, axis); cndarr ain(data_in, shape_in, stride_in); shape_t shape_out(shape_in); shape_out[axis] = shape_in[axis] / 2 + 1; ndarr> aout(data_out, shape_out, stride_out); general_r2c(ain, aout, axis, forward, fct, nthreads); } template void r2c(const shape_t &shape_in, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, bool forward, const T *data_in, std::complex *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape_in) == 0) return; util::sanity_check(shape_in, stride_in, stride_out, false, axes); r2c(shape_in, stride_in, stride_out, axes.back(), forward, data_in, data_out, fct, nthreads); if (axes.size() == 1) return; shape_t shape_out(shape_in); shape_out[axes.back()] = shape_in[axes.back()] / 2 + 1; auto newaxes = shape_t{axes.begin(), --axes.end()}; c2c(shape_out, stride_out, stride_out, newaxes, forward, data_out, data_out, T(1), nthreads); } template void c2r(const shape_t &shape_out, const stride_t &stride_in, const stride_t &stride_out, size_t axis, bool forward, const std::complex *data_in, T *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape_out) == 0) return; util::sanity_check(shape_out, stride_in, stride_out, false, axis); shape_t shape_in(shape_out); shape_in[axis] = shape_out[axis] / 2 + 1; cndarr> ain(data_in, shape_in, stride_in); ndarr aout(data_out, shape_out, stride_out); general_c2r(ain, aout, axis, forward, fct, nthreads); } template void c2r(const shape_t &shape_out, const stride_t &stride_in, const stride_t &stride_out, const shape_t 
&axes, bool forward, const std::complex *data_in, T *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape_out) == 0) return; if (axes.size() == 1) return c2r(shape_out, stride_in, stride_out, axes[0], forward, data_in, data_out, fct, nthreads); util::sanity_check(shape_out, stride_in, stride_out, false, axes); auto shape_in = shape_out; shape_in[axes.back()] = shape_out[axes.back()] / 2 + 1; auto nval = util::prod(shape_in); stride_t stride_inter(shape_in.size()); stride_inter.back() = sizeof(cmplx); for (int i = int(shape_in.size()) - 2; i >= 0; --i) stride_inter[size_t(i)] = stride_inter[size_t(i + 1)] * ptrdiff_t(shape_in[size_t(i + 1)]); arr> tmp(nval); auto newaxes = shape_t{axes.begin(), --axes.end()}; c2c(shape_in, stride_in, stride_inter, newaxes, forward, data_in, tmp.data(), T(1), nthreads); c2r(shape_out, stride_inter, stride_out, axes.back(), forward, tmp.data(), data_out, fct, nthreads); } template void r2r_fftpack(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, bool real2hermitian, bool forward, const T *data_in, T *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape) == 0) return; util::sanity_check(shape, stride_in, stride_out, data_in == data_out, axes); cndarr ain(data_in, shape, stride_in); ndarr aout(data_out, shape, stride_out); general_nd>(ain, aout, axes, fct, nthreads, ExecR2R{real2hermitian, forward}); } template void r2r_separable_hartley(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes, const T *data_in, T *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape) == 0) return; util::sanity_check(shape, stride_in, stride_out, data_in == data_out, axes); cndarr ain(data_in, shape, stride_in); ndarr aout(data_out, shape, stride_out); general_nd>(ain, aout, axes, fct, nthreads, ExecHartley{}, false); } template void r2r_genuine_hartley(const shape_t &shape, const stride_t &stride_in, const stride_t &stride_out, const shape_t 
&axes, const T *data_in, T *data_out, T fct, size_t nthreads = 1) { if (util::prod(shape) == 0) return; if (axes.size() == 1) return r2r_separable_hartley(shape, stride_in, stride_out, axes, data_in, data_out, fct, nthreads); util::sanity_check(shape, stride_in, stride_out, data_in == data_out, axes); shape_t tshp(shape); tshp[axes.back()] = tshp[axes.back()] / 2 + 1; arr> tdata(util::prod(tshp)); stride_t tstride(shape.size()); tstride.back() = sizeof(std::complex); for (size_t i = tstride.size() - 1; i > 0; --i) tstride[i - 1] = tstride[i] * ptrdiff_t(tshp[i]); r2c(shape, stride_in, tstride, axes, true, data_in, tdata.data(), fct, nthreads); cndarr> atmp(tdata.data(), tshp, tstride); ndarr aout(data_out, shape, stride_out); simple_iter iin(atmp); rev_iter iout(aout, axes); while (iin.remaining() > 0) { auto v = atmp[iin.ofs()]; aout[iout.ofs()] = v.r + v.i; aout[iout.rev_ofs()] = v.r - v.i; iin.advance(); iout.advance(); } } } // namespace detail using detail::FORWARD; using detail::BACKWARD; using detail::shape_t; using detail::stride_t; using detail::c2c; using detail::c2r; using detail::r2c; using detail::r2r_fftpack; using detail::r2r_separable_hartley; using detail::r2r_genuine_hartley; using detail::dct; using detail::dst; } // namespace pocketfft #undef POCKETFFT_NOINLINE #undef POCKETFFT_RESTRICT #endif // POCKETFFT_HDRONLY_H #endif // PYTHONIC_INCLUDE_NUMPY_FFT_POCKETFFT_HPP pythran-0.10.0+ds2/pythran/pythonic/numpy/fft/rfft.hpp000066400000000000000000000076321416264035500227020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FFT_RFFT_HPP #define PYTHONIC_NUMPY_FFT_RFFT_HPP #include "pythonic/include/numpy/fft/rfft.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/fft/c2c.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace fft { template types::ndarray::value, std::complex>::type, types::array::value>> 
rfft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { return r2c(in_array, -1, axis, norm, true, false); } template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { return r2c(in_array, -1, axis, "", true, false); } template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { return r2c(in_array, n, axis, "", true, false); } template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { return r2c(in_array, n, axis, norm, true, false); } template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &in_array, types::none_type n, long axis, types::str const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, norm, true, false); } template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &in_array, types::none_type n, long axis, types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, -1, axis, "", true, false); } template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &in_array, long n, long axis, types::none_type norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, "", true, false); } template types::ndarray::value, std::complex>::type, types::array::value>> rfft(types::ndarray const &in_array, long n, long axis, types::str const &norm) { auto tmp_array = _copy_to_double(in_array); return r2c(tmp_array, n, axis, norm, true, false); } NUMPY_EXPR_TO_NDARRAY0_IMPL(rfft); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/fill_diagonal.hpp000066400000000000000000000012561416264035500237420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FILL_DIAGONAL_HPP #define PYTHONIC_NUMPY_FILL_DIAGONAL_HPP #include "pythonic/include/numpy/fill_diagonal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/NoneType.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::none_type fill_diagonal(E &&expr, typename std::decay::type::dtype fill_value) { constexpr auto N = std::decay::type::value; types::array indices; for (long i = 0, n = sutils::min(expr); i < n; ++i) { std::fill(indices.begin(), indices.end(), i); expr.fast(indices) = fill_value; } return {}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/finfo.hpp000066400000000000000000000006001416264035500222470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FINFO_HPP #define PYTHONIC_NUMPY_FINFO_HPP #include "pythonic/include/numpy/finfo.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/finfo.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::finfo finfo(dtype d) { return types::finfo(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fix.hpp000066400000000000000000000006301416264035500217370ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FIX_HPP #define PYTHONIC_NUMPY_FIX_HPP #include "pythonic/include/numpy/fix.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME fix #define NUMPY_NARY_FUNC_SYM std::trunc #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/flatnonzero.hpp000066400000000000000000000022151416264035500235130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLATNONZERO_HPP #define PYTHONIC_NUMPY_FLATNONZERO_HPP #include "pythonic/include/numpy/flatnonzero.hpp" #include "pythonic/numpy/asarray.hpp" 
PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _flatnonzero(I begin, I end, O &out, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) if (*begin) *out++ = i; } template void _flatnonzero(I begin, I end, O &out, long &i, utils::int_) { for (; begin != end; ++begin) _flatnonzero((*begin).begin(), (*begin).end(), out, i, utils::int_()); } } template types::ndarray> flatnonzero(E const &expr) { long n = expr.flat_size(); utils::shared_ref> buffer(n); long *iter = buffer->data; long i = 0; _flatnonzero(expr.begin(), expr.end(), iter, i, utils::int_()); types::pshape shape = iter - buffer->data; return types::ndarray>(std::move(buffer), shape); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/flip.hpp000066400000000000000000000016051416264035500221060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLIP_HPP #define PYTHONIC_NUMPY_FLIP_HPP #include "pythonic/include/numpy/flip.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template auto flip(E const &expr, S const &slices, utils::index_sequence) -> decltype(expr(slices[I]...)) { return expr(slices[I]...); } } template auto flip(E const &expr, long axis) -> decltype(details::flip(expr, std::array{}, utils::make_index_sequence{})) { std::array slices; slices[axis].step = -1; return details::flip(expr, slices, utils::make_index_sequence{}); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fliplr.hpp000066400000000000000000000011551416264035500224440ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLIPLR_HPP #define PYTHONIC_NUMPY_FLIPLR_HPP #include "pythonic/include/numpy/fliplr.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto fliplr(E &&expr) -> decltype(std::forward(expr)( types::contiguous_slice{builtins::None, builtins::None}, 
types::slice{builtins::None, builtins::None, -1})) { return std::forward(expr)( types::contiguous_slice{builtins::None, builtins::None}, types::slice{builtins::None, builtins::None, -1}); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/flipud.hpp000066400000000000000000000007551416264035500224440ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLIPUD_HPP #define PYTHONIC_NUMPY_FLIPUD_HPP #include "pythonic/include/numpy/flipud.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto flipud(E &&expr) -> decltype( std::forward(expr)[types::slice{builtins::None, builtins::None, -1}]) { return std::forward( expr)[types::slice{builtins::None, builtins::None, -1}]; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/float128.hpp000066400000000000000000000012371416264035500225150ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLOAT128_HPP #define PYTHONIC_NUMPY_FLOAT128_HPP #include "pythonic/include/numpy/float128.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { long double float128() { return {}; } template long double float128(V v) { return static_cast(v); } } #define NUMPY_NARY_FUNC_NAME float128 #define NUMPY_NARY_FUNC_SYM details::float128 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/float32.hpp000066400000000000000000000012141416264035500224220ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLOAT32_HPP #define PYTHONIC_NUMPY_FLOAT32_HPP #include "pythonic/include/numpy/float32.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { float 
float32() { return float(); } template float float32(V v) { return static_cast(v); } } #define NUMPY_NARY_FUNC_NAME float32 #define NUMPY_NARY_FUNC_SYM details::float32 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/float64.hpp000066400000000000000000000012171416264035500224320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLOAT64_HPP #define PYTHONIC_NUMPY_FLOAT64_HPP #include "pythonic/include/numpy/float64.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { double float64() { return double(); } template double float64(V v) { return static_cast(v); } } #define NUMPY_NARY_FUNC_NAME float64 #define NUMPY_NARY_FUNC_SYM details::float64 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/float_.hpp000066400000000000000000000005361416264035500224220ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLOAT_HPP #define PYTHONIC_NUMPY_FLOAT_HPP #include "pythonic/include/numpy/float_.hpp" #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME float_ #define NUMPY_NARY_FUNC_SYM details::float64 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/floor.hpp000066400000000000000000000006411416264035500222740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLOOR_HPP #define PYTHONIC_NUMPY_FLOOR_HPP #include "pythonic/include/numpy/floor.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME floor #define NUMPY_NARY_FUNC_SYM xsimd::floor #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/floor_divide.hpp000066400000000000000000000010221416264035500236120ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLOORDIVIDE_HPP #define PYTHONIC_NUMPY_FLOORDIVIDE_HPP #include "pythonic/include/numpy/floor_divide.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/floor.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME floor_divide #define NUMPY_NARY_FUNC_SYM wrapper::divfloor #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/floor_divide/000077500000000000000000000000001416264035500231065ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/floor_divide/accumulate.hpp000066400000000000000000000002761416264035500257470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FLOOR_DIVIDE_ACCUMULATE_HPP #define PYTHONIC_NUMPY_FLOOR_DIVIDE_ACCUMULATE_HPP #define UFUNC_NAME floor_divide #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fmax.hpp000066400000000000000000000002321416264035500221020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FMAX_HPP #define PYTHONIC_NUMPY_FMAX_HPP #include "pythonic/include/numpy/fmin.hpp" #include "pythonic/numpy/maximum.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fmax/000077500000000000000000000000001416264035500213745ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/fmax/accumulate.hpp000066400000000000000000000002461416264035500242320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FMAX_ACCUMULATE_HPP #define PYTHONIC_NUMPY_FMAX_ACCUMULATE_HPP #define UFUNC_NAME fmax #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fmax/reduce.hpp000066400000000000000000000004121416264035500233510ustar00rootroot00000000000000#ifndef 
PYTHONIC_NUMPY_FMAX_REDUCE_HPP #define PYTHONIC_NUMPY_FMAX_REDUCE_HPP #define UFUNC_NAME fmax #define UFUNC_INAME imax #include "pythonic/include/numpy/fmax/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fmin.hpp000066400000000000000000000002321416264035500221000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FMIN_HPP #define PYTHONIC_NUMPY_FMIN_HPP #include "pythonic/include/numpy/fmin.hpp" #include "pythonic/numpy/minimum.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fmin/000077500000000000000000000000001416264035500213725ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/fmin/accumulate.hpp000066400000000000000000000002461416264035500242300ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FMIN_ACCUMULATE_HPP #define PYTHONIC_NUMPY_FMIN_ACCUMULATE_HPP #define UFUNC_NAME fmin #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fmin/reduce.hpp000066400000000000000000000004121416264035500233470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FMIN_REDUCE_HPP #define PYTHONIC_NUMPY_FMIN_REDUCE_HPP #define UFUNC_NAME fmin #define UFUNC_INAME imin #include "pythonic/include/numpy/fmin/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fmod.hpp000066400000000000000000000007121416264035500220770ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FMOD_HPP #define PYTHONIC_NUMPY_FMOD_HPP #include "pythonic/include/numpy/fmod.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME fmod #define NUMPY_NARY_FUNC_SYM xsimd::fmod #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/fmod/000077500000000000000000000000001416264035500213665ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/fmod/accumulate.hpp000066400000000000000000000002461416264035500242240ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FMOD_ACCUMULATE_HPP #define PYTHONIC_NUMPY_FMOD_ACCUMULATE_HPP #define UFUNC_NAME fmod #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/frexp.hpp000066400000000000000000000035201416264035500222760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FREXP_HPP #define PYTHONIC_NUMPY_FREXP_HPP #include "pythonic/include/numpy/frexp.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/traits.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, std::tuple>::type frexp(T val) { int exp; T significand = std::frexp(val, &exp); return std::make_tuple(significand, exp); } namespace { template void _frexp(E begin, E end, F significands_iter, G exps_iter, utils::int_<1>) { for (; begin != end; ++begin, ++significands_iter, ++exps_iter) *significands_iter = std::frexp(*begin, exps_iter); } template void _frexp(E begin, E end, F significands_iter, G exps_iter, utils::int_) { for (; begin != end; ++begin, ++significands_iter, ++exps_iter) _frexp((*begin).begin(), (*begin).end(), (*significands_iter).begin(), (*exps_iter).begin(), utils::int_()); } } template typename std::enable_if< !types::is_dtype::value, std::tuple, types::ndarray>>::type frexp(E const &arr) { auto arr_shape = sutils::getshape(arr); types::ndarray significands( arr_shape, builtins::None); types::ndarray exps(arr_shape, builtins::None); _frexp(arr.begin(), arr.end(), significands.begin(), exps.begin(), utils::int_()); return std::make_tuple(significands, exps); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/fromfile.hpp000066400000000000000000000027731416264035500227660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FROMFILE_HPP #define PYTHONIC_NUMPY_FROMFILE_HPP #include "pythonic/include/numpy/fromfile.hpp" #include "pythonic/builtins/FileNotFoundError.hpp" #include "pythonic/builtins/NotImplementedError.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> fromfile(types::str const &file_name, dtype d, long count, types::str const &sep, long offset) { if (sep.size() != 0) throw types::NotImplementedError( "Sep input is not implemented yet, should be left empty"); std::fstream fs; fs.open(file_name.c_str(), std::fstream::in | std::fstream::binary); if (fs.rdstate() != std::fstream::goodbit) { throw types::FileNotFoundError("Could not find file " + file_name); } fs.seekp(offset, std::fstream::beg); auto n1 = fs.tellp(); fs.seekp(0, std::fstream::end); long maxCount = (fs.tellp() - n1) / sizeof(typename dtype::type); fs.seekp(offset, std::fstream::beg); if (count < 0) { count = maxCount; } else if (count > maxCount) { count = maxCount; } types::ndarray> res( types::pshape{count}, types::none_type{}); fs.read((char *)res.buffer, sizeof(typename dtype::type) * count); return res; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fromfunction.hpp000066400000000000000000000054561416264035500236750ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FROMFUNCTION_HPP #define PYTHONIC_NUMPY_FROMFUNCTION_HPP #include "pythonic/include/numpy/fromfunction.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/utils/tags.hpp" PYTHONIC_NS_BEGIN namespace numpy { template struct fromfunction_helper; template template types::ndarray::type>::type>::type, pS> fromfunction_helper:: operator()(F &&f, pS 
const &shape, dtype d) { types::ndarray::type>::type>::type, pS> out(shape, builtins::None); long n = out.template shape<0>(); #ifdef _OPENMP if (std::is_same::value && n >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) #pragma omp parallel for for (long i = 0; i < n; ++i) out[i] = f(i); else #endif for (long i = 0; i < n; ++i) out[i] = f(i); return out; } template template types::ndarray< typename std::remove_cv::type>::type>::type, pS> fromfunction_helper:: operator()(F &&f, pS const &shape, dtype d) { types::ndarray< typename std::remove_cv::type>::type>::type, pS> out(shape, builtins::None); long n = out.template shape<0>(); long m = out.template shape<1>(); #ifdef _OPENMP if (std::is_same::value && (m * n) >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) #pragma omp parallel for collapse(2) for (long i = 0; i < n; ++i) for (long j = 0; j < m; ++j) out[i][j] = f(i, j); else #endif for (long i = 0; i < n; ++i) for (long j = 0; j < m; ++j) out[i][j] = f(i, j); return out; } template auto fromfunction(F &&f, pS const &shape, dtype d) -> decltype(fromfunction_helper::value, dtype, typename pythonic::purity_of::type>()( std::forward(f), shape)) { return fromfunction_helper::value, dtype, typename pythonic::purity_of::type>()( std::forward(f), shape); } /* TODO: must specialize for higher order */ } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fromiter.hpp000066400000000000000000000017421416264035500230050ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FROMITER_HPP #define PYTHONIC_NUMPY_FROMITER_HPP #include "pythonic/include/numpy/fromiter.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::type>::type::value_type, 1> fromiter(Iterable &&iterable, dtype d, long count) { using T = typename std::remove_cv< typename std::remove_reference::type>::type::value_type; if (count < 0) { types::list buffer(0); std::copy(iterable.begin(), iterable.end(), std::back_inserter(buffer)); 
return {buffer}; } else { utils::shared_ref> buffer(count); std::copy_n(iterable.begin(), count, buffer->data); types::array shape = {count}; return {buffer, shape}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/fromstring.hpp000066400000000000000000000030331416264035500233430ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FROMSTRING_HPP #define PYTHONIC_NUMPY_FROMSTRING_HPP #include "pythonic/include/numpy/fromstring.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/str.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> fromstring(types::str const &string, dtype d, long count, types::str const &sep) { if (sep) { types::list res(0); if (count < 0) count = std::numeric_limits::max(); else res.reserve(count); size_t current; size_t next = -1; long numsplit = 0; do { current = next + 1; next = string.find_first_of(sep, current); typename dtype::type item; std::istringstream iss(string.substr(current, next - current).chars()); iss >> item; res.push_back(item); } while (next != types::str::npos && ++numsplit < count); return {res}; } else { if (count < 0) count = string.size(); types::pshape shape = count; utils::shared_ref> buffer( std::get<0>(shape)); auto const *tstring = reinterpret_cast(string.c_str()); std::copy(tstring, tstring + std::get<0>(shape), buffer->data); return {buffer, shape}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/full.hpp000066400000000000000000000033251416264035500221170ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FULL_HPP #define PYTHONIC_NUMPY_FULL_HPP #include "pythonic/include/numpy/full.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> full(pS const &shape, F fill_value, dtype d) { return {(sutils::shape_t)shape, typename dtype::type(fill_value)}; } template 
types::ndarray> full(long size, F fill_value, dtype d) { return full(types::pshape(size), fill_value, d); } template types::ndarray>> full(std::integral_constant, F fill_value, dtype d) { return full(types::pshape>({}), fill_value, d); } template types::ndarray> full(pS const &shape, F fill_value, types::none_type) { return {(sutils::shape_t)shape, fill_value}; } template types::ndarray> full(long size, F fill_value, types::none_type nt) { return full(types::pshape(size), fill_value, nt); } template types::ndarray>> full(std::integral_constant, F fill_value, types::none_type nt) { return full(types::pshape>({}), fill_value, nt); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/full_like.hpp000066400000000000000000000014561416264035500231260ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_FULLLIKE_HPP #define PYTHONIC_NUMPY_FULLLIKE_HPP #include "pythonic/include/numpy/full_like.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/full.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto full_like(E const &expr, F fill_value, dtype d) -> decltype(full(sutils::getshape(expr), fill_value, d)) { return full(sutils::getshape(expr), fill_value, d); } template auto full_like(E const &expr, F fill_value, types::none_type) -> decltype(full(sutils::getshape(expr), fill_value, types::dtype_t())) { return full(sutils::getshape(expr), fill_value, types::dtype_t()); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/greater.hpp000066400000000000000000000010101416264035500225730ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_GREATER_HPP #define PYTHONIC_NUMPY_GREATER_HPP #include "pythonic/include/numpy/greater.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/gt.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME greater #define NUMPY_NARY_FUNC_SYM 
pythonic::operator_::gt #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/greater/000077500000000000000000000000001416264035500220725ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/greater/accumulate.hpp000066400000000000000000000002571416264035500247320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_GREATER_ACCUMULATE_HPP #define PYTHONIC_NUMPY_GREATER_ACCUMULATE_HPP #define UFUNC_NAME greater #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/greater_equal.hpp000066400000000000000000000010361416264035500237720ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_GREATEREQUAL_HPP #define PYTHONIC_NUMPY_GREATEREQUAL_HPP #include "pythonic/include/numpy/greater_equal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/operator_/ge.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME greater_equal #define NUMPY_NARY_FUNC_SYM pythonic::operator_::ge #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/greater_equal/000077500000000000000000000000001416264035500232615ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/greater_equal/accumulate.hpp000066400000000000000000000003011416264035500261070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_GREATER_EQUAL_ACCUMULATE_HPP #define PYTHONIC_NUMPY_GREATER_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME greater_equal #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/heaviside.hpp000066400000000000000000000012431416264035500231130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_HEAVISIDE_HPP #define PYTHONIC_NUMPY_HEAVISIDE_HPP #include "pythonic/include/numpy/cos.hpp" #include "pythonic/utils/functor.hpp" #include 
"pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template T1 heaviside(T0 x0, T1 x1) { if (x0 == 0) return x1; if (x0 < 0) return 0; if (x0 > 0) return 1; return x0; // NaN } } #define NUMPY_NARY_FUNC_NAME heaviside #define NUMPY_NARY_FUNC_SYM details::heaviside #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/heaviside/000077500000000000000000000000001416264035500224025ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/heaviside/accumulate.hpp000066400000000000000000000002651416264035500252410ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_HEAVISIDE_ACCUMULATE_HPP #define PYTHONIC_NUMPY_HEAVISIDE_ACCUMULATE_HPP #define UFUNC_NAME heaviside #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/hstack.hpp000066400000000000000000000010721416264035500224270ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_HSTACK_HPP #define PYTHONIC_NUMPY_HSTACK_HPP #include #include PYTHONIC_NS_BEGIN namespace numpy { template auto hstack(ArraySequence &&seq) -> decltype(concatenate(std::forward(seq), 1)) { auto constexpr concatenate_axis = (decltype(concatenate(std::forward(seq), 1))::value != 1); return concatenate(std::forward(seq), concatenate_axis); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/hypot.hpp000066400000000000000000000007171416264035500223220ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_HYPOT_HPP #define PYTHONIC_NUMPY_HYPOT_HPP #include "pythonic/include/numpy/hypot.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME hypot #define NUMPY_NARY_FUNC_SYM xsimd::hypot #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/hypot/000077500000000000000000000000001416264035500216045ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/hypot/accumulate.hpp000066400000000000000000000002511416264035500244360ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_HYPOT_ACCUMULATE_HPP #define PYTHONIC_NUMPY_HYPOT_ACCUMULATE_HPP #define UFUNC_NAME hypot #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/identity.hpp000066400000000000000000000005261416264035500230060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_IDENTITY_HPP #define PYTHONIC_NUMPY_IDENTITY_HPP #include "pythonic/include/numpy/identity.hpp" #include "pythonic/numpy/eye.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto identity(long n, dtype d) -> decltype(eye(n, n, 0, d)) { return eye(n, n, 0, d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/imag.hpp000066400000000000000000000013011416264035500220620ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_IMAG_HPP #define PYTHONIC_NUMPY_IMAG_HPP #include "pythonic/include/numpy/imag.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/list.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto imag(E &&expr) -> decltype(builtins::getattr(types::attr::IMAG{}, std::forward(expr))) { return builtins::getattr(types::attr::IMAG{}, std::forward(expr)); } template auto imag(types::list const &expr) -> decltype(imag(numpy::functor::asarray{}(expr))) { return imag(numpy::functor::asarray{}(expr)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/indices.hpp000066400000000000000000000025571416264035500226010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INDICES_HPP #define PYTHONIC_NUMPY_INDICES_HPP #include "pythonic/include/numpy/indices.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace 
numpy { template types::ndarray< typename dtype::type, sutils::push_front_t< pS, std::integral_constant::value>>> indices(pS const &shape, dtype) { auto constexpr N = std::tuple_size::value; sutils::push_front_t> oshape; sutils::scopy_shape<1, -1>(oshape, shape, utils::make_index_sequence()); types::ndarray>> out(oshape, builtins::None); typename dtype::type *iters[N]; for (size_t n = 0; n < N; ++n) iters[n] = out[n].buffer; size_t lens[N]; lens[0] = out.flat_size() / std::get<0>(shape); auto ashape = sutils::array(shape); for (size_t n = 1; n < N; ++n) lens[n] = lens[n - 1] / ashape[n]; for (long i = 0, n = out.flat_size() / N; i < n; ++i) { long mult = 1; for (long n = N - 1; n > 0; n--) { *(iters[n]++) = (i / mult) % ashape[n]; mult *= ashape[n]; } *(iters[0]++) = i / mult; } return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/inf.hpp000066400000000000000000000001611416264035500217240ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INF_HPP #define PYTHONIC_NUMPY_INF_HPP #include "pythonic/include/numpy/inf.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/inner.hpp000066400000000000000000000002311416264035500222610ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INNER_HPP #define PYTHONIC_NUMPY_INNER_HPP #include "pythonic/include/numpy/inner.hpp" #include "pythonic/numpy/dot.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/insert.hpp000066400000000000000000000050761416264035500224660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INSERT_HPP #define PYTHONIC_NUMPY_INSERT_HPP #include "pythonic/include/numpy/insert.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/traits.hpp" #include "pythonic/builtins/None.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value && types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis) { types::ndarray> out( 
types::pshape(long( in.flat_size() + std::min(indices.flat_size(), data.flat_size()))), builtins::None); auto out_iter = out.fbegin(); auto in_iter = in.fbegin(); auto data_iter = data.begin(); for (long index : indices) { out_iter = std::copy(in_iter, in.fbegin() + index, out_iter); *out_iter++ = *data_iter++; in_iter = in.fbegin() + index; } std::copy(in_iter, in.fend(), out_iter); return out; } template typename std::enable_if::value && !types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis) { return insert(in, indices, types::list({data}), axis); } template typename std::enable_if::value && types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis) { return insert(in, types::list({indices}), {data}, axis); } template typename std::enable_if::value && !types::is_iterable::value, types::ndarray>>::type insert(types::ndarray in, I const &indices, F const &data, types::none_type axis) { return insert(in, types::list({indices}), types::list({data}), axis); } template E insert(E, Args const &...) 
{ throw std::runtime_error("insert only partially supported"); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/int16.hpp000066400000000000000000000011601416264035500221110ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INT16_HPP #define PYTHONIC_NUMPY_INT16_HPP #include "pythonic/include/numpy/int16.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int16_t int16() { return int16_t(); } template int16_t int16(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME int16 #define NUMPY_NARY_FUNC_SYM details::int16 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/int32.hpp000066400000000000000000000011601416264035500221070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INT32_HPP #define PYTHONIC_NUMPY_INT32_HPP #include "pythonic/include/numpy/int32.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int32_t int32() { return int32_t(); } template int32_t int32(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME int32 #define NUMPY_NARY_FUNC_SYM details::int32 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/int64.hpp000066400000000000000000000011601416264035500221140ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INT64_HPP #define PYTHONIC_NUMPY_INT64_HPP #include "pythonic/include/numpy/int64.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int64_t int64() { return int64_t(); } template int64_t int64(V v) { 
return v; } } #define NUMPY_NARY_FUNC_NAME int64 #define NUMPY_NARY_FUNC_SYM details::int64 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/int8.hpp000066400000000000000000000011461416264035500220360ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INT8_HPP #define PYTHONIC_NUMPY_INT8_HPP #include "pythonic/include/numpy/int8.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int8_t int8() { return int8_t(); } template int8_t int8(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME int8 #define NUMPY_NARY_FUNC_SYM details::int8 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/int_.hpp000066400000000000000000000011341416264035500221020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INT__HPP #define PYTHONIC_NUMPY_INT__HPP #include "pythonic/include/numpy/int_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { long int_() { return {}; } template long int_(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME int_ #define NUMPY_NARY_FUNC_SYM details::int_ #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/intc.hpp000066400000000000000000000011321416264035500221040ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INTC_HPP #define PYTHONIC_NUMPY_INTC_HPP #include "pythonic/include/numpy/intc.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { int intc() { return {}; } 
template int intc(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME intc #define NUMPY_NARY_FUNC_SYM details::intc #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/interp.hpp000066400000000000000000000101511416264035500224510ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INTERP_HPP #define PYTHONIC_NUMPY_INTERP_HPP #include "pythonic/include/numpy/interp.hpp" #include "pythonic/include/numpy/remainder.hpp" #include "pythonic/include/numpy/argsort.hpp" #include #include #include "pythonic/numpy/argsort.hpp" #include #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/interp_core.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> interp(T1 x, T2 xp, T3 fp, t1 _left, t2 _right, t3 _period) { double left = _left; double right = _right; double period = _period; // Todo: what to do if this condition isn't satisfied? Can't use a statis // assert because the size isn't known at compile time. 
assert(xp.template shape<0>() == fp.template shape<0>()); double outVal(0); types::ndarray> out = { (long)(x.template shape<0>()), outVal}; if (period) { auto x_rem = pythonic::numpy::functor::remainder{}(x, period); auto xp_rem = pythonic::numpy::functor::remainder{}(xp, period); auto idx = pythonic::numpy::functor::argsort{}(xp_rem); auto xp_sorted = xp_rem[idx]; auto fp_sorted = fp[idx]; auto left_pad_xp = types::ndarray>( types::pshape(1), xp_sorted[-1] - period); auto right_pad_xp = types::ndarray>( types::pshape(1), xp_sorted[0] + period); auto new_xp = pythonic::numpy::functor::concatenate{}( pythonic::types::make_tuple(left_pad_xp, xp_sorted, right_pad_xp)); auto left_pad_fp = types::ndarray>( types::pshape(1), fp_sorted[-1]); auto right_pad_fp = types::ndarray>( types::pshape(1), fp_sorted[0]); auto new_fp = pythonic::numpy::functor::concatenate{}( pythonic::types::make_tuple(left_pad_fp, fp_sorted, right_pad_fp)); auto lenxp = new_xp.size(); auto lenx = x_rem.size(); do_interp(x_rem, new_xp, new_fp, out, lenxp, lenx, 0., 0.); } else { auto lenxp = xp.size(); auto lenx = x.size(); do_interp(x, xp, fp, out, lenxp, lenx, left, right); } return out; } // No parameter specified template types::ndarray> interp(T1 x, T2 xp, T3 fp, types::none_type left, types::none_type right, types::none_type period) { auto _left = fp[0]; auto _right = fp[-1]; return interp(x, xp, fp, _left, _right, 0.); } // left specified template types::ndarray> interp(T1 x, T2 xp, T3 fp, t1 left, types::none_type right, types::none_type period) { auto _right = fp[-1]; return interp(x, xp, fp, left, _right, 0.); } // right specified template types::ndarray> interp(T1 x, T2 xp, T3 fp, types::none_type left, t1 right, types::none_type period) { auto _left = fp[0]; return interp(x, xp, fp, _left, right, 0.); } // period specified template types::ndarray> interp(T1 x, T2 xp, T3 fp, types::none_type left, types::none_type right, t1 period) { assert(period != 0); return interp(x, xp, fp, 0., 0., 
period); } // left and right specified template types::ndarray> interp(T1 x, T2 xp, T3 fp, t1 left, t2 right, types::none_type period) { return interp(x, xp, fp, left, right, 0.); } NUMPY_EXPR_TO_NDARRAY0_IMPL(interp); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/interp_core.hpp000066400000000000000000000160161416264035500234670ustar00rootroot00000000000000// // From NumpySrc/numpy/core/src/multiarray/compiled_base.c /** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1]. * * If an starting index guess is in-range, the array values around this * index are first checked. This allows for repeated calls for well-ordered * keys (a very common case) to use the previous index as a very good guess. * * If the guess value is not useful, bisection of the array is used to * find the index. If there is no such index, the return values are: * key < arr[0] -- -1 * key == arr[len - 1] -- len - 1 * key > arr[len - 1] -- len * The array is assumed contiguous and sorted in ascending order. * * @param key key value. * @param arr contiguous sorted array to be searched. * @param len length of the array. * @param guess initial guess of index * @return index */ #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/isnan.hpp" #define LIKELY_IN_CACHE_SIZE 8 template static npy_intp binary_search_with_guess(const npy_double key, const T &arr, npy_intp len, npy_intp guess) { npy_intp imin = 0; npy_intp imax = len; /* Handle keys outside of the arr range first */ if (key > arr[len - 1]) { return len; } else if (key < arr[0]) { return -1; } /* * If len <= 4 use linear search. * From above we know key >= arr[0] when we start. 
*/ if (len <= 4) { npy_intp i; for (i = 1; i < len && key >= arr[i]; ++i) ; return i - 1; } if (guess > len - 3) { guess = len - 3; } if (guess < 1) { guess = 1; } /* check most likely values: guess - 1, guess, guess + 1 */ if (key < arr[guess]) { if (key < arr[guess - 1]) { imax = guess - 1; /* last attempt to restrict search to items in cache */ if (guess > LIKELY_IN_CACHE_SIZE && key >= arr[guess - LIKELY_IN_CACHE_SIZE]) { imin = guess - LIKELY_IN_CACHE_SIZE; } } else { /* key >= arr[guess - 1] */ return guess - 1; } } else { /* key >= arr[guess] */ if (key < arr[guess + 1]) { return guess; } else { /* key >= arr[guess + 1] */ if (key < arr[guess + 2]) { return guess + 1; } else { /* key >= arr[guess + 2] */ imin = guess + 2; /* last attempt to restrict search to items in cache */ if (guess < len - LIKELY_IN_CACHE_SIZE - 1 && key < arr[guess + LIKELY_IN_CACHE_SIZE]) { imax = guess + LIKELY_IN_CACHE_SIZE; } } } } /* finally, find index by bisection */ while (imin < imax) { const npy_intp imid = imin + ((imax - imin) >> 1); if (key >= arr[imid]) { imin = imid + 1; } else { imax = imid; } } return imin - 1; } // //#undef LIKELY_IN_CACHE_SIZE // // NPY_NO_EXPORT PyObject * // arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict) //{ // // PyObject *fp, *xp, *x; // PyObject *left = NULL, *right = NULL; // PyArrayObject *afp = NULL, *axp = NULL, *ax = NULL, *af = NULL; // npy_intp i, lenx, lenxp; // npy_double lval, rval; // const npy_double *dy, *dx, *dz; // npy_double *dres, *slopes = NULL; // // static char *kwlist[] = {"x", "xp", "fp", "left", "right", NULL}; // // NPY_BEGIN_THREADS_DEF; // // if (!PyArg_ParseTupleAndKeywords(args, kwdict, "OOO|OO:interp", kwlist, // &x, &xp, &fp, &left, &right)) { // return NULL; // } // // afp = (PyArrayObject *)PyArray_ContiguousFromAny(fp, NPY_DOUBLE, 1, 1); // if (afp == NULL) { // return NULL; // } // axp = (PyArrayObject *)PyArray_ContiguousFromAny(xp, NPY_DOUBLE, 1, 1); // if (axp == NULL) { // goto 
fail; // } // ax = (PyArrayObject *)PyArray_ContiguousFromAny(x, NPY_DOUBLE, 0, 0); // if (ax == NULL) { // goto fail; // } // lenxp = PyArray_SIZE(axp); // if (lenxp == 0) { // PyErr_SetString(PyExc_ValueError, // "array of sample points is empty"); // goto fail; // } // if (PyArray_SIZE(afp) != lenxp) { // PyErr_SetString(PyExc_ValueError, // "fp and xp are not of the same length."); // goto fail; // } // // af = (PyArrayObject *)PyArray_SimpleNew(PyArray_NDIM(ax), // PyArray_DIMS(ax), NPY_DOUBLE); // if (af == NULL) { // goto fail; // } // lenx = PyArray_SIZE(ax); // // dy = (const npy_double *)PyArray_DATA(afp); // dx = (const npy_double *)PyArray_DATA(axp); // dz = (const npy_double *)PyArray_DATA(ax); // dres = (npy_double *)PyArray_DATA(af); // /* Get left and right fill values. */ // if ((left == NULL) || (left == Py_None)) { // lval = dy[0]; // } // else { // lval = PyFloat_AsDouble(left); // if (error_converting(lval)) { // goto fail; // } // } // if ((right == NULL) || (right == Py_None)) { // rval = dy[lenxp - 1]; // } // else { // rval = PyFloat_AsDouble(right); // if (error_converting(rval)) { // goto fail; // } // } // xp->dx fp->dy x -> dz template void do_interp(const T1 &dz, const T2 &dx, const T3 &dy, T4 &dres, npy_intp lenxp, npy_intp lenx, npy_double lval, npy_double rval) { npy_intp i; npy_double *slopes = NULL; std::vector slope_vect; /* binary_search_with_guess needs at least a 3 item long array */ if (lenxp == 1) { const npy_double xp_val = dx[0]; const npy_double fp_val = dy[0]; // NPY_BEGIN_THREADS_THRESHOLDED(lenx); for (i = 0; i < lenx; ++i) { const npy_double x_val = dz[i]; dres[i] = (x_val < xp_val) ? lval : ((x_val > xp_val) ? rval : fp_val); } // NPY_END_THREADS; } else { npy_intp j = 0; /* only pre-calculate slopes if there are relatively few of them. 
*/ if (lenxp <= lenx) { slope_vect.resize(lenxp - 1); slopes = slope_vect.data(); } // NPY_BEGIN_THREADS; if (slopes != NULL) { for (i = 0; i < lenxp - 1; ++i) { slopes[i] = (dy[i + 1] - dy[i]) / (dx[i + 1] - dx[i]); } } for (i = 0; i < lenx; ++i) { const npy_double x_val = dz[i]; if (pythonic::numpy::functor::isnan()(x_val)) { dres[i] = x_val; continue; } j = binary_search_with_guess(x_val, dx, lenxp, j); if (j == -1) { dres[i] = lval; } else if (j == lenxp) { dres[i] = rval; } else if (j == lenxp - 1) { dres[i] = dy[j]; } else if (dx[j] == x_val) { /* Avoid potential non-finite interpolation */ dres[i] = dy[j]; } else { const npy_double slope = (slopes != NULL) ? slopes[j] : (dy[j + 1] - dy[j]) / (dx[j + 1] - dx[j]); dres[i] = slope * (x_val - dx[j]) + dy[j]; } } // NPY_END_THREADS; } } pythran-0.10.0+ds2/pythran/pythonic/numpy/intersect1d.hpp000066400000000000000000000022301416264035500233740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INTERSECT1D_HPP #define PYTHONIC_NUMPY_INTERSECT1D_HPP #include "pythonic/include/numpy/intersect1d.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/combined.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/utils/pdqsort.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray< typename __combined::type, types::pshape> intersect1d(E const &e, F const &f) { using T = typename __combined::type; auto ae = asarray(e); auto af = asarray(f); std::set sae(ae.fbegin(), ae.fend()); std::set found; types::list lout(0); lout.reserve(sae.size()); for (auto iter = af.fbegin(), end = af.fend(); iter != end; ++iter) { auto curr = *iter; if (sae.find(curr) != sae.end() && found.find(curr) == found.end()) { found.insert(curr); lout.push_back(curr); } } pdqsort(lout.begin(), lout.end()); return {lout}; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/intp.hpp000066400000000000000000000011541416264035500221250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INTP_HPP #define PYTHONIC_NUMPY_INTP_HPP #include "pythonic/include/numpy/intp.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { intptr_t intp() { return intptr_t(); } template intptr_t intp(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME intp #define NUMPY_NARY_FUNC_SYM details::intp #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/invert.hpp000066400000000000000000000007241416264035500224640ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_INVERT_HPP #define PYTHONIC_NUMPY_INVERT_HPP #include "pythonic/include/numpy/invert.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/invert.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME invert #define NUMPY_NARY_FUNC_SYM operator_::invert #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isclose.hpp000066400000000000000000000016371416264035500226220ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISCLOSE_HPP #define PYTHONIC_NUMPY_ISCLOSE_HPP #include "pythonic/include/numpy/isclose.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/abs.hpp" #include "pythonic/numpy/isfinite.hpp" #include "pythonic/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template bool isclose(T0 const &u, T1 const &v, double rtol, double atol, bool equal_nan) { if (functor::isfinite()(u) && functor::isfinite()(v)) return functor::abs()(u - v) <= (atol + rtol * functor::abs()(v)); else if 
(functor::isnan()(u) && functor::isnan()(v)) return equal_nan; else return (u == v); } } #define NUMPY_NARY_FUNC_NAME isclose #define NUMPY_NARY_FUNC_SYM wrapper::isclose #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/iscomplex.hpp000066400000000000000000000014731416264035500231620ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISCOMPLEX_HPP #define PYTHONIC_NUMPY_ISCOMPLEX_HPP #include "pythonic/include/numpy/iscomplex.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template typename std::enable_if::value, bool>::type iscomplex(I const &a) { return a.imag() != 0.; } template constexpr typename std::enable_if::value, bool>::type iscomplex(I const &a) { return false; } } #define NUMPY_NARY_FUNC_NAME iscomplex #define NUMPY_NARY_FUNC_SYM wrapper::iscomplex #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isfinite.hpp000066400000000000000000000006631416264035500227710ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISFINITE_HPP #define PYTHONIC_NUMPY_ISFINITE_HPP #include "pythonic/include/numpy/isfinite.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME isfinite #define NUMPY_NARY_FUNC_SYM wrapper::isfinite #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isinf.hpp000066400000000000000000000012401416264035500222570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISINF_HPP #define PYTHONIC_NUMPY_ISINF_HPP #include "pythonic/include/numpy/isinf.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" 
PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template bool isinf(T const &v) { return std::isinf(v); } template bool isinf(std::complex const &v) { return std::isinf(v.real()) || std::isinf(v.imag()); } } #define NUMPY_NARY_FUNC_NAME isinf #define NUMPY_NARY_FUNC_SYM wrapper::isinf #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isnan.hpp000066400000000000000000000017301416264035500222630ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISNAN_HPP #define PYTHONIC_NUMPY_ISNAN_HPP #include "pythonic/include/numpy/isnan.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template bool isnan(std::complex const &v) { return std::isnan(v.real()) || std::isnan(v.imag()); } template auto isnan(T const &v) -> typename std::enable_if< std::is_floating_point::type>::value, bool>::type { return std::isnan(v); } template auto isnan(T const &v) -> typename std::enable_if< !std::is_floating_point::type>::value, bool>::type { return false; } } #define NUMPY_NARY_FUNC_NAME isnan #define NUMPY_NARY_FUNC_SYM wrapper::isnan #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isneginf.hpp000066400000000000000000000012161416264035500227540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISNEGINF_HPP #define PYTHONIC_NUMPY_ISNEGINF_HPP #include "pythonic/include/numpy/isneginf.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic//numpy/isinf.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto isneginf(T const &t) -> decltype(functor::isinf{}(t) && (t < 0)) { return functor::isinf{}(t) && (t < 0); } } #define NUMPY_NARY_FUNC_NAME isneginf #define NUMPY_NARY_FUNC_SYM wrapper::isneginf #include 
"pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isposinf.hpp000066400000000000000000000012131416264035500230010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISPOSINF_HPP #define PYTHONIC_NUMPY_ISPOSINF_HPP #include "pythonic/include/numpy/isposinf.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/isinf.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto isposinf(T const &t) -> decltype(functor::isinf{}(t) && t >= 0) { return functor::isinf{}(t) && t >= 0; } } #define NUMPY_NARY_FUNC_NAME isposinf #define NUMPY_NARY_FUNC_SYM wrapper::isposinf #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isreal.hpp000066400000000000000000000014331416264035500224320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISREAL_HPP #define PYTHONIC_NUMPY_ISREAL_HPP #include "pythonic/include/numpy/isreal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template typename std::enable_if::value, bool>::type isreal(I const &a) { return a.imag() == 0.; } template typename std::enable_if::value, bool>::type isreal(I const &a) { return true; } } #define NUMPY_NARY_FUNC_NAME isreal #define NUMPY_NARY_FUNC_SYM wrapper::isreal #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isrealobj.hpp000066400000000000000000000006551416264035500231320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISREALOBJ_HPP #define PYTHONIC_NUMPY_ISREALOBJ_HPP #include "pythonic/include/numpy/isrealobj.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/traits.hpp" PYTHONIC_NS_BEGIN namespace 
numpy { template constexpr bool isrealobj(E const &expr) { return !types::is_complex::value; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/isscalar.hpp000066400000000000000000000007141416264035500227550ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISSCALAR_HPP #define PYTHONIC_NUMPY_ISSCALAR_HPP #include "pythonic/include/numpy/isscalar.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/traits.hpp" #include "pythonic/types/str.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template constexpr bool isscalar(E const &) { return types::is_dtype::value || std::is_same::value; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/issctype.hpp000066400000000000000000000014061416264035500230160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ISSCTYPE_HPP #define PYTHONIC_NUMPY_ISSCTYPE_HPP #include "pythonic/include/numpy/issctype.hpp" #include "pythonic/numpy/isscalar.hpp" PYTHONIC_NS_BEGIN namespace numpy { template constexpr auto issctype(E const &expr) -> typename std::enable_if::value && !std::is_same::value, bool>::type { return isscalar(typename E::type()); } template constexpr auto issctype(E const &expr) -> typename std::enable_if::value || std::is_same::value, bool>::type { return false; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ldexp.hpp000066400000000000000000000007161416264035500222720ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LDEXP_HPP #define PYTHONIC_NUMPY_LDEXP_HPP #include "pythonic/include/numpy/ldexp.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME ldexp #define NUMPY_NARY_FUNC_SYM std::ldexp #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/ldexp/000077500000000000000000000000001416264035500215555ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/ldexp/accumulate.hpp000066400000000000000000000002511416264035500244070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LDEXP_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LDEXP_ACCUMULATE_HPP #define UFUNC_NAME ldexp #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/left_shift.hpp000066400000000000000000000010341416264035500232770ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LEFT_SHIFT_HPP #define PYTHONIC_NUMPY_LEFT_SHIFT_HPP #include "pythonic/include/numpy/left_shift.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/operator_/lshift.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME left_shift #define NUMPY_NARY_FUNC_SYM pythonic::operator_::lshift #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/left_shift/000077500000000000000000000000001416264035500225705ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/left_shift/accumulate.hpp000066400000000000000000000002701416264035500254230ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LEFT_SHIFT_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LEFT_SHIFT_ACCUMULATE_HPP #define UFUNC_NAME left_shift #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/less.hpp000066400000000000000000000007741416264035500221300ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LESS_HPP #define PYTHONIC_NUMPY_LESS_HPP #include "pythonic/include/numpy/less.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include 
"pythonic/operator_/lt.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME less #define NUMPY_NARY_FUNC_SYM pythonic::operator_::lt #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/less/000077500000000000000000000000001416264035500214075ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/less/accumulate.hpp000066400000000000000000000002461416264035500242450ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LESS_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LESS_ACCUMULATE_HPP #define UFUNC_NAME less #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/less_equal.hpp000066400000000000000000000010221416264035500233020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LESSEQUAL_HPP #define PYTHONIC_NUMPY_LESSEQUAL_HPP #include "pythonic/include/numpy/less_equal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/le.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME less_equal #define NUMPY_NARY_FUNC_SYM pythonic::operator_::le #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/less_equal/000077500000000000000000000000001416264035500225765ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/less_equal/accumulate.hpp000066400000000000000000000002701416264035500254310ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LESS_EQUAL_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LESS_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME less_equal #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/lexsort.hpp000066400000000000000000000031711416264035500226540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LEXSORT_HPP #define PYTHONIC_NUMPY_LEXSORT_HPP #include 
"pythonic/include/numpy/lexsort.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/pdqsort.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template struct lexcmp_nth { template bool operator()(K const &keys, long i0, long i1) const { if (std::get(keys)[i0] < std::get(keys)[i1]) return true; else if (std::get(keys)[i0] > std::get(keys)[i1]) return false; else return lexcmp_nth{}(keys, i0, i1); } }; template <> struct lexcmp_nth<0> { template bool operator()(K const &keys, long i0, long i1) const { return false; } }; template struct lexcmp { K const &keys; lexcmp(K const &keys) : keys(keys) { } bool operator()(long i0, long i1) { return lexcmp_nth::value>{}(keys, i0, i1); } }; } template types::ndarray> lexsort(pS const &keys) { long n = std::get<0>(keys).size(); types::ndarray> out(types::pshape(n), builtins::None); // fill with the original indices std::iota(out.buffer, out.buffer + n, 0L); // then sort using keys as the comparator pdqsort(out.buffer, out.buffer + n, details::lexcmp(keys)); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/linalg/000077500000000000000000000000001416264035500217075ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/linalg/matrix_power.hpp000066400000000000000000000033151416264035500251420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LINALG_MATRIX_POWER_HPP #define PYTHONIC_NUMPY_LINALG_MATRIX_POWER_HPP #include "pythonic/include/numpy/linalg/matrix_power.hpp" #include "pythonic/numpy/array.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/numpy/identity.hpp" #include "pythonic/numpy/dot.hpp" #include "pythonic/builtins/NotImplementedError.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace linalg { namespace details { template E fast_pow(E const &base, long n) { if (n == 1) return base; if (n == 2) return numpy::functor::dot{}(base, base); if (n == 3) { auto tmp = numpy::functor::dot{}(base, base); 
return numpy::functor::dot{}(tmp, base); } // starting from here, we know for sure that tmp will point to newly // allocated memory // this is used to optimize in-place dot computation in the odd case auto tmp = fast_pow(base, n / 2); if (n & 1) { auto next = numpy::functor::dot{}(tmp, tmp); return numpy::functor::dot{}(base, next, tmp); } else { return numpy::functor::dot{}(tmp, tmp); } } } template auto matrix_power(E const &expr, long n) -> decltype(numpy::functor::array{}(expr)) { if (n == 0) return numpy::functor::identity{}(expr.template shape<0>(), types::dtype_t{}); if (n > 0) { auto base = numpy::functor::asarray{}(expr); return details::fast_pow(base, n); } throw pythonic::builtins::NotImplementedError("negative power"); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/linalg/norm.hpp000066400000000000000000000063401416264035500233760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LINALG_NORM_HPP #define PYTHONIC_NUMPY_LINALG_NORM_HPP #include "pythonic/include/numpy/linalg/norm.hpp" #include "pythonic/numpy/abs.hpp" #include "pythonic/numpy/conj.hpp" #include "pythonic/numpy/asfarray.hpp" #include "pythonic/numpy/inf.hpp" #include "pythonic/numpy/max.hpp" #include "pythonic/numpy/min.hpp" #include "pythonic/numpy/power.hpp" #include "pythonic/numpy/real.hpp" #include "pythonic/numpy/sqrt.hpp" #include "pythonic/builtins/pythran/abssqr.hpp" #include "pythonic/numpy/sum.hpp" #include "pythonic/builtins/NotImplementedError.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace linalg { template auto norm(Array &&array, types::none_type ord, types::none_type axis) -> decltype( pythonic::numpy::functor::sqrt{}(pythonic::numpy::functor::sum{}( pythonic::builtins::pythran::functor::abssqr{}( std::forward(array))))) { return pythonic::numpy::functor::sqrt{}(pythonic::numpy::functor::sum{}( pythonic::builtins::pythran::functor::abssqr{}( std::forward(array)))); } template norm_t norm(Array &&x, double ord, types::none_type) { switch 
(std::decay::type::value) { case 1: return norm(std::forward(x), ord, 0L); case 2: return norm(std::forward(x), ord, types::array{{0L, 1L}}); default: throw pythonic::builtins::NotImplementedError( "Invalid norm order for matrices."); } } template norm_t norm(Array &&x, double ord, long axis) { auto &&y = pythonic::numpy::functor::asfarray{}(x); if (ord == inf) return pythonic::numpy::functor::max{}( pythonic::numpy::functor::abs{}(y), axis); else if (ord == -inf) return pythonic::numpy::functor::min{}( pythonic::numpy::functor::abs{}(y), axis); else if (ord == 0.) return pythonic::numpy::functor::sum{}(y != 0., axis); else if (ord == 1.) return pythonic::numpy::functor::sum{}( pythonic::numpy::functor::abs{}(y), axis); else if (ord == 2.) return pythonic::numpy::functor::sqrt{}(pythonic::numpy::functor::sum{}( pythonic::numpy::functor::real{}( pythonic::numpy::functor::conj{}(y)*y), axis)); else { return pythonic::numpy::functor::power{}( pythonic::numpy::functor::sum{}( pythonic::numpy::functor::power{}( pythonic::numpy::functor::abs{}(y), ord), axis), 1. 
/ ord); } } template norm_t norm(Array &&x, types::none_type ord, double axis) { return norm(std::forward(x), 2., axis); } template norm_t norm(Array &&x, double ord, types::array axis) { return norm(std::forward(x), ord, axis[0]); } template norm_t norm(Array &&array, double ord, types::array axis) { throw pythonic::builtins::NotImplementedError("We need more dev!"); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/linspace.hpp000066400000000000000000000014131416264035500227470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LINSPACE_HPP #define PYTHONIC_NUMPY_LINSPACE_HPP #include "pythonic/include/numpy/linspace.hpp" #include "pythonic/numpy/arange.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> linspace(double start, double stop, long num, bool endpoint, bool retstep, dtype d) { assert(!retstep && "retstep not supported"); double step = (stop - start) / (num - (endpoint ? 1 : 0)); if (std::is_integral::value) return asarray(arange(start, stop + (endpoint ? step * .5 : 0), step), d); else return arange(start, stop + (endpoint ? 
step * .5 : 0), step, d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/log.hpp000066400000000000000000000006271416264035500217400ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOG_HPP #define PYTHONIC_NUMPY_LOG_HPP #include "pythonic/include/numpy/log.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log #define NUMPY_NARY_FUNC_SYM xsimd::log #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/log10.hpp000066400000000000000000000006411416264035500220750ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOG10_HPP #define PYTHONIC_NUMPY_LOG10_HPP #include "pythonic/include/numpy/log10.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log10 #define NUMPY_NARY_FUNC_SYM xsimd::log10 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/log1p.hpp000066400000000000000000000006411416264035500221750ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOG1P_HPP #define PYTHONIC_NUMPY_LOG1P_HPP #include "pythonic/include/numpy/log1p.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log1p #define NUMPY_NARY_FUNC_SYM xsimd::log1p #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/log2.hpp000066400000000000000000000006341416264035500220200ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOG2_HPP #define PYTHONIC_NUMPY_LOG2_HPP #include "pythonic/include/numpy/log2.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include 
"pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME log2 #define NUMPY_NARY_FUNC_SYM xsimd::log2 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logaddexp.hpp000066400000000000000000000014101416264035500231150ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGADDEXP_HPP #define PYTHONIC_NUMPY_LOGADDEXP_HPP #include "pythonic/include/numpy/logaddexp.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/log.hpp" #include "pythonic/numpy/exp.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logaddexp(T0 const &t0, T1 const &t1) -> decltype(functor::log{}(functor::exp{}(t0) + functor::exp{}(t1))) { return functor::log{}(functor::exp{}(t0) + functor::exp{}(t1)); } } #define NUMPY_NARY_FUNC_NAME logaddexp #define NUMPY_NARY_FUNC_SYM wrapper::logaddexp #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logaddexp/000077500000000000000000000000001416264035500224105ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/logaddexp/accumulate.hpp000066400000000000000000000002651416264035500252470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGADDEXP_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LOGADDEXP_ACCUMULATE_HPP #define UFUNC_NAME logaddexp #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logaddexp2.hpp000066400000000000000000000016471416264035500232130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGADDEXP2_HPP #define PYTHONIC_NUMPY_LOGADDEXP2_HPP #include "pythonic/include/numpy/logaddexp2.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/log2.hpp" #include 
"pythonic/numpy/power.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logaddexp2(T0 const &t0, T1 const &t1) -> decltype(functor::log2{}(functor::power{}(T0(2), t0) + functor::power{}(T1(2), t1))) { return functor::log2{}(functor::power{}(T0(2), t0) + functor::power{}(T1(2), t1)); } } #define NUMPY_NARY_FUNC_NAME logaddexp2 #define NUMPY_NARY_FUNC_SYM wrapper::logaddexp2 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logaddexp2/000077500000000000000000000000001416264035500224725ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/logaddexp2/accumulate.hpp000066400000000000000000000002701416264035500253250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGADDEXP2_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LOGADDEXP2_ACCUMULATE_HPP #define UFUNC_NAME logaddexp2 #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_and.hpp000066400000000000000000000012261416264035500234070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGICALAND_HPP #define PYTHONIC_NUMPY_LOGICALAND_HPP #include "pythonic/include/numpy/logical_and.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logical_and(T0 const &t0, T1 const &t1) -> decltype(t0 &&t1) { return t0 && t1; } } #define NUMPY_NARY_FUNC_NAME logical_and #define NUMPY_NARY_FUNC_SYM wrapper::logical_and #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_and/000077500000000000000000000000001416264035500226755ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_and/accumulate.hpp000066400000000000000000000002731416264035500255330ustar00rootroot00000000000000#ifndef 
PYTHONIC_NUMPY_LOGICAL_AND_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LOGICAL_AND_ACCUMULATE_HPP #define UFUNC_NAME logical_and #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_not.hpp000066400000000000000000000007541416264035500234520ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGICALNOT_HPP #define PYTHONIC_NUMPY_LOGICALNOT_HPP #include "pythonic/include/numpy/logical_not.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/not_.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME logical_not #define NUMPY_NARY_FUNC_SYM pythonic::operator_::not_ #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_or.hpp000066400000000000000000000012201416264035500232570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGICALOR_HPP #define PYTHONIC_NUMPY_LOGICALOR_HPP #include "pythonic/include/numpy/logical_or.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logical_or(T0 const &t0, T1 const &t1) -> decltype(t0 || t1) { return t0 || t1; } } #define NUMPY_NARY_FUNC_NAME logical_or #define NUMPY_NARY_FUNC_SYM wrapper::logical_or #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_or/000077500000000000000000000000001416264035500225535ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_or/accumulate.hpp000066400000000000000000000002701416264035500254060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGICAL_OR_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LOGICAL_OR_ACCUMULATE_HPP #define UFUNC_NAME logical_or #include "pythonic/numpy/ufunc_accumulate.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_xor.hpp000066400000000000000000000013021416264035500234500ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGICALXOR_HPP #define PYTHONIC_NUMPY_LOGICALXOR_HPP #include "pythonic/include/numpy/logical_xor.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto logical_xor(T0 const &t0, T1 const &t1) -> decltype((t0 && !t1) || (t1 && !t0)) { return (t0 && !t1) || (t1 && !t0); } } #define NUMPY_NARY_FUNC_NAME logical_xor #define NUMPY_NARY_FUNC_SYM wrapper::logical_xor #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_xor/000077500000000000000000000000001416264035500227435ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/logical_xor/accumulate.hpp000066400000000000000000000002731416264035500256010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGICAL_XOR_ACCUMULATE_HPP #define PYTHONIC_NUMPY_LOGICAL_XOR_ACCUMULATE_HPP #define UFUNC_NAME logical_xor #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/logspace.hpp000066400000000000000000000011451416264035500227500ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LOGSPACE_HPP #define PYTHONIC_NUMPY_LOGSPACE_HPP #include "pythonic/include/numpy/logspace.hpp" #include "pythonic/numpy/linspace.hpp" #include "pythonic/numpy/power.hpp" PYTHONIC_NS_BEGIN namespace numpy { auto logspace(double start, double stop, long num, bool endpoint, double base) -> decltype(functor::power()(base, functor::linspace()(start, stop, num, endpoint))) { return functor::power()(base, functor::linspace()(start, stop, num, endpoint)); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/longlong.hpp000066400000000000000000000012021416264035500227640ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_LONGLONG_HPP #define PYTHONIC_NUMPY_LONGLONG_HPP #include "pythonic/include/numpy/longlong.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { long long longlong() { return {}; } template long long longlong(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME longlong #define NUMPY_NARY_FUNC_SYM details::longlong #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/max.hpp000066400000000000000000000007671416264035500217510ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MAX_HPP #define PYTHONIC_NUMPY_MAX_HPP #include "pythonic/include/numpy/max.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/reduce.hpp" #include "pythonic/operator_/imax.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto max(Args &&... 
args) -> decltype(reduce(std::forward(args)...)) { return reduce(std::forward(args)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/maximum.hpp000066400000000000000000000007251416264035500226330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MAXIMUM_HPP #define PYTHONIC_NUMPY_MAXIMUM_HPP #include "pythonic/include/numpy/maximum.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME maximum #define NUMPY_NARY_FUNC_SYM xsimd::max #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/maximum/000077500000000000000000000000001416264035500221165ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/maximum/accumulate.hpp000066400000000000000000000002571416264035500247560ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MAXIMUM_ACCUMULATE_HPP #define PYTHONIC_NUMPY_MAXIMUM_ACCUMULATE_HPP #define UFUNC_NAME maximum #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/maximum/reduce.hpp000066400000000000000000000004261416264035500241000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MAXIMUM_REDUCE_HPP #define PYTHONIC_NUMPY_MAXIMUM_REDUCE_HPP #define UFUNC_NAME maximum #define UFUNC_INAME imax #include "pythonic/include/numpy/maximum/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/mean.hpp000066400000000000000000000032211416264035500220700ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MEAN_HPP #define PYTHONIC_NUMPY_MEAN_HPP #include "pythonic/include/numpy/mean.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/numpy/expand_dims.hpp" #include "pythonic/numpy/sum.hpp" #include 
"pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto mean(E const &expr, types::none_type axis, dtype d, types::none_type out, types::false_immediate keepdims) -> decltype(sum(expr, axis, d) / details::dtype_or_double(expr.flat_size())) { return sum(expr, axis, d) / details::dtype_or_double(expr.flat_size()); } template auto mean(E const &expr, long axis, dtype d, types::none_type out, types::false_immediate keepdims) -> decltype(sum(expr, axis, d)) { return sum(expr, axis, d) /= details::dtype_or_double(sutils::getshape(expr)[axis]); } template types::ndarray, typename details::make_scalar_pshape::type> mean(E const &expr, types::none_type axis, dtype d, types::none_type out, types::true_immediate keep_dims) { return {typename details::make_scalar_pshape::type(), mean(expr, axis, d, out)}; } template auto mean(E const &expr, long axis, dtype d, types::none_type out, types::true_immediate keepdims) -> decltype(expand_dims(mean(expr, axis, d), axis)) { return expand_dims(mean(expr, axis, d), axis); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/median.hpp000066400000000000000000000076271416264035500224230ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MEDIAN_HPP #define PYTHONIC_NUMPY_MEDIAN_HPP #include "pythonic/include/numpy/median.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/numpy/sort.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace { template typename std::enable_if::value != 1, void>::type _median(T_out *out, types::ndarray const &tmp, long axis) { auto tmp_shape = sutils::getshape(tmp); const long step = std::accumulate(tmp_shape.begin() + axis, tmp_shape.end(), 1L, std::multiplies()); long const buffer_size = tmp_shape[axis]; std::unique_ptr buffer{new T[buffer_size]}; const long stepper = step / tmp_shape[axis]; const long n = tmp.flat_size() / tmp_shape[axis] * step; long ith = 0, nth = 0; for 
(long i = 0; i < n; i += step) { T *buffer_iter = buffer.get(); T const *iter = tmp.buffer + ith; T const *iend = iter + step; while (iter != iend) { *buffer_iter++ = *iter; iter += stepper; } if (buffer_size % 2 == 1) { std::nth_element(buffer.get(), buffer.get() + buffer_size / 2, buffer_iter, ndarray::comparator{}); *out++ = buffer[buffer_size / 2]; } else { std::nth_element(buffer.get(), buffer.get() + buffer_size / 2, buffer_iter, ndarray::comparator{}); auto t0 = buffer[buffer_size / 2]; std::nth_element(buffer.get(), buffer.get() + buffer_size / 2 - 1, buffer.get() + buffer_size / 2, ndarray::comparator{}); auto t1 = buffer[buffer_size / 2 - 1]; *out++ = (t0 + t1) / double(2); } ith += step; if (ith - nth == tmp.flat_size()) { ++nth; ith = nth; } } } } template decltype(std::declval() + 1.) median(types::ndarray const &arr, types::none_type) { size_t n = arr.flat_size(); std::unique_ptr tmp{new T[n]}; std::copy(arr.buffer, arr.buffer + n, tmp.get()); std::nth_element(tmp.get(), tmp.get() + n / 2, tmp.get() + n, ndarray::comparator{}); T t0 = tmp[n / 2]; if (n % 2 == 1) { return t0; } else { std::nth_element(tmp.get(), tmp.get() + n / 2 - 1, tmp.get() + n / 2, ndarray::comparator{}); T t1 = tmp[n / 2 - 1]; return (t0 + t1) / 2.; } } template typename std::enable_if< std::tuple_size::value != 1, types::ndarray() + 1.), types::array::value - 1>>>::type median(types::ndarray const &arr, long axis) { constexpr auto N = std::tuple_size::value; if (axis < 0) axis += N; types::array::value - 1> shp; auto stmp = sutils::getshape(arr); auto next = std::copy(stmp.begin(), stmp.begin() + axis, shp.begin()); std::copy(stmp.begin() + axis + 1, stmp.end(), next); types::ndarray() + 1.), types::array::value - 1>> out(shp, types::none_type{}); _median(out.buffer, arr, axis); return out; } template typename std::enable_if::value == 1, decltype(std::declval() + 1.)>::type median(types::ndarray const &arr, long axis) { if (axis != 0) throw types::ValueError("axis out of 
bounds"); return median(arr); } NUMPY_EXPR_TO_NDARRAY0_IMPL(median); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/min.hpp000066400000000000000000000007671416264035500217470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MIN_HPP #define PYTHONIC_NUMPY_MIN_HPP #include "pythonic/include/numpy/min.hpp" #include "pythonic/numpy/reduce.hpp" #include "pythonic/operator_/imin.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto min(Args &&... args) -> decltype(reduce(std::forward(args)...)) { return reduce(std::forward(args)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/minimum.hpp000066400000000000000000000007251416264035500226310ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MINIMUM_HPP #define PYTHONIC_NUMPY_MINIMUM_HPP #include "pythonic/include/numpy/minimum.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME minimum #define NUMPY_NARY_FUNC_SYM xsimd::min #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/minimum/000077500000000000000000000000001416264035500221145ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/minimum/accumulate.hpp000066400000000000000000000002571416264035500247540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MINIMUM_ACCUMULATE_HPP #define PYTHONIC_NUMPY_MINIMUM_ACCUMULATE_HPP #define UFUNC_NAME minimum #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/minimum/reduce.hpp000066400000000000000000000004261416264035500240760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MINIMUM_REDUCE_HPP #define PYTHONIC_NUMPY_MINIMUM_REDUCE_HPP #define UFUNC_NAME minimum #define UFUNC_INAME imin #include "pythonic/include/numpy/minimum/reduce.hpp" 
#include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/mod.hpp000066400000000000000000000002271416264035500217320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MOD_HPP #define PYTHONIC_NUMPY_MOD_HPP #include "pythonic/include/numpy/mod.hpp" #include "pythonic/operator_/mod.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/mod/000077500000000000000000000000001416264035500212205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/mod/accumulate.hpp000066400000000000000000000002431416264035500240530ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MOD_ACCUMULATE_HPP #define PYTHONIC_NUMPY_MOD_ACCUMULATE_HPP #define UFUNC_NAME mod #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/multiply.hpp000066400000000000000000000010161416264035500230270ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MULTIPLY_HPP #define PYTHONIC_NUMPY_MULTIPLY_HPP #include "pythonic/include/numpy/multiply.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/mul.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME multiply #define NUMPY_NARY_FUNC_SYM pythonic::operator_::mul #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/multiply/000077500000000000000000000000001416264035500223205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/multiply/accumulate.hpp000066400000000000000000000002621416264035500251540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MULTIPLY_ACCUMULATE_HPP #define PYTHONIC_NUMPY_MULTIPLY_ACCUMULATE_HPP #define UFUNC_NAME multiply #include "pythonic/numpy/ufunc_accumulate.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/multiply/reduce.hpp000066400000000000000000000004321416264035500242770ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_MULTIPLY_REDUCE_HPP #define PYTHONIC_NUMPY_MULTIPLY_REDUCE_HPP #define UFUNC_NAME multiply #define UFUNC_INAME imul #include "pythonic/include/numpy/multiply/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nan.hpp000066400000000000000000000001611416264035500217240ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NAN_HPP #define PYTHONIC_NUMPY_NAN_HPP #include "pythonic/include/numpy/nan.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nan_to_num.hpp000066400000000000000000000015531416264035500233130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NANTONUM_HPP #define PYTHONIC_NUMPY_NANTONUM_HPP #include "pythonic/include/numpy/nan_to_num.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/isinf.hpp" #include "pythonic/numpy/isnan.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template I nan_to_num(I const &a) { if (functor::isinf{}(a)) { if (a >= 0) return std::numeric_limits::max(); else return std::numeric_limits::lowest(); } else if (functor::isnan{}(a)) return 0; else return a; } } #define NUMPY_NARY_FUNC_NAME nan_to_num #define NUMPY_NARY_FUNC_SYM wrapper::nan_to_num #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nanargmax.hpp000066400000000000000000000025711416264035500231330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NANARGMAX_HPP #define PYTHONIC_NUMPY_NANARGMAX_HPP #include "pythonic/include/numpy/nanargmax.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { 
namespace { template void _nanargmax(E begin, E end, F &max, long &index, long &where, utils::int_<1>) { for (; begin != end; ++begin, ++index) { auto curr = *begin; if (!functor::isnan()(curr) && curr > max) { max = curr; where = index; } } } template void _nanargmax(E begin, E end, F &max, long &index, long &where, utils::int_) { for (; begin != end; ++begin) _nanargmax((*begin).begin(), (*begin).end(), max, index, where, utils::int_()); } } template long nanargmax(E const &expr) { typename E::dtype max = -std::numeric_limits::infinity(); long where = -1; long index = 0; _nanargmax(expr.begin(), expr.end(), max, index, where, utils::int_()); if (where >= 0) return where; else throw types::ValueError("empty sequence"); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nanargmin.hpp000066400000000000000000000025711416264035500231310ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NANARGMIN_HPP #define PYTHONIC_NUMPY_NANARGMIN_HPP #include "pythonic/include/numpy/nanargmin.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _nanargmin(E begin, E end, F &min, long &index, long &where, utils::int_<1>) { for (; begin != end; ++begin, ++index) { auto curr = *begin; if (!functor::isnan()(curr) && curr < min) { min = curr; where = index; } } } template void _nanargmin(E begin, E end, F &min, long &index, long &where, utils::int_) { for (; begin != end; ++begin) _nanargmin((*begin).begin(), (*begin).end(), min, index, where, utils::int_()); } } template long nanargmin(E const &expr) { typename E::dtype min = std::numeric_limits::infinity(); long where = -1; long index = 0; _nanargmin(expr.begin(), expr.end(), min, index, where, utils::int_()); if (where >= 0) return where; else throw types::ValueError("empty sequence"); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/nanmax.hpp000066400000000000000000000024741416264035500224430ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NANMAX_HPP #define PYTHONIC_NUMPY_NANMAX_HPP #include "pythonic/include/numpy/nanmax.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/isnan.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template bool _nanmax(E begin, E end, F &max, utils::int_<1>) { bool found = false; for (; begin != end; ++begin) { auto curr = *begin; if (!functor::isnan()(curr) && curr >= max) { max = curr; found = true; } } return found; } template bool _nanmax(E begin, E end, F &max, utils::int_) { bool found = false; for (; begin != end; ++begin) found |= _nanmax((*begin).begin(), (*begin).end(), max, utils::int_()); return found; } } template typename E::dtype nanmax(E const &expr) { bool found = false; typename E::dtype max = std::numeric_limits::lowest(); found = _nanmax(expr.begin(), expr.end(), max, utils::int_()); if (!found) max = std::numeric_limits::quiet_NaN(); return max; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nanmin.hpp000066400000000000000000000024711416264035500224360ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NANMIN_HPP #define PYTHONIC_NUMPY_NANMIN_HPP #include "pythonic/include/numpy/nanmin.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/isnan.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template bool _nanmin(E begin, E end, F &min, utils::int_<1>) { bool found = false; for (; begin != end; ++begin) { auto curr = *begin; if (!functor::isnan()(curr) && curr <= min) { min = curr; found = true; } } return found; } template bool _nanmin(E begin, E end, F &min, utils::int_) { bool found = false; for (; begin != end; ++begin) found |= _nanmin((*begin).begin(), (*begin).end(), 
min, utils::int_()); return found; } } template typename E::dtype nanmin(E const &expr) { bool found = false; typename E::dtype min = std::numeric_limits::max(); found = _nanmin(expr.begin(), expr.end(), min, utils::int_()); if (!found) min = std::numeric_limits::quiet_NaN(); return min; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nansum.hpp000066400000000000000000000016071416264035500224570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NANSUM_HPP #define PYTHONIC_NUMPY_NANSUM_HPP #include "pythonic/include/numpy/nansum.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/isnan.hpp" PYTHONIC_NS_BEGIN namespace numpy { template void _nansum(E begin, E end, F &sum, utils::int_<1>) { for (; begin != end; ++begin) { auto curr = *begin; if (!functor::isnan()(curr)) sum += curr; } } template void _nansum(E begin, E end, F &sum, utils::int_) { for (; begin != end; ++begin) _nansum((*begin).begin(), (*begin).end(), sum, utils::int_()); } template typename E::dtype nansum(E const &expr) { typename E::dtype s = 0; _nansum(expr.begin(), expr.end(), s, utils::int_()); return s; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray.hpp000066400000000000000000000017641416264035500226220ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_HPP #define PYTHONIC_NUMPY_NDARRAY_HPP #include "pythonic/include/numpy/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/nested_container.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ndarray(pS const &shape, dtype) { return {(sutils::shape_t)shape, builtins::None}; } template types::ndarray> ndarray(long size, dtype d) { return ndarray(types::pshape(size), d); } template types::ndarray>> ndarray(std::integral_constant, dtype d) { return ndarray(types::pshape>({}), d); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/000077500000000000000000000000001416264035500221015ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/astype.hpp000066400000000000000000000006661416264035500241270ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_ASTYPE_HPP #define PYTHONIC_NUMPY_NDARRAY_ASTYPE_HPP #include "pythonic/include/numpy/ndarray/astype.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template auto astype(E &&e, dtype d) -> decltype(asarray(std::forward(e), d)) { return asarray(std::forward(e), d); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/fill.hpp000066400000000000000000000012421416264035500235370ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_FILL_HPP #define PYTHONIC_NUMPY_NDARRAY_FILL_HPP #include "pythonic/include/numpy/ndarray/fill.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template types::none_type fill(E &&e, F f) { std::fill(e.begin(), e.end(), f); return builtins::None; } template types::none_type fill(types::ndarray &e, F f) { std::fill(e.fbegin(), e.fend(), f); return builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/flatten.hpp000066400000000000000000000010401416264035500242420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_FLATTEN_HPP #define PYTHONIC_NUMPY_NDARRAY_FLATTEN_HPP #include "pythonic/include/numpy/ndarray/flatten.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template types::ndarray> flatten(types::ndarray const &a) { return {a.mem, types::pshape{a.flat_size()}}; } NUMPY_EXPR_TO_NDARRAY0_IMPL(flatten); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/item.hpp000066400000000000000000000016321416264035500235520ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_ITEM_HPP #define PYTHONIC_NUMPY_NDARRAY_ITEM_HPP #include "pythonic/include/numpy/ndarray/item.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template T item(types::ndarray const &expr, long i) { if (i < 0) i += expr.flat_size(); return *(expr.fbegin() + i); } template auto item(E &&expr, types::array const &i) -> decltype(expr[i]) { return expr[i]; } // only for compatibility purpose, very bad impl template typename std::decay::type::dtype item(E &&expr, long i) { if (i < 0) i += expr.flat_size(); return asarray(std::forward(expr)).flat()[i]; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/reshape.hpp000066400000000000000000000066331416264035500242510ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_RESHAPE_HPP #define PYTHONIC_NUMPY_NDARRAY_RESHAPE_HPP #include "pythonic/include/numpy/ndarray/reshape.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { namespace misc { template void set(P &p, long i, long v, utils::index_sequence) { (void)std::initializer_list{ (i == Is && (sutils::assign(std::get(p), v), true))...}; } } template typename std::enable_if::value, types::ndarray>::type reshape(types::ndarray const &expr, NpS const &new_shape) { long where = sutils::sfind( new_shape, -1, std::integral_constant::value>(), [](long a, long b) { return a <= b; }); long next = sutils::sfind(new_shape, -1, where, [](long a, long b) { return a <= b; }); if (next >= 0) { throw pythonic::types::ValueError( "Reshape: can only specify one unknown dimension"); } if (where >= 
0) { auto auto_shape = new_shape; misc::set(auto_shape, where, expr.flat_size() / -sutils::sprod(new_shape), utils::make_index_sequence::value>()); return expr.reshape(auto_shape); } else { auto nshape = sutils::sprod(new_shape); auto n = expr.flat_size(); if (n < nshape) { types::ndarray out(new_shape, builtins::None); auto iter = std::copy(expr.fbegin(), expr.fend(), out.fbegin()); for (long i = 1; i < nshape / n; ++i) iter = std::copy(out.fbegin(), out.fbegin() + n, iter); std::copy(out.fbegin(), out.fbegin() + nshape % n, iter); return out; } else { return expr.reshape(new_shape); } } } template typename std::enable_if::value, types::ndarray>>::type reshape(types::ndarray const &expr, NpS const &new_shape) { auto n = expr.flat_size(); if (new_shape <= -1) { return expr.reshape(types::pshape(n)); } if (n < new_shape) { types::ndarray> out( types::pshape{new_shape}, builtins::None); auto iter = std::copy(expr.fbegin(), expr.fend(), out.fbegin()); for (long i = 1; i < new_shape / n; ++i) iter = std::copy(out.fbegin(), out.fbegin() + n, iter); std::copy(out.fbegin(), out.fbegin() + new_shape % n, iter); return out; } else { return expr.reshape(types::pshape(new_shape)); } } template auto reshape(types::ndarray const &expr, S0 i0, S1 i1, S const &... 
indices) -> decltype(reshape(expr, types::pshape{i0, i1, indices...})) { return reshape(expr, types::pshape{i0, i1, indices...}); } NUMPY_EXPR_TO_NDARRAY0_IMPL(reshape); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/sort.hpp000066400000000000000000000110221416264035500235750ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_SORT_HPP #define PYTHONIC_NUMPY_NDARRAY_SORT_HPP #include "pythonic/include/numpy/ndarray/sort.hpp" #include #include #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" #include "pythonic/numpy/array.hpp" #include "pythonic/utils/pdqsort.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { namespace { struct quicksorter { template void operator()(Args &&... args) { pdqsort(std::forward(args)...); } }; struct mergesorter { template > void operator()(It first, It last, Cmp cmp = Cmp()) { if (last - first > 1) { It middle = first + (last - first) / 2; operator()(first, middle, cmp); operator()(middle, last, cmp); std::inplace_merge(first, middle, last, cmp); } } }; struct heapsorter { template void operator()(Args &&... args) { return std::sort_heap(std::forward(args)...); } }; struct stablesorter { template void operator()(Args &&... 
args) { return std::stable_sort(std::forward(args)...); } }; template struct _comp; template struct _comp> { bool operator()(std::complex const &i, std::complex const &j) const { if (std::real(i) == std::real(j)) return std::imag(i) < std::imag(j); else return std::real(i) < std::real(j); } }; template using comparator = typename std::conditional::value, _comp, std::less>::type; template typename std::enable_if::value == 1, void>::type _sort(types::ndarray &out, long axis, Sorter sorter) { sorter(out.begin(), out.end(), comparator{}); } template typename std::enable_if::value != 1, void>::type _sort(types::ndarray &out, long axis, Sorter sorter) { constexpr auto N = std::tuple_size::value; if (axis < 0) axis += N; long const flat_size = out.flat_size(); if (axis == N - 1) { const long step = out.template shape(); for (T *out_iter = out.buffer, *end_iter = out.buffer + flat_size; out_iter != end_iter; out_iter += step) sorter(out_iter, out_iter + step, comparator{}); } else { auto out_shape = sutils::getshape(out); const long step = std::accumulate(out_shape.begin() + axis, out_shape.end(), 1L, std::multiplies()); long const buffer_size = out_shape[axis]; const long stepper = step / out_shape[axis]; const long n = flat_size / out_shape[axis]; long ith = 0, nth = 0; std::unique_ptr buffer{new T[buffer_size]}; for (long i = 0; i < n; i++) { for (long j = 0; j < buffer_size; ++j) buffer[j] = out.buffer[ith + j * stepper]; sorter(buffer.get(), buffer.get() + buffer_size, comparator{}); for (long j = 0; j < buffer_size; ++j) out.buffer[ith + j * stepper] = buffer[j]; ith += step; if (ith >= flat_size) { ith = ++nth; } } } } } template types::none_type sort(E &&expr, long axis, types::none_type) { _sort(expr, axis, quicksorter()); return {}; } template types::none_type sort(E &&expr, types::none_type) { _sort(expr, 0, quicksorter()); return {}; } template types::none_type sort(E &&expr, long axis, types::str const &kind) { if (kind == "quicksort") _sort(expr, axis, 
quicksorter()); else if (kind == "mergesort") _sort(expr, axis, mergesorter()); else if (kind == "heapsort") _sort(expr, axis, heapsorter()); else if (kind == "stable") _sort(expr, axis, stablesorter()); return {}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/tofile.hpp000066400000000000000000000023451416264035500241000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_TOFILE_HPP #define PYTHONIC_NUMPY_NDARRAY_TOFILE_HPP #include "pythonic/include/numpy/ndarray/tofile.hpp" #include "pythonic/builtins/FileNotFoundError.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template void tofile(types::ndarray const &expr, types::str const &file_name, types::str const &sep, types::str const &format) { if (sep.size() != 0) throw types::NotImplementedError( "Sep input is not implemented yet, should be left empty"); if (format.size() != 0) throw types::NotImplementedError( "Format input is not implemented yet, should be left empty"); std::ofstream fs; fs.open(file_name.c_str(), std::ofstream::out | std::ofstream::binary); if (fs.rdstate() != std::ofstream::goodbit) { throw types::FileNotFoundError("Could not open file " + file_name); } fs.write((char *)expr.buffer, sizeof(T) * expr.flat_size()); } NUMPY_EXPR_TO_NDARRAY0_IMPL(tofile); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/tolist.hpp000066400000000000000000000020211416264035500241230ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_TOLIST_HPP #define PYTHONIC_NUMPY_NDARRAY_TOLIST_HPP #include "pythonic/include/numpy/ndarray/tolist.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template typename std::enable_if::value == 1, types::list>::type tolist(types::ndarray 
const &expr) { return {expr.fbegin(), expr.fend()}; } template typename std::enable_if< std::tuple_size::value != 1, typename tolist_type::value>::type>::type tolist(types::ndarray const &expr) { typename tolist_type::value>::type out(0); for (auto const &elts : expr) out.push_back(tolist(elts)); return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(tolist); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndarray/tostring.hpp000066400000000000000000000012311416264035500244600ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDARRAY_TOSTRING_HPP #define PYTHONIC_NUMPY_NDARRAY_TOSTRING_HPP #include "pythonic/include/numpy/ndarray/tostring.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace ndarray { template types::str tostring(types::ndarray const &expr) { return types::str(reinterpret_cast(expr.buffer), expr.flat_size() * sizeof(T)); } NUMPY_EXPR_TO_NDARRAY0_IMPL(tostring); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndenumerate.hpp000066400000000000000000000047301416264035500234650ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDENUMERATE_HPP #define PYTHONIC_NUMPY_NDENUMERATE_HPP #include "pythonic/include/numpy/ndenumerate.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template ndenumerate_iterator::ndenumerate_iterator() { } template ndenumerate_iterator::ndenumerate_iterator(E const &expr, long first) : index(first), expr(expr), iter(expr.buffer) { } template std::tuple, typename E::dtype> ndenumerate_iterator::operator*() const { types::array out; auto shape = sutils::getshape(expr); long mult = 1; for (long j = E::value - 1; j > 0; j--) { out[j] = (index / mult) % shape[j]; mult *= shape[j]; } out[0] = index / mult; return std::tuple, typename E::dtype>{out, *iter}; } template ndenumerate_iterator &ndenumerate_iterator::operator++() 
{ ++index, ++iter; return *this; } template ndenumerate_iterator &ndenumerate_iterator::operator+=(long n) { index += n, iter += n; return *this; } template bool ndenumerate_iterator:: operator!=(ndenumerate_iterator const &other) const { return index != other.index; } template bool ndenumerate_iterator:: operator<(ndenumerate_iterator const &other) const { return index < other.index; } template long ndenumerate_iterator:: operator-(ndenumerate_iterator const &other) const { return index - other.index; } template _ndenumerate::_ndenumerate() { } template _ndenumerate::_ndenumerate(E const &expr) : ndenumerate_iterator(expr, 0), expr(expr), end_iter(expr, expr.flat_size()) { } template typename _ndenumerate::iterator &_ndenumerate::begin() { return *this; } template typename _ndenumerate::iterator const &_ndenumerate::begin() const { return *this; } template typename _ndenumerate::iterator _ndenumerate::end() const { return end_iter; } template _ndenumerate> ndenumerate(types::ndarray const &expr) { return {expr}; } NUMPY_EXPR_TO_NDARRAY0_IMPL(ndenumerate); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndim.hpp000066400000000000000000000006341416264035500221040ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDIM_HPP #define PYTHONIC_NUMPY_NDIM_HPP #include "pythonic/include/numpy/ndim.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto ndim(E const &e) -> decltype(builtins::getattr(types::attr::NDIM{}, e)) { return builtins::getattr(types::attr::NDIM{}, e); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ndindex.hpp000066400000000000000000000047111416264035500226060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NDINDEX_HPP #define PYTHONIC_NUMPY_NDINDEX_HPP #include "pythonic/include/numpy/ndindex.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template ndindex_iterator::ndindex_iterator() { } 
template ndindex_iterator::ndindex_iterator(types::array const &shape, long first) : index(first), shape(shape) { } template types::array ndindex_iterator::operator*() const { types::array out; long mult = 1; for (long j = N - 1; j > 0; j--) { out[j] = (index / mult) % shape[j]; mult *= shape[j]; } out[0] = index / mult; return out; } template ndindex_iterator &ndindex_iterator::operator++() { ++index; return *this; } template ndindex_iterator &ndindex_iterator::operator+=(long n) { index += n; return *this; } template bool ndindex_iterator::operator!=(ndindex_iterator const &other) const { return index != other.index; } template bool ndindex_iterator::operator<(ndindex_iterator const &other) const { return index < other.index; } template long ndindex_iterator::operator-(ndindex_iterator const &other) const { return index - other.index; } template _ndindex::_ndindex() { } template _ndindex::_ndindex(types::array const &shape) : ndindex_iterator(shape, 0), shape(shape), end_iter(shape, std::accumulate(shape.begin(), shape.end(), 1L, std::multiplies())) { } template typename _ndindex::iterator &_ndindex::begin() { return *this; } template typename _ndindex::iterator const &_ndindex::begin() const { return *this; } template typename _ndindex::iterator _ndindex::end() const { return end_iter; } template _ndindex ndindex(Types... 
args) { return {types::make_tuple(args...)}; } template _ndindex ndindex(types::array const &args) { return {args}; } template _ndindex ndindex(types::pshape const &args) { return {args}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/negative.hpp000066400000000000000000000007401416264035500227550ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NEGATIVE_HPP #define PYTHONIC_NUMPY_NEGATIVE_HPP #include "pythonic/include/numpy/negative.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/neg.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME negative #define NUMPY_NARY_FUNC_SYM pythonic::operator_::neg #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/negative/000077500000000000000000000000001416264035500222435ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/negative/accumulate.hpp000066400000000000000000000002621416264035500250770ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NEGATIVE_ACCUMULATE_HPP #define PYTHONIC_NUMPY_NEGATIVE_ACCUMULATE_HPP #define UFUNC_NAME negative #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/newaxis.hpp000066400000000000000000000001751416264035500226330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NEWAXIS_HPP #define PYTHONIC_NUMPY_NEWAXIS_HPP #include "pythonic/include/numpy/newaxis.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nextafter.hpp000066400000000000000000000007411416264035500231540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NEXTAFTER_HPP #define PYTHONIC_NUMPY_NEXTAFTER_HPP #include "pythonic/include/numpy/nextafter.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define 
NUMPY_NARY_FUNC_NAME nextafter #define NUMPY_NARY_FUNC_SYM std::nextafter #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nextafter/000077500000000000000000000000001416264035500224415ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/nextafter/accumulate.hpp000066400000000000000000000002651416264035500253000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NEXTAFTER_ACCUMULATE_HPP #define PYTHONIC_NUMPY_NEXTAFTER_ACCUMULATE_HPP #define UFUNC_NAME nextafter #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/nonzero.hpp000066400000000000000000000044001416264035500226420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NONZERO_HPP #define PYTHONIC_NUMPY_NONZERO_HPP #include "pythonic/include/numpy/nonzero.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _nonzero(I begin, I end, O &out, types::array &curr, utils::int_<1>) { I start = begin; for (; begin != end; ++begin) { curr[M - 1] = begin - start; if (*begin) for (size_t i = 0; i < M; ++i) { *(out[i]) = curr[i]; ++out[i]; } } } template void _nonzero(I begin, I end, O &out, types::array &curr, utils::int_) { I start = begin; for (; begin != end; ++begin) { curr[M - N] = begin - start; _nonzero((*begin).begin(), (*begin).end(), out, curr, utils::int_()); } } } template types::array>, sizeof...(Is)> init_buffers(long sz, utils::index_sequence) { auto fwd = [](long ret, long) { return ret; }; // just to avoid a warning return {{fwd(sz, Is)...}}; // too much memory used } template auto nonzero(E const &expr) -> types::array>, E::value> { constexpr long N = E::value; typedef types::array>, E::value> out_type; long sz = expr.flat_size(); types::array>, N> out_buffers = init_buffers(sz, utils::make_index_sequence()); types::array out_iters; for (size_t i = 0; i < N; ++i) out_iters[i] = 
out_buffers[i]->data; types::array indices; _nonzero(expr.begin(), expr.end(), out_iters, indices, utils::int_()); types::array shape = { {(long)(out_iters[0] - out_buffers[0]->data)}}; out_type out; for (size_t i = 0; i < N; ++i) out[i] = types::ndarray>( std::move(out_buffers[i]), shape); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/not_equal.hpp000066400000000000000000000010151416264035500231360ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NOTEQUAL_HPP #define PYTHONIC_NUMPY_NOTEQUAL_HPP #include "pythonic/include/numpy/not_equal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/ne.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME not_equal #define NUMPY_NARY_FUNC_SYM pythonic::operator_::ne #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/not_equal/000077500000000000000000000000001416264035500224305ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/not_equal/accumulate.hpp000066400000000000000000000002651416264035500252670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NOT_EQUAL_ACCUMULATE_HPP #define PYTHONIC_NUMPY_NOT_EQUAL_ACCUMULATE_HPP #define UFUNC_NAME not_equal #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ones.hpp000066400000000000000000000016661416264035500221270ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ONES_HPP #define PYTHONIC_NUMPY_ONES_HPP #include "pythonic/include/numpy/ones.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ones(pS const &shape, dtype d) { return {(sutils::shape_t)shape, typename dtype::type(1)}; } template types::ndarray> ones(long size, dtype d) { return ones(types::pshape(size), d); 
} template types::ndarray>> ones(std::integral_constant, dtype d) { return ones(types::pshape>({}), d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ones_like.hpp000066400000000000000000000013001416264035500231140ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ONESLIKE_HPP #define PYTHONIC_NUMPY_ONESLIKE_HPP #include "pythonic/include/numpy/ones_like.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/ones.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto ones_like(E const &expr, dtype d) -> decltype(ones(sutils::getshape(expr), d)) { return ones(sutils::getshape(expr), d); } template auto ones_like(E const &expr, types::none_type) -> decltype(ones(sutils::getshape(expr), types::dtype_t())) { return ones(sutils::getshape(expr), types::dtype_t()); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/outer.hpp000066400000000000000000000031221416264035500223060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_OUTER_HPP #define PYTHONIC_NUMPY_OUTER_HPP #include "pythonic/include/numpy/outer.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray() + std::declval()), types::pshape> outer(types::ndarray const &a, types::ndarray const &b) { types::ndarray() + std::declval()), types::pshape> out(types::pshape{a.flat_size(), b.flat_size()}, builtins::None); auto iter = out.fbegin(); for (auto iter_a = a.fbegin(), end_a = a.fend(); iter_a != end_a; ++iter_a) { auto val_a = *iter_a; iter = std::transform(b.fbegin(), b.fend(), iter, [=](T1 val) { return val_a * val; }); } return out; } template auto outer(types::ndarray const &a, E1 const &b) -> decltype(outer(a, asarray(b))) { return outer(a, asarray(b)); } template auto outer(E0 const &a, types::ndarray const &b) -> decltype(outer(asarray(a), b)) { return outer(asarray(a), b); } template auto outer(E0 
const &a, E1 const &b) -> decltype(outer(asarray(a), asarray(b))) { return outer(asarray(a), asarray(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/partial_sum.hpp000066400000000000000000000066771416264035500235120ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PARTIAL_SUM_HPP #define PYTHONIC_NUMPY_PARTIAL_SUM_HPP #include "pythonic/include/numpy/partial_sum.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { /** * The cast is perform to be numpy compliant * * a = numpy.array([1, 256]) * In [10]: numpy.mod.accumulate(a, dtype=numpy.uint32) * Out[10]: array([1, 1], dtype=uint32) * In [11]: numpy.mod.accumulate(a, dtype=numpy.uint8) * Out[11]: array([1, 0], dtype=uint8) */ namespace { template struct _partial_sum { template A operator()(E const &e, F &o) { auto it_begin = e.begin(); A acc = _partial_sum{}((*it_begin), o); ++it_begin; for (; it_begin < e.end(); ++it_begin) acc = _partial_sum{}(*it_begin, o, acc); return acc; } template A operator()(E const &e, F &o, A acc) { for (auto const &value : e) acc = _partial_sum{}(value, o, acc); return acc; } }; template struct _partial_sum { template A operator()(E const &e, F &o) { auto it_begin = e.begin(); A acc = *it_begin; *o = acc; ++it_begin, ++o; for (; it_begin < e.end(); ++it_begin, ++o) { acc = Op{}(acc, (A)*it_begin); *o = acc; } return acc; } template A operator()(E e, F &o, A acc) { for (auto const &value : e) { acc = Op{}(acc, (A)value); *o = acc; ++o; } return acc; } }; } template types::ndarray> partial_sum(E const &expr, dtype d) { const long count = expr.flat_size(); types::ndarray> the_partial_sum{ types::make_tuple(count), builtins::None}; auto begin_it = the_partial_sum.begin(); _partial_sum{}(expr, begin_it); return the_partial_sum; } template auto partial_sum(E const &expr, long axis, dtype d) -> typename std::enable_if(expr))>::type { if (axis != 0) throw types::ValueError("axis out of bounds"); 
return partial_sum(expr); } template typename std::enable_if>::type partial_sum(E const &expr, long axis, dtype d) { if (axis < 0 || size_t(axis) >= E::value) throw types::ValueError("axis out of bounds"); auto shape = sutils::getshape(expr); partial_sum_type the_partial_sum{shape, builtins::None}; if (axis == 0) { auto it_begin = the_partial_sum.begin(); _partial_sum>{}(expr, it_begin); } else { std::transform( expr.begin(), expr.end(), the_partial_sum.begin(), [axis, d]( typename std::iterator_traits::value_type other) { return partial_sum(other, axis - 1, d); }); } return the_partial_sum; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/pi.hpp000066400000000000000000000001561416264035500215640ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PI_HPP #define PYTHONIC_NUMPY_PI_HPP #include "pythonic/include/numpy/pi.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/place.hpp000066400000000000000000000025011416264035500222340ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PLACE_HPP #define PYTHONIC_NUMPY_PLACE_HPP #include "pythonic/include/numpy/place.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::none_type place(types::ndarray &expr, types::ndarray const &mask, F const &values) { auto first = expr.fend(); auto viter = values.begin(), vend = values.end(); auto miter = mask.fbegin(); for (auto iter = expr.fbegin(), end = expr.fend(); iter != end; ++iter, ++miter) { if (*miter) { if (first == expr.fend()) first = iter; if (viter == vend) viter = values.begin(); *iter = *viter; ++viter; } } return builtins::None; } template types::none_type place(types::ndarray &expr, M const &mask, F const &values) { return place(expr, asarray(mask), values); } template types::none_type place(E &, M const &, F const &) { throw std::runtime_error("place only partially implemented"); } } 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/power.hpp000066400000000000000000000007451416264035500223140ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_POWER_HPP #define PYTHONIC_NUMPY_POWER_HPP #include "pythonic/include/numpy/power.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME power #define NUMPY_NARY_FUNC_SYM xsimd::pow // no need to adapt_type here, as it may turn a**2 into a**2.f #define NUMPY_NARY_RESHAPE_MODE reshape_type #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/power/000077500000000000000000000000001416264035500215755ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/power/accumulate.hpp000066400000000000000000000002511416264035500244270ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_POWER_ACCUMULATE_HPP #define PYTHONIC_NUMPY_POWER_ACCUMULATE_HPP #define UFUNC_NAME power #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/prod.hpp000066400000000000000000000007731416264035500221250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PROD_HPP #define PYTHONIC_NUMPY_PROD_HPP #include "pythonic/include/numpy/prod.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/reduce.hpp" #include "pythonic/operator_/imul.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto prod(Args &&... 
args) -> decltype(reduce(std::forward(args)...)) { return reduce(std::forward(args)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/product.hpp000066400000000000000000000002401416264035500226260ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PRODUCT_HPP #define PYTHONIC_NUMPY_PRODUCT_HPP #include "pythonic/include/numpy/product.hpp" #include "pythonic/numpy/prod.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ptp.hpp000066400000000000000000000010201416264035500217460ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PTP_HPP #define PYTHONIC_NUMPY_PTP_HPP #include "pythonic/include/numpy/ptp.hpp" #include "pythonic/numpy/min.hpp" #include "pythonic/numpy/max.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto ptp(E const &expr, long axis) -> decltype(max(expr, axis) - min(expr, axis)) { return max(expr, axis) - min(expr, axis); } template auto ptp(E const &expr) -> decltype(max(expr) - min(expr)) { return max(expr) - min(expr); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/put.hpp000066400000000000000000000025461416264035500217710ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PUT_HPP #define PYTHONIC_NUMPY_PUT_HPP #include "pythonic/include/numpy/put.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if::value, types::none_type>::type put(types::ndarray &expr, F const &ind, E const &v) { auto vind = asarray(ind); auto vv = asarray(v); for (long i = 0; i < ind.flat_size(); ++i) { auto val = *(vind.fbegin() + i); if (val >= expr.flat_size() || val < 0) throw types::ValueError("indice out of bound"); *(expr.fbegin() + val) = *(vv.fbegin() + i % vv.flat_size()); } return builtins::None; } template types::none_type put(types::ndarray &expr, long ind, T const &v) { if (ind >= 
expr.flat_size() || ind < 0) throw types::ValueError("indice out of bound"); *(expr.fbegin() + ind) = v; return builtins::None; } template types::none_type put(E &, M const &, V const &) { throw std::runtime_error("put only partially implemented"); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/putmask.hpp000066400000000000000000000017121416264035500226370ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_PUTMASK_HPP #define PYTHONIC_NUMPY_PUTMASK_HPP #include "pythonic/include/numpy/putmask.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::none_type putmask(types::ndarray &expr, E const &mask, F const &values) { auto amask = asarray(mask); auto avalues = asarray(values); auto iexpr = expr.fbegin(); auto n = avalues.flat_size(); for (long i = 0; i < expr.flat_size(); ++i) if (*(amask.fbegin() + i)) *(iexpr + i) = *(avalues.fbegin() + i % n); return builtins::None; } template types::none_type putmask(E &, M const &, F const &) { throw std::runtime_error("putmask only partially implemented"); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/rad2deg.hpp000066400000000000000000000007161416264035500224660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RAD2DEG_HPP #define PYTHONIC_NUMPY_RAD2DEG_HPP #include "pythonic/include/numpy/rad2deg.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/pi.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME rad2deg #define NUMPY_NARY_FUNC_SYM wrapper::rad2deg #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/radians.hpp000066400000000000000000000002431416264035500225720ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RADIANS_HPP #define 
PYTHONIC_NUMPY_RADIANS_HPP #include "pythonic/include/numpy/radians.hpp" #include "pythonic/numpy/deg2rad.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/000077500000000000000000000000001416264035500217215ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/random/binomial.hpp000066400000000000000000000027201416264035500242250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_BINOMIAL_HPP #define PYTHONIC_NUMPY_RANDOM_BINOMIAL_HPP #include "pythonic/include/numpy/random/binomial.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_expr.hpp" #include "pythonic/types/exceptions.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { namespace details { inline void parameters_check(double n, double p) { if (n < 0) throw pythonic::types::ValueError("n < 0"); if (p < 0) throw pythonic::types::ValueError("p < 0"); else if (p > 1) throw pythonic::types::ValueError("p > 1"); } } template types::ndarray binomial(double n, double p, pS const &shape) { details::parameters_check(n, p); types::ndarray result{shape, types::none_type()}; std::binomial_distribution distribution{(long)n, p}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto binomial(double n, double p, long size) -> decltype(binomial(n, p, types::array{{size}})) { return binomial(n, p, types::array{{size}}); } long binomial(double n, double p, types::none_type d) { details::parameters_check(n, p); return std::binomial_distribution{(long)n, p}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/bytes.hpp000066400000000000000000000014221416264035500235570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_BYTES_HPP #define PYTHONIC_NUMPY_RANDOM_BYTES_HPP #include "pythonic/include/numpy/random/bytes.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/str.hpp" #include "pythonic/utils/functor.hpp" 
#include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { types::str bytes(long length) { // dummy init + rewrite is faster than reserve && push_back types::str result(std::string(length, 0)); std::uniform_int_distribution distribution{0, 255}; std::generate(result.chars().begin(), result.chars().end(), [&]() { return static_cast(distribution(details::generator)); }); return result; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/chisquare.hpp000066400000000000000000000022401416264035500244140ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_CHISQUARE_HPP #define PYTHONIC_NUMPY_RANDOM_CHISQUARE_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/chisquare.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray chisquare(double df, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::chi_squared_distribution distribution{df}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto chisquare(double df, long size) -> decltype(chisquare(df, types::array{{size}})) { return chisquare(df, types::array{{size}}); } double chisquare(double df, types::none_type d) { return std::chi_squared_distribution{df}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/choice.hpp000066400000000000000000000100531416264035500236630ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_CHOICE_HPP #define PYTHONIC_NUMPY_RANDOM_CHOICE_HPP #include "pythonic/include/numpy/random/choice.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/builtins/NotImplementedError.hpp" #include "pythonic/numpy/random/randint.hpp" #include 
"pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { /*********************************************************** * Implementation with long as first argument **********************************************************/ template types::ndarray choice(long max, pS const &shape, bool replace, P const &p) { if (!replace) throw pythonic::builtins::NotImplementedError( "Choice without replacement is ! implemented, ask if you want " "it"); types::ndarray result{shape, types::none_type()}; std::discrete_distribution distribution{p.begin(), p.end()}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } template types::ndarray> choice(long max, long size, bool replace, P &&p) { return choice(max, types::pshape{size}, replace, std::forward

(p)); } template auto choice(long max, T &&size) -> decltype(randint(0, max, std::forward(size))) { return randint(0, max, std::forward(size)); } long choice(long max) { return randint(max); } /*********************************************************** * Implementation with array as first argument **********************************************************/ template typename T::dtype choice(T const &a) { // This is a numpy constraint static_assert(T::value == 1, "ValueError: a must be 1-dimensional"); return a.fast(randint(a.size())); } template types::ndarray choice(T const &a, pS const &shape) { // This is a numpy constraint static_assert(T::value == 1, "ValueError: a must be 1-dimensional"); types::ndarray result{shape, types::none_type()}; std::uniform_int_distribution distribution{0, a.size() - 1}; std::generate(result.fbegin(), result.fend(), [&]() { return a[distribution(details::generator)]; }); return result; } template types::ndarray> choice(T &&a, long size) { return choice(std::forward(a), types::pshape{size}); } template types::ndarray choice(T const &a, pS const &shape, bool replace, P const &p) { // This is a numpy constraint static_assert(T::value == 1, "ValueError: a must be 1-dimensional"); if (!replace) throw pythonic::builtins::NotImplementedError( "Choice without replacement is ! implemented, ask if you want " "it"); types::ndarray result{shape, types::none_type()}; std::discrete_distribution distribution{p.begin(), p.end()}; std::generate(result.fbegin(), result.fend(), [&]() { return a[distribution(details::generator)]; }); return result; } template types::ndarray> choice(T &&a, long size, bool replace, P &&p) { return choice(std::forward(a), types::pshape{size}, replace, std::forward

(p)); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/dirichlet.hpp000066400000000000000000000022611416264035500244020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_DIRICHLET_HPP #define PYTHONIC_NUMPY_RANDOM_DIRICHLET_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/dirichlet.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray dirichlet(double alpha, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::dirichlet_distribution distribution{alpha}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto dirichlet(double alpha, long size) -> decltype(dirichlet(alpha, types::array{{size}})) { return dirichlet(alpha, types::array{{size}}); } double dirichlet(double alpha, types::none_type d) { return std::dirichlet_distribution{alpha}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/exponential.hpp000066400000000000000000000024001416264035500247540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_EXPONENTIAL_HPP #define PYTHONIC_NUMPY_RANDOM_EXPONENTIAL_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/exponential.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray exponential(double scale, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::exponential_distribution distribution{1 / scale}; std::generate(result.fbegin(), result.fend(), [&]() { return 
distribution(details::generator); }); return result; } auto exponential(double scale, long size) -> decltype(exponential(scale, types::array{{size}})) { return exponential(scale, types::array{{size}}); } double exponential(double scale, types::none_type d) { return std::exponential_distribution{1 / scale}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/f.hpp000066400000000000000000000026571416264035500226710ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_F_HPP #define PYTHONIC_NUMPY_RANDOM_F_HPP #include "pythonic/include/numpy/random/f.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray f(double dfnum, double dfden, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::chi_squared_distribution distribution{dfnum}; std::chi_squared_distribution distribution2{dfden}; std::generate(result.fbegin(), result.fend(), [&]() { return (distribution(details::generator) * dfden) / (distribution2(details::generator) * dfnum); }); return result; } auto f(double dfnum, double dfden, long size) -> decltype(f(dfnum, dfden, types::array{{size}})) { return f(dfnum, dfden, types::array{{size}}); } double f(double dfnum, double dfden, types::none_type d) { return (std::chi_squared_distribution{dfnum}(details::generator) * dfden) / (std::chi_squared_distribution{dfden}(details::generator) * dfnum); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/gamma.hpp000066400000000000000000000023771416264035500235250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_GAMMA_HPP #define PYTHONIC_NUMPY_RANDOM_GAMMA_HPP #include "pythonic/include/numpy/random/gamma.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include 
"pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray gamma(double shape, double scale, pS const &array_shape) { types::ndarray result{array_shape, types::none_type()}; std::gamma_distribution distribution{shape, scale}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto gamma(double shape, double scale, long size) -> decltype(gamma(shape, scale, types::array{{size}})) { return gamma(shape, scale, types::array{{size}}); } double gamma(double shape, double scale, types::none_type d) { return std::gamma_distribution{shape, scale}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/geometric.hpp000066400000000000000000000022161416264035500244110ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_GEOMETRIC_HPP #define PYTHONIC_NUMPY_RANDOM_GEOMETRIC_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/geometric.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray geometric(double p, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::geometric_distribution distribution{p}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto geometric(double p, long size) -> decltype(geometric(p, types::array{{size}})) { return geometric(p, types::array{{size}}); } double geometric(double p, types::none_type d) { return std::geometric_distribution{p}(details::generator); } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/random/gumbel.hpp000066400000000000000000000024021416264035500237030ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_GUMBEL_HPP #define PYTHONIC_NUMPY_RANDOM_GUMBEL_HPP #include "pythonic/include/numpy/random/gumbel.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray gumbel(double loc, double scale, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::generate(result.fbegin(), result.fend(), [&]() { return gumbel(loc, scale); }); return result; } auto gumbel(double loc, double scale, long size) -> decltype(gumbel(loc, scale, types::array{{size}})) { return gumbel(loc, scale, types::array{{size}}); } double gumbel(double loc, double scale, types::none_type d) { double U = std::uniform_real_distribution{0., 1.}(details::generator); if (U < 1.0) { return loc - scale * xsimd::log(-xsimd::log(U)); } return gumbel(loc, scale); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/laplace.hpp000066400000000000000000000026251416264035500240400ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_LAPLACE_HPP #define PYTHONIC_NUMPY_RANDOM_LAPLACE_HPP #include "pythonic/include/numpy/random/laplace.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray laplace(double loc, double scale, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::generate(result.fbegin(), result.fend(), [&]() { return laplace(loc, scale); }); return result; } auto laplace(double 
loc, double scale, long size) -> decltype(laplace(loc, scale, types::array{{size}})) { return laplace(loc, scale, types::array{{size}}); } double laplace(double loc, double scale, types::none_type d) { double U = std::uniform_real_distribution{0., 1.}(details::generator); if (U >= 0.5) { U = loc - scale * xsimd::log(2.0 - U - U); } else if (U > 0.0) { U = loc + scale * xsimd::log(U + U); } else { U = laplace(loc, scale); } return U; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/logistic.hpp000066400000000000000000000024731416264035500242550ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_LOGISTIC_HPP #define PYTHONIC_NUMPY_RANDOM_LOGISTIC_HPP #include "pythonic/include/numpy/random/logistic.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray logistic(double loc, double scale, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::generate(result.fbegin(), result.fend(), [&]() { return logistic(loc, scale); }); return result; } auto logistic(double loc, double scale, long size) -> decltype(logistic(loc, scale, types::array{{size}})) { return logistic(loc, scale, types::array{{size}}); } double logistic(double loc, double scale, types::none_type d) { double U = std::uniform_real_distribution{0., 1.}(details::generator); if (U > 0.0) { return loc + scale * xsimd::log(U / (1 - U)); } return logistic(loc, scale); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/lognormal.hpp000066400000000000000000000025111416264035500244230ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_LOGNORMAL_HPP #define PYTHONIC_NUMPY_RANDOM_LOGNORMAL_HPP #include "pythonic/include/numpy/random/lognormal.hpp" #include 
"pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray lognormal(double mean, double sigma, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::lognormal_distribution distribution{mean, sigma}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto lognormal(double mean, double sigma, long size) -> decltype(lognormal(mean, sigma, types::array{{size}})) { return lognormal(mean, sigma, types::array{{size}}); } double lognormal(double mean, double sigma, types::none_type d) { return std::lognormal_distribution{mean, sigma}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/logseries.hpp000066400000000000000000000031711416264035500244300ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_LOGSERIES_HPP #define PYTHONIC_NUMPY_RANDOM_LOGSERIES_HPP #include "pythonic/include/numpy/random/logseries.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray logseries(double p, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::generate(result.fbegin(), result.fend(), [&]() { return logseries(p); }); return result; } auto logseries(double p, long size) -> decltype(logseries(p, types::array{{size}})) { return logseries(p, types::array{{size}}); } double logseries(double p, types::none_type d) { double q, r, U, V; double result; r = log1p(-p); while (1) { V = std::uniform_real_distribution{0., 1.}(details::generator); if (V >= 
p) { return 1; } U = std::uniform_real_distribution{0., 1.}(details::generator); q = -expm1(r * U); if (V <= q * q) { result = (double)floor(1 + log(V) / log(q)); if ((result < 1) || (V == 0.0)) { continue; } else { return result; } } if (V >= q) { return 1; } return 2; } } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/negative_binomial.hpp000066400000000000000000000027641416264035500261170ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_NEGATIVE_BINOMIAL_HPP #define PYTHONIC_NUMPY_RANDOM_NEGATIVE_BINOMIAL_HPP #include "pythonic/include/numpy/random/negative_binomial.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray negative_binomial(double n, double p, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::gamma_distribution distribution_gamma{n, (1 - p) / p}; std::generate(result.fbegin(), result.fend(), [&]() { return std::poisson_distribution{ (distribution_gamma(details::generator))}(details::generator); }); return result; } auto negative_binomial(double n, double p, long size) -> decltype(negative_binomial(n, p, types::array{{size}})) { return negative_binomial(n, p, types::array{{size}}); } double negative_binomial(double n, double p, types::none_type d) { std::gamma_distribution distribution_gamma{n, (1 - p) / p}; return std::poisson_distribution{ (distribution_gamma(details::generator))}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/normal.hpp000066400000000000000000000023121416264035500237200ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_NORMAL_HPP #define PYTHONIC_NUMPY_RANDOM_NORMAL_HPP #include "pythonic/include/numpy/random/normal.hpp" #include 
"pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray normal(double loc, double scale, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::normal_distribution distribution{loc, scale}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto normal(double loc, double scale, long size) -> decltype(normal(loc, scale, types::array{{size}})) { return normal(loc, scale, types::array{{size}}); } double normal(double loc, double scale, types::none_type d) { return std::normal_distribution{loc, scale}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/pareto.hpp000066400000000000000000000022421416264035500237240ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_PARETO_HPP #define PYTHONIC_NUMPY_RANDOM_PARETO_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/pareto.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray pareto(double a, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::exponential_distribution distribution{}; std::generate(result.fbegin(), result.fend(), [&]() { return expm1(distribution(details::generator) / a); }); return result; } auto pareto(double a, long size) -> decltype(pareto(a, types::array{{size}})) { return pareto(a, types::array{{size}}); } double pareto(double a, types::none_type d) { return expm1(std::exponential_distribution{}(details::generator) / a); } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/random/poisson.hpp000066400000000000000000000022121416264035500241210ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_POISSON_HPP #define PYTHONIC_NUMPY_RANDOM_POISSON_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/poisson.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray poisson(double lam, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::poisson_distribution distribution{lam}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto poisson(double lam, long size) -> decltype(poisson(lam, types::array{{size}})) { return poisson(lam, types::array{{size}}); } double poisson(double lam, types::none_type d) { return std::poisson_distribution{lam}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/power.hpp000066400000000000000000000021141416264035500235640ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_POWER_HPP #define PYTHONIC_NUMPY_RANDOM_POWER_HPP #include "pythonic/include/numpy/random/power.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray power(double a, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::generate(result.fbegin(), result.fend(), [&]() { return power(a); }); return result; } auto power(double a, long size) -> decltype(power(a, types::array{{size}})) { return power(a, types::array{{size}}); } double power(double a, 
types::none_type d) { return pow( -expm1(-std::exponential_distribution{}(details::generator)), 1. / a); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/rand.hpp000066400000000000000000000011461416264035500233600ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RAND_HPP #define PYTHONIC_NUMPY_RANDOM_RAND_HPP #include "pythonic/include/numpy/random/rand.hpp" #include "pythonic/numpy/random/random.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray> rand(T... shape) { return random(types::array{{shape...}}); } double rand() { return random(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/randint.hpp000066400000000000000000000036071416264035500240770ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RANDINT_HPP #define PYTHONIC_NUMPY_RANDOM_RANDINT_HPP #include "pythonic/include/numpy/random/randint.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template typename std::enable_if::value, types::ndarray>::type randint(long min, long max, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::uniform_int_distribution distribution{min, max - 1}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } template typename std::enable_if::value, types::ndarray>>::type randint(long min, long max, pS const &shape) { return randint(min, max, types::pshape{shape}); } template auto randint(long max, types::none_type, pS const &shape) -> decltype(randint(0, max, shape)) { return randint(0, max, shape); } auto randint(long min, long max, long size) -> decltype(randint(min, max, types::array{{size}})) { 
return randint(min, max, types::array{{size}}); } long randint(long max, types::none_type) { return std::uniform_int_distribution{0, max - 1}(details::generator); } long randint(long min, long max) { return std::uniform_int_distribution{min, max - 1}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/randn.hpp000066400000000000000000000012061416264035500235330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RANDN_HPP #define PYTHONIC_NUMPY_RANDOM_RANDN_HPP #include "pythonic/include/numpy/random/randn.hpp" #include "pythonic/numpy/random/standard_normal.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray> randn(T... shape) { return standard_normal(types::array{{shape...}}); } double randn() { return standard_normal(); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/random.hpp000066400000000000000000000021401416264035500237070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RANDOM_HPP #define PYTHONIC_NUMPY_RANDOM_RANDOM_HPP #include "pythonic/include/numpy/random/random.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray random(pS const &shape) { types::ndarray result{shape, types::none_type()}; std::uniform_real_distribution distribution{0., 1.}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto random(long size) -> decltype(random(types::array{{size}})) { return random(types::array{{size}}); } double random(types::none_type d) { return std::uniform_real_distribution{0., 1.}(details::generator); } } } 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/random_integers.hpp000066400000000000000000000013231416264035500256110ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RANDOM_INTEGERS_HPP #define PYTHONIC_NUMPY_RANDOM_RANDOM_INTEGERS_HPP #include "pythonic/include/numpy/random/random_integers.hpp" #include "pythonic/numpy/random/randint.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template auto random_integers(long min, long max, T &&size) -> decltype(randint(min, max, std::forward(size))) { return randint(min, max + 1, std::forward(size)); } long random_integers(long max) { return randint(1, max + 1); } long random_integers(long min, long max) { return randint(min, max + 1); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/random_sample.hpp000066400000000000000000000003201416264035500252460ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RANDOM_SAMPLE_HPP #define PYTHONIC_NUMPY_RANDOM_RANDOM_SAMPLE_HPP #include "pythonic/include/numpy/random/random_sample.hpp" #include "pythonic/numpy/random/random.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/ranf.hpp000066400000000000000000000002651416264035500233630ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RANF_HPP #define PYTHONIC_NUMPY_RANDOM_RANF_HPP #include "pythonic/include/numpy/random/ranf.hpp" #include "pythonic/numpy/random/random.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/rayleigh.hpp000066400000000000000000000023171416264035500242410ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RAYLEIGH_HPP #define PYTHONIC_NUMPY_RANDOM_RAYLEIGH_HPP #include "pythonic/include/numpy/random/rayleigh.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include #include PYTHONIC_NS_BEGIN 
namespace numpy { namespace random { template types::ndarray rayleigh(double scale, pS const &array_shape) { types::ndarray result{array_shape, types::none_type()}; std::generate(result.fbegin(), result.fend(), [&]() { return rayleigh(scale); }); return result; } auto rayleigh(double scale, long size) -> decltype(rayleigh(scale, types::array{{size}})) { return rayleigh(scale, types::array{{size}}); } double rayleigh(double scale, types::none_type d) { return scale * sqrt(-2.0 * log(1.0 - std::uniform_real_distribution{ 0., 1.}(details::generator))); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/sample.hpp000066400000000000000000000002731416264035500237150ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_SAMPLE_HPP #define PYTHONIC_NUMPY_RANDOM_SAMPLE_HPP #include "pythonic/include/numpy/random/sample.hpp" #include "pythonic/numpy/random/random.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/seed.hpp000066400000000000000000000007501416264035500233540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_SEED_HPP #define PYTHONIC_NUMPY_RANDOM_SEED_HPP #include "pythonic/include/numpy/random/seed.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { types::none_type seed(long s) { details::generator.seed(s); return builtins::None; } types::none_type seed(types::none_type) { details::generator.seed(); return builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/shuffle.hpp000066400000000000000000000007301416264035500240660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_SHUFFLE_HPP #define PYTHONIC_NUMPY_RANDOM_SHUFFLE_HPP #include "pythonic/include/numpy/random/shuffle.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::none_type shuffle(T &seq) { std::shuffle(seq.begin(), seq.end(), details::generator); return 
builtins::None; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/standard_exponential.hpp000066400000000000000000000017451416264035500266470ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_STANDARD_EXPONENTIAL_HPP #define PYTHONIC_NUMPY_RANDOM_STANDARD_EXPONENTIAL_HPP #include "pythonic/include/numpy/random/standard_exponential.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/random/exponential.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray standard_exponential(pS const &shape) { return exponential(1., shape); } auto standard_exponential(long size) -> decltype(standard_exponential(types::array{{size}})) { return standard_exponential(types::array{{size}}); } double standard_exponential(types::none_type d) { return exponential(1., d); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/standard_gamma.hpp000066400000000000000000000017151416264035500254000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_STANDARD_GAMMA_HPP #define PYTHONIC_NUMPY_RANDOM_STANDARD_GAMMA_HPP #include "pythonic/include/numpy/random/standard_gamma.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/random/gamma.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray standard_gamma(double s, pS const &shape) { return gamma(s, 1., shape); } auto standard_gamma(double s, long size) -> decltype(standard_gamma(s, types::array{{size}})) { return standard_gamma(s, types::array{{size}}); } double standard_gamma(double s, types::none_type d) { return gamma(s, 1., 
d); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/standard_normal.hpp000066400000000000000000000016661416264035500256130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_STANDARD_NORMAL_HPP #define PYTHONIC_NUMPY_RANDOM_STANDARD_NORMAL_HPP #include "pythonic/include/numpy/random/standard_normal.hpp" #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/random/normal.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray standard_normal(pS const &shape) { return normal(0., 1., shape); } auto standard_normal(long size) -> decltype(standard_normal(types::array{{size}})) { return standard_normal(types::array{{size}}); } double standard_normal(types::none_type d) { return normal(0., 1., d); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/uniform.hpp000066400000000000000000000024451416264035500241160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_RAYLEIGH_HPP #define PYTHONIC_NUMPY_RANDOM_RAYLEIGH_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/uniform.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray uniform(double low, double high, pS const &array_shape) { types::ndarray result{array_shape, types::none_type()}; std::generate(result.fbegin(), result.fend(), [&]() { return uniform(low, high); }); return result; } auto uniform(double low, double high, long size) -> decltype(uniform(low, high, types::array{{size}})) { return uniform(low, high, types::array{{size}}); } double uniform(double low, double high, 
types::none_type d) { return std::uniform_real_distribution{low, high}(details::generator); } } // namespace random } // namespace numpy PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/random/weibull.hpp000066400000000000000000000022011416264035500240700ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RANDOM_WEIBULL_HPP #define PYTHONIC_NUMPY_RANDOM_WEIBULL_HPP #include "pythonic/include/numpy/random/generator.hpp" #include "pythonic/include/numpy/random/weibull.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/functor.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace random { template types::ndarray weibull(double a, pS const &shape) { types::ndarray result{shape, types::none_type()}; std::weibull_distribution distribution{a}; std::generate(result.fbegin(), result.fend(), [&]() { return distribution(details::generator); }); return result; } auto weibull(double a, long size) -> decltype(weibull(a, types::array{{size}})) { return weibull(a, types::array{{size}}); } double weibull(double a, types::none_type d) { return std::weibull_distribution{a}(details::generator); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ravel.hpp000066400000000000000000000007571416264035500222740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RAVEL_HPP #define PYTHONIC_NUMPY_RAVEL_HPP #include "pythonic/include/numpy/ravel.hpp" #include "pythonic/numpy/ndarray/reshape.hpp" #include "pythonic/utils/numpy_conversion.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> ravel(types::ndarray const &expr) { return expr.reshape(types::pshape{expr.flat_size()}); } NUMPY_EXPR_TO_NDARRAY0_IMPL(ravel); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/real.hpp000066400000000000000000000013011416264035500220700ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_REAL_HPP #define PYTHONIC_NUMPY_REAL_HPP #include 
"pythonic/include/numpy/real.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/list.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto real(E &&expr) -> decltype(builtins::getattr(types::attr::REAL{}, std::forward(expr))) { return builtins::getattr(types::attr::REAL{}, std::forward(expr)); } template auto real(types::list const &expr) -> decltype(real(numpy::functor::asarray{}(expr))) { return real(numpy::functor::asarray{}(expr)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/reciprocal.hpp000066400000000000000000000006741416264035500233040ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RECIPROCAL_HPP #define PYTHONIC_NUMPY_RECIPROCAL_HPP #include "pythonic/include/numpy/reciprocal.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME reciprocal #define NUMPY_NARY_FUNC_SYM wrapper::reciprocal #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/reduce.hpp000066400000000000000000000243621416264035500224300ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_REDUCE_HPP #define PYTHONIC_NUMPY_REDUCE_HPP #include "pythonic/include/numpy/reduce.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/utils/neutral.hpp" #ifdef USE_XSIMD #include #endif #include PYTHONIC_NS_BEGIN namespace numpy { template struct _reduce { template F operator()(E &&e, F acc) { for (auto &&value : std::forward(e)) acc = _reduce{}( std::forward(value), acc); return acc; } }; template struct _reduce { template F operator()(E &&e, F acc) { for (auto value : std::forward(e)) { Op{}(acc, value); } return acc; } }; template struct _reduce { template F operator()(E &&e, F acc, Indices... 
indices) { for (long i = 0, n = e.template shape::type::value - N>(); i < n; ++i) { acc = _reduce{}( e, acc, indices..., i); } return acc; } }; template struct _reduce { template F operator()(E &&e, F acc, Indices... indices) { for (long i = 0, n = e.template shape::type::value - 1>(); i < n; ++i) { Op{}(acc, e.load(indices..., i)); } return acc; } }; #ifdef USE_XSIMD template F vreduce(E e, F acc) { using T = typename E::dtype; using vT = xsimd::simd_type; static const size_t vN = vT::size; const long n = e.size(); auto viter = vectorizer::vbegin(e), vend = vectorizer::vend(e); const long bound = std::distance(viter, vend); if (bound > 0) { auto vacc = *viter; for (++viter; viter != vend; ++viter) Op{}(vacc, *viter); alignas(sizeof(vT)) T stored[vN]; vacc.store_aligned(&stored[0]); for (size_t j = 0; j < vN; ++j) Op{}(acc, stored[j]); } auto iter = e.begin() + bound * vN; for (long i = bound * vN; i < n; ++i, ++iter) { Op{}(acc, *iter); } return acc; } template struct _reduce { template F operator()(E &&e, F acc) { return vreduce(std::forward(e), acc); } }; template struct _reduce { template F operator()(E &&e, F acc) { return vreduce(std::forward(e), acc); } }; #else template struct _reduce : _reduce { }; template struct _reduce : _reduce { }; #endif template struct reduce_helper; template struct reduce_helper { template reduce_result_type operator()(E const &expr, T p) const { if (utils::no_broadcast_ex(expr)) return _reduce{}(expr, p); else return _reduce{}(expr, p); } }; template struct reduce_helper { template reduce_result_type operator()(E const &expr, T p) const { if (utils::no_broadcast_vectorize(expr)) return _reduce{}(expr, p); else return _reduce{}(expr, p); } }; template typename std::enable_if< std::is_scalar::value || types::is_complex::value, E>::type reduce(E const &expr, types::none_type) { return expr; } template typename std::enable_if< std::is_scalar::value || types::is_complex::value, E>::type reduce(E const &array, long axis) { if (axis != 
0) throw types::ValueError("axis out of bounds"); return reduce(array); } template typename std::enable_if::value, reduce_result_type>::type reduce(E const &expr, types::none_type axis, dtype) { using rrt = reduce_result_type; bool constexpr is_vectorizable = E::is_vectorizable && !std::is_same::value && std::is_same::value; rrt p = utils::neutral::value; return reduce_helper{}(expr, p); } template typename std::enable_if>::type reduce(E const &array, long axis, dtype d, types::none_type) { if (axis != 0) throw types::ValueError("axis out of bounds"); return reduce(array, types::none_type{}, d); } template typename std::enable_if>::type reduce(E const &array, long axis, types::none_type, Out &&out) { if (axis != 0) throw types::ValueError("axis out of bounds"); return std::forward(out) = reduce(array); } template struct _reduce_axisb { template void operator()(E &&e, F &&f, long axis, EIndices &&e_indices, FIndices &&f_indices) { for (long i = 0, n = e.template shape::type::value - N>(); i < n; ++i) { _reduce_axisb{}( e, f, axis, std::tuple_cat(e_indices, std::make_tuple(i)), std::tuple_cat(f_indices, std::make_tuple(i))); } } }; template struct _reduce_axisb { template void helper(E &&e, F &&f, EIndices &&e_indices, FIndices &&f_indices, utils::index_sequence, utils::index_sequence) { f.template update(e.load(std::get(e_indices)...), (long)std::get(f_indices)...); } template void operator()(E &&e, F &&f, long axis, EIndices &&e_indices, FIndices &&f_indices) { helper( std::forward(e), std::forward(f), e_indices, f_indices, utils::make_index_sequence< std::tuple_size::type>::value>(), utils::make_index_sequence< std::tuple_size::type>::value>()); } }; template struct _reduce_axis { template void operator()(E &&e, F &&f, long axis, EIndices &&e_indices, FIndices &&f_indices) { if (axis == std::decay::type::value - N) { for (long i = 0, n = e.template shape::type::value - N>(); i < n; ++i) { _reduce_axisb{}( e, f, axis, std::tuple_cat(e_indices, std::make_tuple(i)), 
std::forward(f_indices)); } } else { for (long i = 0, n = e.template shape::type::value - N>(); i < n; ++i) { _reduce_axis{}( e, f, axis, std::tuple_cat(e_indices, std::make_tuple(i)), std::tuple_cat(f_indices, std::make_tuple(i))); } } } }; template struct _reduce_axis { template void operator()(E &&e, F &&f, long axis, EIndices &&e_indices, FIndices &&f_indices) { } }; template typename std::enable_if>::type reduce(E const &array, long axis, dtype, types::none_type) { if (axis < 0) axis += E::value; if (axis < 0 || size_t(axis) >= E::value) throw types::ValueError("axis out of bounds"); types::array shp; auto tmp = sutils::getshape(array); auto next = std::copy(tmp.begin(), tmp.begin() + axis, shp.begin()); std::copy(tmp.begin() + axis + 1, tmp.end(), next); reduced_type out{shp, builtins::None}; std::fill(out.begin(), out.end(), utils::neutral::value); return reduce(array, axis, types::none_type{}, out); } template typename std::enable_if>::type reduce(E const &array, long axis, types::none_type, Out &&out) { if (axis < 0) axis += E::value; if (axis < 0 || size_t(axis) >= E::value) throw types::ValueError("axis out of bounds"); if (utils::no_broadcast(array)) { std::fill(out.begin(), out.end(), utils::neutral::value); _reduce_axis{}(array, std::forward(out), axis, std::make_tuple(), std::make_tuple()); return std::forward(out); } else { if (axis == 0) { std::fill(out.begin(), out.end(), utils::neutral::value); return _reduce{}( array, std::forward(out)); } else { std::transform(array.begin(), array.end(), out.begin(), [axis](typename E::const_iterator::value_type other) { return reduce(other, axis - 1); }); return std::forward(out); } } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/remainder.hpp000066400000000000000000000007451416264035500231260ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_REMAINDER_HPP #define PYTHONIC_NUMPY_REMAINDER_HPP #include "pythonic/include/numpy/remainder.hpp" #include "pythonic/utils/functor.hpp" #include 
"pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME remainder #define NUMPY_NARY_FUNC_SYM wrapper::remainder #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/remainder/000077500000000000000000000000001416264035500224075ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/remainder/accumulate.hpp000066400000000000000000000002651416264035500252460ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_REMAINDER_ACCUMULATE_HPP #define PYTHONIC_NUMPY_REMAINDER_ACCUMULATE_HPP #define UFUNC_NAME remainder #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/repeat.hpp000066400000000000000000000032331416264035500224330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_REPEAT_HPP #define PYTHONIC_NUMPY_REPEAT_HPP #include "pythonic/include/numpy/repeat.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::value>> repeat(types::ndarray const &expr, long repeats, long axis) { constexpr auto N = std::tuple_size::value; if (axis < 0) axis += N; auto shape = sutils::getshape(expr); const long stride = std::accumulate(shape.begin() + axis + 1, shape.end(), 1L, std::multiplies()); shape[axis] *= repeats; types::ndarray::value>> out( shape, builtins::None); auto out_iter = out.fbegin(); for (auto iter = expr.fbegin(), end = expr.fend(); iter != end; iter += stride) for (int i = 0; i < repeats; ++i) out_iter = std::copy(iter, iter + stride, out_iter); return out; } template types::ndarray> repeat(types::ndarray const &expr, long repeats, types::none_type axis) { types::ndarray> out( types::pshape{expr.flat_size() * repeats}, builtins::None); auto out_iter = 
out.fbegin(); for (auto iter = expr.fbegin(), end = expr.fend(); iter != end; ++iter) for (int i = 0; i < repeats; ++i) *out_iter++ = *iter; return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(repeat); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/resize.hpp000066400000000000000000000003171416264035500224540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RESIZE_HPP #define PYTHONIC_NUMPY_RESIZE_HPP #include "pythonic/include/numpy/resize.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/ndarray/reshape.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/right_shift.hpp000066400000000000000000000010241416264035500234610ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RIGHTSHIFT_HPP #define PYTHONIC_NUMPY_RIGHTSHIFT_HPP #include "pythonic/include/numpy/right_shift.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/rshift.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME right_shift #define NUMPY_NARY_FUNC_SYM operator_::rshift #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/right_shift/000077500000000000000000000000001416264035500227535ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/right_shift/accumulate.hpp000066400000000000000000000002731416264035500256110ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RIGHT_SHIFT_ACCUMULATE_HPP #define PYTHONIC_NUMPY_RIGHT_SHIFT_ACCUMULATE_HPP #define UFUNC_NAME right_shift #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/rint.hpp000066400000000000000000000012551416264035500221310ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_RINT_HPP #define PYTHONIC_NUMPY_RINT_HPP #include "pythonic/include/numpy/rint.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" 
#include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template T rint(T const &v) { return std::nearbyint(v); } template std::complex rint(std::complex const &v) { return {std::nearbyint(v.real()), std::nearbyint(v.imag())}; } } #define NUMPY_NARY_FUNC_NAME rint #define NUMPY_NARY_FUNC_SYM wrapper::rint #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/roll.hpp000066400000000000000000000044651416264035500221330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ROLL_HPP #define PYTHONIC_NUMPY_ROLL_HPP #include "pythonic/include/numpy/roll.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray roll(types::ndarray const &expr, long shift) { while (shift < 0) shift += expr.flat_size(); shift %= expr.flat_size(); types::ndarray out(expr._shape, builtins::None); std::copy(expr.fbegin(), expr.fend() - shift, std::copy(expr.fend() - shift, expr.fend(), out.fbegin())); return out; } namespace { template To _roll(To to, From from, long, long, types::array const &, utils::int_) { *to = *from; return to + 1; } template To _roll(To to, From from, long shift, long axis, types::array const &shape, utils::int_) { long dim = shape[M]; long offset = std::accumulate(shape.begin() + M + 1, shape.end(), 1L, std::multiplies()); if (axis == M) { const From split = from + (dim - shift) * offset; for (From iter = split, end = from + dim * offset; iter != end; iter += offset) to = _roll(to, iter, shift, axis, shape, utils::int_()); for (From iter = from, end = split; iter != end; iter += offset) to = _roll(to, iter, shift, axis, shape, utils::int_()); } else { for (From iter = from, end = from + dim * offset; iter != end; iter += offset) to = _roll(to, iter, shift, axis, shape, utils::int_()); } return to; } } template types::ndarray roll(types::ndarray 
const &expr, long shift, long axis) { auto expr_shape = sutils::array(expr._shape); while (shift < 0) shift += expr_shape[axis]; types::ndarray out(expr._shape, builtins::None); _roll(out.fbegin(), expr.fbegin(), shift, axis, expr_shape, utils::int_<0>()); return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(roll); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/rollaxis.hpp000066400000000000000000000020161416264035500230060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ROLLAXIS_HPP #define PYTHONIC_NUMPY_ROLLAXIS_HPP #include "pythonic/include/numpy/rollaxis.hpp" #include "pythonic/numpy/transpose.hpp" #include "pythonic/numpy/copy.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::value>> rollaxis(types::ndarray const &a, long axis, long start) { long constexpr N = std::tuple_size::value; long t[N]; if (start >= axis) { for (long i = 0; i < axis; ++i) t[i] = i; for (long i = axis + 1; i < start; ++i) t[i - 1] = i; t[start - 1] = axis; for (long i = start; i < N; ++i) t[i] = i; } else { for (long i = 0; i < start; ++i) t[i] = i; t[start] = axis; for (long i = start + 1; i <= axis; ++i) t[i] = i - 1; for (long i = axis + 1, n = N; i < n; ++i) t[i] = i; } return _transpose(a, t); } NUMPY_EXPR_TO_NDARRAY0_IMPL(rollaxis); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/rot90.hpp000066400000000000000000000024411416264035500221300ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ROT90_HPP #define PYTHONIC_NUMPY_ROT90_HPP #include "pythonic/include/numpy/rot90.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/copy.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray::value>> rot90(types::ndarray const &expr, int k) { auto constexpr N = std::tuple_size::value; if (k % 4 == 0) return copy(expr); types::array shape = sutils::getshape(expr); if (k % 4 != 2) std::swap(shape[0], shape[1]); types::ndarray> out(shape, 
builtins::None); if (k % 4 == 1) { for (int i = 0; i < shape[1]; ++i) for (int j = 0; j < shape[0]; ++j) out[shape[0] - 1 - j][i] = expr[i][j]; } else if (k % 4 == 2) { for (int i = 0; i < shape[1]; ++i) for (int j = 0; j < shape[0]; ++j) out[shape[0] - 1 - j][shape[1] - 1 - i] = expr[j][i]; } else { for (int i = 0; i < shape[1]; ++i) for (int j = 0; j < shape[0]; ++j) out[j][shape[1] - 1 - i] = expr[i][j]; } return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(rot90) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/round.hpp000066400000000000000000000002341416264035500223000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ROUND_HPP #define PYTHONIC_NUMPY_ROUND_HPP #include "pythonic/include/numpy/round.hpp" #include "pythonic/numpy/around.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/round_.hpp000066400000000000000000000002371416264035500224420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ROUND__HPP #define PYTHONIC_NUMPY_ROUND__HPP #include "pythonic/include/numpy/round_.hpp" #include "pythonic/numpy/around.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/searchsorted.hpp000066400000000000000000000052451416264035500236460ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SEARCHSORTED_HPP #define PYTHONIC_NUMPY_SEARCHSORTED_HPP #include "pythonic/include/numpy/searchsorted.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/utils/int_.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/str.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/asarray.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace details { template long searchsorted(U const &a, T const &v, bool left) { if (left) return std::lower_bound(a.begin(), a.end(), v) - a.begin(); else return std::upper_bound(a.begin(), a.end(), v) - a.begin(); } bool issearchsortedleft(types::str const &side) { if (side[0] == "l") return true; else if 
(side[0] == "r") return false; else throw types::ValueError("'" + side + "' is an invalid value for keyword 'side'"); } } template typename std::enable_if::value, long>::type searchsorted(U const &a, T const &v, types::str const &side) { bool left = details::issearchsortedleft(side); return details::searchsorted(a, v, left); } namespace { template void _search_sorted(E const &a, I0 ibegin, I0 iend, I1 obegin, bool left, utils::int_<1>) { for (; ibegin != iend; ++ibegin, ++obegin) *obegin = details::searchsorted(a, *ibegin, left); } template void _search_sorted(E const &a, I0 ibegin, I0 iend, I1 obegin, bool left, utils::int_) { for (; ibegin != iend; ++ibegin, ++obegin) _search_sorted(a, (*ibegin).begin(), (*ibegin).end(), (*obegin).begin(), left, utils::int_()); } } template typename std::enable_if< types::is_numexpr_arg::value, types::ndarray>>::type searchsorted(T const &a, E const &v, types::str const &side) { static_assert(T::value == 1, "Not Implemented : searchsorted for dimension != 1"); bool left = details::issearchsortedleft(side); types::ndarray> out(asarray(v)._shape, builtins::None); _search_sorted(a, v.begin(), v.end(), out.begin(), left, utils::int_()); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/select.hpp000066400000000000000000000110771416264035500224370ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SELECT_HPP #define PYTHONIC_NUMPY_SELECT_HPP #include "pythonic/include/numpy/select.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/int_.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { // TODO It could certainly be represent as a numpy_***_expr as each // elements // is computed without information from neighbor. 
// template long _select(Ichoice ibegin, Ichoice iend, Iout obegin, Isel sbegin, Icond cbegin, long size, utils::int_<1>) { static_assert(std::is_same::value, ""); for (; ibegin != iend && size != 0; ++ibegin, ++obegin, ++sbegin, ++cbegin) { // If elements it not already selected && condition match, copy it! if (!*sbegin && *cbegin) { *obegin = *ibegin; *sbegin = true; size--; } } return size; } template long _select(Ichoice ibegin, Ichoice iend, Iout obegin, Isel sbegin, Icond cbegin, long size, utils::int_) { for (; ibegin != iend && size != 0; ++ibegin, ++obegin, ++sbegin, ++cbegin) size = _select((*ibegin).begin(), (*ibegin).end(), (*obegin).begin(), (*sbegin).begin(), (*cbegin).begin(), size, utils::int_()); return size; } } template types::ndarray> select(C const &condlist, L const &choicelist, typename L::dtype _default) { constexpr size_t N = L::value - 1; auto &&choicelist0_shape = sutils::getshape(choicelist[0]); types::ndarray> out( choicelist0_shape, _default); types::ndarray> selected( choicelist0_shape, false); long size = selected.flat_size(); for (long i = 0; i < condlist.size() && size != 0; i++) size = _select(choicelist[i].begin(), choicelist[i].end(), out.begin(), selected.begin(), condlist.begin(), size, utils::int_()); return out; } template types::ndarray> select_helper(C const &condlist, L const &choicelist, T _default) { types::ndarray> out(sutils::getshape(choicelist[0]), _default); for (long i = 0; i < out.flat_size(); ++i) for (long j = 0; j < (long)condlist.size(); ++j) if (condlist[j].buffer[i]) { out.buffer[i] = choicelist[j].buffer[i]; break; } return out; } template typename std::enable_if::value == std::tuple_size::value, types::ndarray>::type select(types::list> const &condlist, types::list> const &choicelist, T _default) { return select_helper(condlist, choicelist, _default); } template typename std::enable_if::value == std::tuple_size::value, types::ndarray>::type select(types::static_list, M> const &condlist, 
types::static_list, M> const &choicelist, T _default) { return select_helper(condlist, choicelist, _default); } template typename std::enable_if::value == std::tuple_size::value, types::ndarray>::type select(types::static_list, M> const &condlist, types::list> const &choicelist, T _default) { return select_helper(condlist, choicelist, _default); } template typename std::enable_if::value == std::tuple_size::value, types::ndarray>::type select(types::list> const &condlist, types::static_list, M> const &choicelist, T _default) { return select_helper(condlist, choicelist, _default); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/setdiff1d.hpp000066400000000000000000000055351416264035500230330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SETDIFF1D_HPP #define PYTHONIC_NUMPY_SETDIFF1D_HPP #include "pythonic/include/numpy/setdiff1d.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/utils/pdqsort.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace impl { template OutputIterator set_difference_unique(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result) { while (first1 != last1 && first2 != last2) { auto const t1 = *first1; auto const t2 = *first2; if (t1 < t2) { *result = t1; while (*++first1 == t1) ; ++result; } else if (t2 < t1) while (*++first2 == t2) ; else { while (*++first1 == t1) ; while (*++first2 == t2) ; } } while (first1 != last1) { auto const t1 = *first1; *result = t1; while (*++first1 == t1) ; ++result; } return result; } } // namespace impl template types::ndarray::type, typename types::dtype_of::type>::type, types::pshape> setdiff1d(T const &ar1, U const &ar2, bool assume_unique) { using dtype = typename __combined::type, typename types::dtype_of::type>::type; auto far1 = numpy::functor::array{}(ar1); auto far2 = numpy::functor::array{}(ar2); if (assume_unique) { 
pdqsort(far1.fbegin(), far1.fend()); pdqsort(far2.fbegin(), far2.fend()); dtype *out = (dtype *)malloc(far1.flat_size() * far2.flat_size() * sizeof(dtype)); dtype *out_last = std::set_difference(far1.fbegin(), far1.fend(), far2.fbegin(), far2.fend(), out); auto size = out_last - out; out = (dtype *)realloc(out, size * sizeof(dtype)); return {out, types::pshape(size), types::ownership::owned}; } else { pdqsort(far1.fbegin(), far1.fend()); pdqsort(far2.fbegin(), far2.fend()); dtype *out = (dtype *)malloc(far1.flat_size() * far2.flat_size() * sizeof(dtype)); dtype *out_last = impl::set_difference_unique( far1.fbegin(), far1.fend(), far2.fbegin(), far2.fend(), out); auto size = out_last - out; out = (dtype *)realloc(out, size * sizeof(dtype)); return {out, types::pshape(size), types::ownership::owned}; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/shape.hpp000066400000000000000000000007641416264035500222610ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SHAPE_HPP #define PYTHONIC_NUMPY_SHAPE_HPP #include "pythonic/include/numpy/shape.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto shape(types::ndarray const &e) -> decltype(e._shape) { return e._shape; } template auto shape(E const &e) -> decltype(sutils::getshape(e)) { return sutils::getshape(e); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/short_.hpp000066400000000000000000000011541416264035500224510ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SHORT__HPP #define PYTHONIC_NUMPY_SHORT__HPP #include "pythonic/include/numpy/short_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { short short_() { return {}; } template short short_(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME short_ #define 
NUMPY_NARY_FUNC_SYM details::short_ #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sign.hpp000066400000000000000000000006341416264035500221150ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SIGN_HPP #define PYTHONIC_NUMPY_SIGN_HPP #include "pythonic/include/numpy/sign.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME sign #define NUMPY_NARY_FUNC_SYM xsimd::sign #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/signbit.hpp000066400000000000000000000006541416264035500226160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SIGNBIT_HPP #define PYTHONIC_NUMPY_SIGNBIT_HPP #include "pythonic/include/numpy/signbit.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME signbit #define NUMPY_NARY_FUNC_SYM xsimd::signbit #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sin.hpp000066400000000000000000000006271416264035500217500ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SIN_HPP #define PYTHONIC_NUMPY_SIN_HPP #include "pythonic/include/numpy/sin.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME sin #define NUMPY_NARY_FUNC_SYM xsimd::sin #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sinh.hpp000066400000000000000000000006341416264035500221160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SINH_HPP #define PYTHONIC_NUMPY_SINH_HPP #include "pythonic/include/numpy/sinh.hpp" #include 
"pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME sinh #define NUMPY_NARY_FUNC_SYM xsimd::sinh #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/size.hpp000066400000000000000000000005441416264035500221270ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SIZE_HPP #define PYTHONIC_NUMPY_SIZE_HPP #include "pythonic/include/numpy/size.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto size(E const &e) -> decltype(e.flat_size()) { return e.flat_size(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sometrue.hpp000066400000000000000000000002421416264035500230130ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SOMETRUE_HPP #define PYTHONIC_NUMPY_SOMETRUE_HPP #include "pythonic/include/numpy/sometrue.hpp" #include "pythonic/numpy/any.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sort.hpp000066400000000000000000000016131416264035500221420ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SORT_HPP #define PYTHONIC_NUMPY_SORT_HPP #include "pythonic/include/numpy/sort.hpp" #include "pythonic/numpy/ndarray/sort.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> sort(E const &expr, long axis) { auto out = functor::array{}(expr); ndarray::sort(out, axis); return out; } template types::ndarray> sort(E const &expr, types::none_type) { auto out = functor::array{}(expr).flat(); ndarray::sort(out, types::none_type{}); return out; } template types::ndarray> sort(E const &expr, long axis, types::str const &kind) { auto out = functor::array{}(expr); ndarray::sort(out, axis, kind); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sort_complex.hpp000066400000000000000000000003241416264035500236670ustar00rootroot00000000000000#ifndef 
PYTHONIC_NUMPY_SORTCOMPLEX_HPP #define PYTHONIC_NUMPY_SORTCOMPLEX_HPP #include "pythonic/include/numpy/sort_complex.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/sort.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/spacing.hpp000066400000000000000000000006551416264035500226040ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SPACING_HPP #define PYTHONIC_NUMPY_SPACING_HPP #include "pythonic/include/numpy/spacing.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME spacing #define NUMPY_NARY_FUNC_SYM wrapper::spacing #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/split.hpp000066400000000000000000000021441416264035500223060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SPLIT_HPP #define PYTHONIC_NUMPY_SPLIT_HPP #include "pythonic/include/numpy/split.hpp" #include "pythonic/numpy/array_split.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::list::value>>> split(types::ndarray const &a, long nb_split) { if (a.flat_size() % nb_split != 0) throw types::ValueError("array split does ! 
result in an equal division"); return array_split(a, nb_split); } template typename std::enable_if< types::is_iterable::value, types::list::value>>>>::type split(types::ndarray const &a, I const &split_mask) { return array_split(a, split_mask); } template types::list>> split(E const &a, I const &) { throw std::runtime_error("split only partially implemented"); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sqrt.hpp000066400000000000000000000006341416264035500221460ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SQRT_HPP #define PYTHONIC_NUMPY_SQRT_HPP #include "pythonic/include/numpy/sqrt.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME sqrt #define NUMPY_NARY_FUNC_SYM xsimd::sqrt #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/square.hpp000066400000000000000000000006611416264035500224550ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SQUARE_HPP #define PYTHONIC_NUMPY_SQUARE_HPP #include "pythonic/include/numpy/square.hpp" #include "pythonic/types/numpy_op_helper.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME square #define NUMPY_NARY_FUNC_SYM wrapper::square #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/stack.hpp000066400000000000000000000046641416264035500222710ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_STACK_HPP #define PYTHONIC_NUMPY_STACK_HPP #include "pythonic/builtins/len.hpp" #include "pythonic/builtins/ValueError.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> stack(ArraySequence const &args, long axis) { if (builtins::len(args) == 0) throw pythonic::types::ValueError("need at least one array to stack"); auto 
shape = sutils::getshape(args[0]); constexpr long N = std::tuple_size::value; // The length of the shape array. auto values = sutils::array( shape); // You can't do shape[i] but you can do shape.array()[i] types::array new_shape; // A new array that's 1 element longer than shape. // Insert a "0" at the position indicated by axis. for (long i = 0; i < N + 1; i++) { if (i < axis) new_shape[i] = values[i]; if (i == axis) new_shape[i] = 1; if (i > axis) new_shape[i] = values[i - 1]; } // Create a new empty list. types::list>> bi(0); // Push the resized arrays into the list. for (auto &&arg : args) { bi.push_back(arg.reshape(new_shape)); } // Call concatenate on this list. return concatenate(bi, axis); } template types::ndarray::dtype, types::array::value + 1>> stack(std::tuple const &args, long axis, utils::index_sequence) { types::array< types::ndarray< typename details::stack_helper_t::dtype, types::array::value>>, sizeof...(Tys)> vargs{{std::get(args)...}}; return stack(vargs, axis); } template types::ndarray::dtype, types::array::value + 1>> stack(std::tuple const &args, long axis) { return stack(args, axis, utils::make_index_sequence()); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/std_.hpp000066400000000000000000000006621416264035500221070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_STD_HPP #define PYTHONIC_NUMPY_STD_HPP #include "pythonic/include/numpy/std_.hpp" #include "pythonic/numpy/var.hpp" #include "pythonic/numpy/sqrt.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto std_(Args &&... 
args) -> decltype(functor::sqrt{}(var(std::forward(args)...))) { return functor::sqrt{}(var(std::forward(args)...)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/subtract.hpp000066400000000000000000000010151416264035500227760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SUBTRACT_HPP #define PYTHONIC_NUMPY_SUBTRACT_HPP #include "pythonic/include/numpy/subtract.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/sub.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME subtract #define NUMPY_NARY_FUNC_SYM pythonic::operator_::sub #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/subtract/000077500000000000000000000000001416264035500222705ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/subtract/accumulate.hpp000066400000000000000000000002621416264035500251240ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SUBTRACT_ACCUMULATE_HPP #define PYTHONIC_NUMPY_SUBTRACT_ACCUMULATE_HPP #define UFUNC_NAME subtract #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/sum.hpp000066400000000000000000000004611416264035500217570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SUM_HPP #define PYTHONIC_NUMPY_SUM_HPP #include "pythonic/include/numpy/sum.hpp" #include "pythonic/numpy/reduce.hpp" #include "pythonic/operator_/iadd.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/swapaxes.hpp000066400000000000000000000012231416264035500230030ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_SWAPAXES_HPP #define PYTHONIC_NUMPY_SWAPAXES_HPP #include "pythonic/include/numpy/swapaxes.hpp" #include "pythonic/numpy/transpose.hpp" PYTHONIC_NS_BEGIN namespace numpy { 
template auto swapaxes(T &&a, int axis1, int axis2) -> decltype(functor::transpose{}( std::forward(a), std::declval::type::value>>())) { constexpr long N = std::decay::type::value; types::array t; for (unsigned long i = 0; i < N; ++i) t[i] = i; std::swap(t[axis1], t[axis2]); return functor::transpose{}(std::forward(a), t); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/take.hpp000066400000000000000000000005221416264035500220750ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TAKE_HPP #define PYTHONIC_NUMPY_TAKE_HPP #include "pythonic/include/numpy/take.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto take(T &&expr, F &&indices) -> decltype(std::forward(expr)[std::forward(indices)]) { return expr[indices]; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/tan.hpp000066400000000000000000000006271416264035500217410ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TAN_HPP #define PYTHONIC_NUMPY_TAN_HPP #include "pythonic/include/numpy/tan.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME tan #define NUMPY_NARY_FUNC_SYM xsimd::tan #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/tanh.hpp000066400000000000000000000006341416264035500221070ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TANH_HPP #define PYTHONIC_NUMPY_TANH_HPP #include "pythonic/include/numpy/tanh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME tanh #define NUMPY_NARY_FUNC_SYM xsimd::tanh #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/tile.hpp000066400000000000000000000046161416264035500221160ustar00rootroot00000000000000#ifndef 
PYTHONIC_NUMPY_TILE_HPP #define PYTHONIC_NUMPY_TILE_HPP #include "pythonic/include/numpy/tile.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _tile(I begin, I end, O &out, long rep, utils::int_<1>) { for (long i = 0; i < rep; ++i) out = std::copy(begin, end, out); } template void _tile(I begin, I end, O &out, long rep, utils::int_) { for (; begin != end; ++begin) _tile((*begin).begin(), (*begin).end(), out, rep, utils::int_()); } } template types::ndarray> tile(E const &expr, long reps) { size_t n = expr.flat_size(); types::ndarray> out( types::array{{long(n * reps)}}, builtins::None); auto out_iter = out.fbegin(); _tile(expr.begin(), expr.end(), out_iter, 1, utils::int_()); for (long i = 1; i < reps; ++i) out_iter = std::copy(out.fbegin(), out.fbegin() + n, out_iter); return out; } template types::array tile_init_shape(R const &reps, S const &expr_shape, utils::index_sequence) { constexpr size_t M = S::value; return { {(reps[Is] * ((Is < Shift) ? 1 : expr_shape.template shape < (Is < M) ? Is : 0 > ()))...}}; } template types::ndarray> tile(E const &expr, types::array const &reps) { size_t n = expr.flat_size(); types::array shape = tile_init_shape( reps, expr, utils::make_index_sequence()); long last_rep = (E::value == N) ? 
std::get(reps) : 1; types::ndarray> out( shape, builtins::None); auto out_iter = out.fbegin(); _tile(expr.begin(), expr.end(), out_iter, last_rep, utils::int_()); size_t nreps = out.flat_size() / (n * last_rep); for (size_t i = 1; i < nreps; ++i) out_iter = std::copy(out.fbegin(), out.fbegin() + n, out_iter); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/trace.hpp000066400000000000000000000016321416264035500222520ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRACE_HPP #define PYTHONIC_NUMPY_TRACE_HPP #include "pythonic/include/numpy/trace.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename T::dtype trace(T const &expr, int offset) { static_assert(T::value == 2, "Not Implemented : Trace for dimension != 2"); typename T::dtype res = 0; long y_offset = std::max(-offset, 0); long x_offset = std::max(0, offset); long size = std::min(expr.flat_size() - y_offset, expr.fast(0).flat_size() - x_offset); if (offset < 0) for (long i = 0; i < size; i++) res += expr.fast(i + offset).fast(i); else if (offset > 0) for (long i = 0; i < size; i++) res += expr.fast(i).fast(i + offset); else for (long i = 0; i < size; i++) res += expr.fast(i).fast(i); return res; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/transpose.hpp000066400000000000000000000063711416264035500231770ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRANSPOSE_HPP #define PYTHONIC_NUMPY_TRANSPOSE_HPP #include "pythonic/include/numpy/transpose.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/utils/nested_container.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template O const *_transpose(types::ndarray &expr, O const *iter, Indices &indices, S const &shape, Perm const &perm, utils::int_::value - 1>) { for (long i = 0, n = shape[std::tuple_size::value - 1]; i < 
n; ++i) { indices[perm[std::tuple_size::value - 1]] = i; expr.fast(indices) = *iter++; } indices[perm[std::tuple_size::value - 1]] = 0; return iter; } template typename std::enable_if::value - 1 != I, O const *>::type _transpose(types::ndarray &expr, O const *iter, Indices &indices, S const &shape, Perm const &perm, utils::int_) { for (long i = 0, n = shape[I]; i < n; ++i) { indices[perm[I]] = i; iter = _transpose(expr, iter, indices, shape, perm, utils::int_()); } indices[perm[I]] = 0; return iter; } template types::ndarray::value>> _transpose(types::ndarray const &a, long const l[std::tuple_size::value]) { auto shape = sutils::getshape(a); types::array::value> shp; for (unsigned long i = 0; i < std::tuple_size::value; ++i) shp[i] = shape[l[i]]; types::array::value> perm; for (std::size_t i = 0; i < std::tuple_size::value; ++i) perm[l[i]] = i; types::ndarray::value>> new_array(shp, builtins::None); auto const *iter = a.buffer; types::array::value> indices; _transpose(new_array, iter, indices, shape, perm, utils::int_<0>{}); return new_array; } } template typename std::enable_if< (std::tuple_size::value > 2), types::ndarray::value>>>::type transpose(types::ndarray const &a) { long t[std::tuple_size::value]; for (unsigned long i = 0; i < std::tuple_size::value; ++i) t[std::tuple_size::value - 1 - i] = i; return _transpose(a, t); } template types::ndarray::value>> transpose(types::ndarray const &a, types::array const &t) { static_assert(std::tuple_size::value == M, "axes don't match array"); long val = t[M - 1]; if (val >= long(std::tuple_size::value)) throw types::ValueError("invalid axis for this array"); return _transpose(a, &t[0]); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/tri.hpp000066400000000000000000000012301416264035500217440ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRI_HPP #define PYTHONIC_NUMPY_TRI_HPP #include "pythonic/include/numpy/tri.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" 
PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> tri(long N, long M, long k, dtype d) { if (M == -1) M = N; types::ndarray> out( types::pshape{N, M}, 0); for (int i = 0; i < N; ++i) for (long j = 0; j < M; ++j) if (j - i <= k) out[i][j] = 1; return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/tril.hpp000066400000000000000000000014251416264035500221260ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRIL_HPP #define PYTHONIC_NUMPY_TRIL_HPP #include "pythonic/include/numpy/tril.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray tril(types::ndarray const &expr, int k) { types::ndarray out(expr._shape, builtins::None); for (int i = 0; i < std::get<0>(expr._shape); ++i) { auto out_i = out[i]; auto expr_i = expr[i]; for (long j = 0; j < std::get<1>(expr._shape); ++j) if (j - i <= k) out_i[j] = expr_i[j]; else out_i[j] = 0; } return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(tril) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/trim_zeros.hpp000066400000000000000000000015571416264035500233570ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRIMZEROS_HPP #define PYTHONIC_NUMPY_TRIMZEROS_HPP #include "pythonic/include/numpy/trim_zeros.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/numpy_gexpr.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::numpy_gexpr trim_zeros(T const &expr, types::str const &trim) { static_assert(T::value == 1, "Not implemented : trim_zeroes only works for 1D array"); long begin = 0; long end = expr.flat_size(); if (trim.find("f") != -1) begin = std::find_if(expr.begin(), expr.end(), [](typename T::dtype i) { return i != 0; }) - expr.begin(); if (trim.find("b") != -1) while (*(expr.begin() + --end) != 0) ; return make_gexpr(expr, types::contiguous_slice(begin, end)); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/triu.hpp000066400000000000000000000013301416264035500221320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRIU_HPP #define PYTHONIC_NUMPY_TRIU_HPP #include "pythonic/include/numpy/triu.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_conversion.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray triu(types::ndarray const &expr, int k) { types::ndarray out(expr._shape, builtins::None); for (int i = 0; i < std::get<0>(expr._shape); ++i) for (long j = 0; j < std::get<1>(expr._shape); ++j) if (j - i >= k) out[i][j] = expr[i][j]; else out[i][j] = 0; return out; } NUMPY_EXPR_TO_NDARRAY0_IMPL(triu) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/true_divide.hpp000066400000000000000000000011041416264035500234510ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRUEDIVIDE_HPP #define PYTHONIC_NUMPY_TRUEDIVIDE_HPP #include "pythonic/include/numpy/true_divide.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/div.hpp" PYTHONIC_NS_BEGIN namespace numpy { // FIXME: this is ! always a true_divide... 
#define NUMPY_NARY_FUNC_NAME true_divide #define NUMPY_NARY_FUNC_SYM pythonic::operator_::div #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/true_divide/000077500000000000000000000000001416264035500227445ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/true_divide/accumulate.hpp000066400000000000000000000002731416264035500256020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRUE_DIVIDE_ACCUMULATE_HPP #define PYTHONIC_NUMPY_TRUE_DIVIDE_ACCUMULATE_HPP #define UFUNC_NAME true_divide #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/trunc.hpp000066400000000000000000000006411416264035500223060ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_TRUNC_HPP #define PYTHONIC_NUMPY_TRUNC_HPP #include "pythonic/include/numpy/trunc.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME trunc #define NUMPY_NARY_FUNC_SYM xsimd::trunc #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ubyte.hpp000066400000000000000000000011651416264035500223050ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UBYTE_HPP #define PYTHONIC_NUMPY_UBYTE_HPP #include "pythonic/include/numpy/ubyte.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned char ubyte() { return {}; } template unsigned char ubyte(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME ubyte #define NUMPY_NARY_FUNC_SYM details::ubyte #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/ufunc_accumulate.hpp000066400000000000000000000012601416264035500244740ustar00rootroot00000000000000#ifndef UFUNC_NAME #error missing UFUNC_NAME #endif // clang-format off #include INCLUDE_FILE(pythonic/numpy,UFUNC_NAME) // clang-format on #include #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace UFUNC_NAME { template auto accumulate(T &&a, long axis, dtype d) -> decltype(partial_sum(std::forward(a), axis, d)) { return partial_sum(std::forward(a), axis, d); } } } PYTHONIC_NS_END pythran-0.10.0+ds2/pythran/pythonic/numpy/ufunc_reduce.hpp000066400000000000000000000003371416264035500236240ustar00rootroot00000000000000#ifndef UFUNC_INAME #error missing UFUNC_INAME #endif // clang-format off #include INCLUDE_FILE(pythonic/operator_,UFUNC_INAME) // clang-format on #include "pythonic/numpy/reduce.hpp" #include "pythonic/utils/functor.hpp" pythran-0.10.0+ds2/pythran/pythonic/numpy/uint.hpp000066400000000000000000000011561416264035500221340ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UINT_HPP #define PYTHONIC_NUMPY_UINT_HPP #include "pythonic/include/numpy/uint.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned long uint() { return {}; } template unsigned long uint(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME uint #define NUMPY_NARY_FUNC_SYM details::uint #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/uint16.hpp000066400000000000000000000011721416264035500223010ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UINT16_HPP #define PYTHONIC_NUMPY_UINT16_HPP #include "pythonic/include/numpy/uint16.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include 
"pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint16_t uint16() { return uint16_t(); } template uint16_t uint16(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME uint16 #define NUMPY_NARY_FUNC_SYM details::uint16 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/uint32.hpp000066400000000000000000000011271416264035500222770ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UINT32_HPP #define PYTHONIC_NUMPY_UINT32_HPP #include "pythonic/include/numpy/uint32.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint32_t uint32() { return uint32_t(); } template uint32_t uint32(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME uint32 #define NUMPY_NARY_FUNC_SYM details::uint32 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/uint64.hpp000066400000000000000000000011271416264035500223040ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UINT64_HPP #define PYTHONIC_NUMPY_UINT64_HPP #include "pythonic/include/numpy/uint64.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint64_t uint64() { return uint64_t(); } template uint64_t uint64(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME uint64 #define NUMPY_NARY_FUNC_SYM details::uint64 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/uint8.hpp000066400000000000000000000011601416264035500222170ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UINT8_HPP #define PYTHONIC_NUMPY_UINT8_HPP #include "pythonic/include/numpy/uint8.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include 
"pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uint8_t uint8() { return uint8_t(); } template uint8_t uint8(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME uint8 #define NUMPY_NARY_FUNC_SYM details::uint8 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/uintc.hpp000066400000000000000000000011531416264035500222740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UINTC_HPP #define PYTHONIC_NUMPY_UINTC_HPP #include "pythonic/include/numpy/uintc.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned uintc() { return {}; } template unsigned uintc(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME uintc #define NUMPY_NARY_FUNC_SYM details::uintc #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/uintp.hpp000066400000000000000000000011661416264035500223150ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UINTP_HPP #define PYTHONIC_NUMPY_UINTP_HPP #include "pythonic/include/numpy/uintp.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { uintptr_t uintp() { return uintptr_t(); } template uintptr_t uintp(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME uintp #define NUMPY_NARY_FUNC_SYM details::uintp #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ulonglong.hpp000066400000000000000000000012331416264035500231550ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ULONGLONG_HPP #define PYTHONIC_NUMPY_ULONGLONG_HPP #include 
"pythonic/include/numpy/ulonglong.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned long long ulonglong() { return {}; } template unsigned long long ulonglong(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME ulonglong #define NUMPY_NARY_FUNC_SYM details::ulonglong #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/union1d.hpp000066400000000000000000000021321416264035500225250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UNION1D_HPP #define PYTHONIC_NUMPY_UNION1D_HPP #include "pythonic/include/numpy/union1d.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _union1d(I begin, I end, O &out, utils::int_<1>) { for (; begin != end; ++begin) out.insert(*begin); } template void _union1d(I begin, I end, O &out, utils::int_) { for (; begin != end; ++begin) _union1d((*begin).begin(), (*begin).end(), out, utils::int_()); } } template types::ndarray< typename __combined::type, types::pshape> union1d(E const &e, F const &f) { std::set::type> res; _union1d(e.begin(), e.end(), res, utils::int_()); _union1d(f.begin(), f.end(), res, utils::int_()); return {res}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/unique.hpp000066400000000000000000000350541416264035500224670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UNIQUE_HPP #define PYTHONIC_NUMPY_UNIQUE_HPP #include "pythonic/include/numpy/unique.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/tuple.hpp" #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _unique1(I begin, I end, O &out, utils::int_<1>) { out.insert(begin, end); } template void _unique1(I begin, I end, O 
&out, utils::int_) { for (; begin != end; ++begin) _unique1((*begin).begin(), (*begin).end(), out, utils::int_()); } template void _unique2(I begin, I end, O0 &out0, O1 &out1, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) { auto pair = out0.insert(*begin); if (pair.second) out1.push_back(i); } } template void _unique2(I begin, I end, O0 &out0, O1 &out1, long &i, utils::int_) { for (; begin != end; ++begin) _unique2((*begin).begin(), (*begin).end(), out0, out1, i, utils::int_()); } template void _unique3(I begin, I end, O0 &out0, O1 &out1, O2 &out2, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) { auto pair = out0.insert(*begin); out2[i] = std::distance(out0.begin(), pair.first); if (pair.second) out1.push_back(i); } } template void _unique3(I begin, I end, O0 &out0, O1 &out1, O2 &out2, long &i, utils::int_) { for (; begin != end; ++begin) _unique3((*begin).begin(), (*begin).end(), out0, out1, out2, i, utils::int_()); } template void _unique4(I begin, I end, O1 &out1, O2 &out2, O3 &out3, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) { auto res = out3.insert(std::make_pair(*begin, 0)); res.first->second += 1; out2[i] = std::distance(out3.begin(), res.first); if (res.second) { out1.push_back(i); } } } template void _unique4(I begin, I end, O1 &out1, O2 &out2, O3 &out3, long &i, utils::int_) { for (; begin != end; ++begin) _unique4((*begin).begin(), (*begin).end(), out1, out2, out3, i, utils::int_()); } template void _unique5(I begin, I end, O0 &out0, O2 &out2, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) { auto pair = out0.insert(*begin); out2[i] = std::distance(out0.begin(), pair.first); } } template void _unique5(I begin, I end, O0 &out0, O2 &out2, long &i, utils::int_) { for (; begin != end; ++begin) _unique5((*begin).begin(), (*begin).end(), out0, out2, i, utils::int_()); } template void _unique6(I begin, I end, O1 &out1, O3 &out3, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) { auto 
res = out3.insert(std::make_pair(*begin, 0)); res.first->second += 1; if (res.second) { out1.push_back(i); } } } template void _unique6(I begin, I end, O1 &out1, O3 &out3, long &i, utils::int_) { for (; begin != end; ++begin) _unique6((*begin).begin(), (*begin).end(), out1, out3, i, utils::int_()); } template void _unique7(I begin, I end, O2 &out2, O3 &out3, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) { auto res = out3.insert(std::make_pair(*begin, 0)); res.first->second += 1; out2[i] = std::distance(out3.begin(), res.first); } } template void _unique7(I begin, I end, O2 &out2, O3 &out3, long &i, utils::int_) { for (; begin != end; ++begin) _unique7((*begin).begin(), (*begin).end(), out2, out3, i, utils::int_()); } template void _unique8(I begin, I end, O3 &out3, long &i, utils::int_<1>) { for (; begin != end; ++begin, ++i) { auto res = out3.insert(std::make_pair(*begin, 0)); res.first->second += 1; } } template void _unique8(I begin, I end, O3 &out3, long &i, utils::int_) { for (; begin != end; ++begin) _unique8((*begin).begin(), (*begin).end(), out3, i, utils::int_()); } } template types::ndarray> unique(E const &expr) { std::set res; _unique1(expr.begin(), expr.end(), res, utils::int_()); return {res}; } template std::tuple>, types::ndarray>> unique(E const &expr, types::true_immediate return_index) { std::set res; std::vector return_index_res; long i = 0; _unique2(expr.begin(), expr.end(), res, return_index_res, i, utils::int_()); return std::make_tuple( types::ndarray>(res), types::ndarray>(return_index_res)); } template types::ndarray> unique(E const &expr, types::false_immediate return_index) { std::set res; _unique1(expr.begin(), expr.end(), res, utils::int_()); return {res}; } template std::tuple>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::true_immediate return_inverse) { std::set res; types::ndarray> return_inverse_res( types::pshape{expr.flat_size()}, builtins::None); long i = 0; 
_unique5(expr.begin(), expr.end(), res, return_inverse_res, i, utils::int_()); return std::make_tuple( types::ndarray>(res), return_inverse_res); } template types::ndarray> unique(E const &expr, types::false_immediate return_index, types::false_immediate return_inverse) { std::set res; _unique1(expr.begin(), expr.end(), res, utils::int_()); return {res}; } template std::tuple>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::false_immediate return_inverse) { return unique(expr, return_index); } template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::true_immediate return_inverse) { assert(return_inverse && "invalid signature otherwise"); std::set res; std::vector return_index_res; types::ndarray> return_inverse_res( types::pshape{expr.flat_size()}, builtins::None); long i = 0; _unique3(expr.begin(), expr.end(), res, return_index_res, return_inverse_res, i, utils::int_()); return std::make_tuple( types::ndarray>(res), types::ndarray>(return_index_res), return_inverse_res); } template std::tuple>, types::ndarray>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::true_immediate return_inverse, types::true_immediate return_counts) { assert(return_counts && "invalid signature otherwise"); std::vector return_index_res; types::ndarray> return_inverse_res( types::pshape{expr.flat_size()}, builtins::None); std::map return_counts_map; { long i = 0; _unique4(expr.begin(), expr.end(), return_index_res, return_inverse_res, return_counts_map, i, utils::int_()); } types::pshape shp{(long)return_counts_map.size()}; types::ndarray> unique_array(shp, builtins::None); types::ndarray> return_counts_array( shp, builtins::None); { long i = 0; for (auto it = return_counts_map.begin(); it != return_counts_map.end(); ++i, ++it) { unique_array.fast(i) = it->first; return_counts_array.fast(i) = it->second; } } return std::make_tuple( 
unique_array, types::ndarray>(return_index_res), return_inverse_res, return_counts_array); } template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::true_immediate return_inverse, types::false_immediate return_counts) { return unique(expr, return_index, return_inverse); } template std::tuple>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::false_immediate return_inverse, types::false_immediate return_counts) { return unique(expr, return_index); } template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::true_immediate return_index, types::false_immediate return_inverse, types::true_immediate return_counts) { std::vector return_index_res; std::map return_counts_map; { long i = 0; _unique6(expr.begin(), expr.end(), return_index_res, return_counts_map, i, utils::int_()); } types::pshape shp{(long)return_counts_map.size()}; types::ndarray> unique_array(shp, builtins::None); types::ndarray> return_counts_array( shp, builtins::None); { long i = 0; for (auto it = return_counts_map.begin(); it != return_counts_map.end(); ++i, ++it) { unique_array.fast(i) = it->first; return_counts_array.fast(i) = it->second; } } return std::make_tuple( unique_array, types::ndarray>(return_index_res), return_counts_array); } template std::tuple>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::true_immediate return_inverse, types::false_immediate return_counts) { return unique(expr, return_index, return_inverse); } template std::tuple>, types::ndarray>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::true_immediate return_inverse, types::true_immediate return_counts) { types::ndarray> return_inverse_res( types::pshape{expr.flat_size()}, builtins::None); std::map return_counts_map; { long i = 0; _unique7(expr.begin(), expr.end(), return_inverse_res, return_counts_map, i, utils::int_()); } 
types::pshape shp{(long)return_counts_map.size()}; types::ndarray> unique_array(shp, builtins::None); types::ndarray> return_counts_array( shp, builtins::None); { long i = 0; for (auto it = return_counts_map.begin(); it != return_counts_map.end(); ++i, ++it) { unique_array.fast(i) = it->first; return_counts_array.fast(i) = it->second; } } return std::make_tuple(unique_array, return_inverse_res, return_counts_array); } template types::ndarray> unique(E const &expr, types::false_immediate return_index, types::false_immediate return_inverse, types::false_immediate return_counts) { return unique(expr); } template std::tuple>, types::ndarray>> unique(E const &expr, types::false_immediate return_index, types::false_immediate return_inverse, types::true_immediate return_counts) { std::map return_counts_map; { long i = 0; _unique8(expr.begin(), expr.end(), return_counts_map, i, utils::int_()); } types::pshape shp{(long)return_counts_map.size()}; types::ndarray> unique_array(shp, builtins::None); types::ndarray> return_counts_array( shp, builtins::None); { long i = 0; for (auto it = return_counts_map.begin(); it != return_counts_map.end(); ++i, ++it) { unique_array.fast(i) = it->first; return_counts_array.fast(i) = it->second; } } return std::make_tuple(unique_array, return_counts_array); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/unravel_index.hpp000066400000000000000000000022341416264035500240160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UNRAVEL_INDEX_HPP #define PYTHONIC_NUMPY_UNRAVEL_INDEX_HPP #include "pythonic/include/numpy/unravel_index.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _unravel_index(E expr, ShapeIt shape_it, ShapeIt end_it, RetIt ret_it) { while (shape_it != end_it) { auto &v = *shape_it; auto tmp = expr / v; *ret_it = expr - v *tmp; expr = tmp; ++shape_it; ++ret_it; } } } template typename std::enable_if::value, types::array::value>>::type 
unravel_index(E const &expr, S const &shape, types::str const &order) { types::array::value> ret; if (order[0] == "C") { _unravel_index(expr, shape.rbegin(), shape.rend(), ret.rbegin()); } else if (order[0] == "F") { _unravel_index(expr, shape.begin(), shape.end(), ret.begin()); } else { throw types::ValueError("Invalid order"); } return ret; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/unwrap.hpp000066400000000000000000000032241416264035500224670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_UNWRAP_HPP #define PYTHONIC_NUMPY_UNWRAP_HPP #include "pythonic/include/numpy/unwrap.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/int_.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/pi.hpp" #include #include #include PYTHONIC_NS_BEGIN namespace numpy { namespace { template void _unwrap(I0 ibegin, I0 iend, I1 obegin, double discont, utils::int_<1>) { *obegin = *ibegin; ++ibegin; for (; ibegin != iend; ++ibegin, ++obegin) { if (functor::abs{}(*obegin - *ibegin) > discont) *(obegin + 1) = *ibegin + 2 * pi * functor::round{}((*obegin - *ibegin) / (2 * pi)); else *(obegin + 1) = *ibegin; } } template void _unwrap(I0 ibegin, I0 iend, I1 obegin, double discont, utils::int_) { for (; ibegin != iend; ++ibegin, ++obegin) _unwrap((*ibegin).begin(), (*ibegin).end(), (*obegin).begin(), discont, utils::int_()); } } template types::ndarray unwrap(E const &expr, double discont) { discont = functor::maximum{}(discont, pi); types::ndarray out(sutils::getshape(expr), builtins::None); _unwrap(expr.begin(), expr.end(), out.begin(), discont, utils::int_()); return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/ushort.hpp000066400000000000000000000011761416264035500225030ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_USHORT_HPP #define PYTHONIC_NUMPY_USHORT_HPP #include "pythonic/include/numpy/ushort.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/meta.hpp" #include 
"pythonic/utils/numpy_traits.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { unsigned short ushort() { return {}; } template unsigned short ushort(V v) { return v; } } #define NUMPY_NARY_FUNC_NAME ushort #define NUMPY_NARY_FUNC_SYM details::ushort #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/var.hpp000066400000000000000000000052771416264035500217550ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_VAR_HPP #define PYTHONIC_NUMPY_VAR_HPP #include "pythonic/include/numpy/var.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/add.hpp" #include "pythonic/numpy/conjugate.hpp" #include "pythonic/numpy/subtract.hpp" #include "pythonic/numpy/mean.hpp" #include "pythonic/builtins/pythran/abssqr.hpp" #include "pythonic/numpy/sum.hpp" #include "pythonic/numpy/empty_like.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { template auto var(E const &expr, types::none_type axis, types::none_type dtype, types::none_type out, long ddof) -> decltype(var_type(std::real(mean(expr)))) { auto m = mean(expr); auto t = pythonic::numpy::functor::subtract{}(expr, m); return sum(builtins::pythran::functor::abssqr{}(t)) / var_type(expr.flat_size() - ddof); } namespace { // this is a workaround for the lack of efficient support for broadcasting // in pythonic template void _enlarge_copy_minus(T &&t, E const &e, M const &m, long axis, utils::int_<1>) { for (long i = 0, n = e.template shape<0>(), p = m.template shape<0>(); i < n;) for (long j = 0; j < p; ++j, ++i) t.fast(i) = e.fast(i) - m.fast(j); } template void _enlarge_copy_minus(T &&t, E const &e, M const &m, long axis, utils::int_) { for (long i = 0, n = e.template shape<0>(), p = m.template shape<0>(); i < n;) for (long j = 0; j < p; ++j, ++i) _enlarge_copy_minus(t.fast(i), 
e.fast(i), m.fast(j), axis, utils::int_()); } } template auto var(E const &expr, long axis, types::none_type dtype, types::none_type out, long ddof) -> typename assignable() * mean(expr, axis))>::type { auto m = mean(expr, axis); if (axis == 0) { auto t = pythonic::numpy::functor::subtract{}(expr, m); return sum(builtins::pythran::functor::abssqr{}(t), axis) /= var_type(expr.template shape<0>() - ddof); } else { types::array shp = sutils::getshape(expr); shp[axis] = 1; auto mp = m.reshape(shp); auto t = empty_like(expr); _enlarge_copy_minus(t, expr, mp, axis, utils::int_()); return sum(builtins::pythran::functor::abssqr{}(t), axis) /= var_type(sutils::getshape(expr)[axis] - ddof); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/vdot.hpp000066400000000000000000000017421416264035500221320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_VDOT_HPP #define PYTHONIC_NUMPY_VDOT_HPP #include "pythonic/include/numpy/vdot.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/dot.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/numpy/conjugate.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto vdot(U const &u, V const &v) -> decltype(functor::dot{}(functor::asarray{}(u).flat(), functor::asarray{}(v).flat())) { if (types::is_complex::value && types::is_complex::value) { puts("complex"); return functor::dot{}(functor::asarray{}(functor::conjugate{}(u)).flat(), functor::asarray{}(v).flat()); } else { puts("simplex"); return functor::dot{}(functor::asarray{}(u).flat(), functor::asarray{}(v).flat()); } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/vstack.hpp000066400000000000000000000020241416264035500224430ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_VSTACK_HPP #define PYTHONIC_NUMPY_VSTACK_HPP #include #include PYTHONIC_NS_BEGIN namespace numpy { template auto vstack(ArraySequence &&seq) -> typename std::enable_if<(impl::vstack_helper::value > 1), 
impl::vstack_helper>::type { return concatenate(std::forward(seq), 0); } template auto vstack(ArraySequence &&seq) -> typename std::enable_if< (impl::vstack_helper::value == 1), decltype(std::declval>().reshape( std::declval>()))>::type { auto &&temp = concatenate(std::forward(seq), 0); long const seq_size = seq.size(), temp_size = temp.size(); types::array new_shape{{seq_size, temp_size / seq_size}}; return temp.reshape(new_shape); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/where.hpp000066400000000000000000000040311416264035500222620ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_WHERE_HPP #define PYTHONIC_NUMPY_WHERE_HPP #include "pythonic/include/numpy/where.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/numpy/nonzero.hpp" #include "pythonic/numpy/copy.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace impl { template typename __combined::type where(E const &cond, F const &true_, G const &false_) { if (cond) return true_; else return false_; } } #define NUMPY_NARY_FUNC_NAME where #define NUMPY_NARY_FUNC_SYM impl::where #define NUMPY_NARY_RESHAPE_MODE reshape_type #include "pythonic/types/numpy_nary_expr.hpp" } namespace types { template <> struct Dereferencer { template auto operator()(Ts const &iters, utils::index_sequence<0, 1, 2>) -> typename std::enable_if< types::is_dtype< typename std::remove_cv(iters))>::type>::type>::value && types::is_dtype< typename std::remove_cv(iters))>::type>::type>::value && types::is_dtype< typename std::remove_cv(iters))>::type>::type>::value, decltype(numpy::impl::where(*std::get<0>(iters), *std::get<1>(iters), *std::get<2>(iters)))>::type { if (*std::get<0>(iters)) return *std::get<1>(iters); else return *std::get<2>(iters); } template auto operator()(Ts const &iters, utils::index_sequence, ...) 
-> decltype(numpy::functor::where{}(*std::get(iters)...)) { return numpy::functor::where{}(*std::get(iters)...); } }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/zeros.hpp000066400000000000000000000022531416264035500223160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ZEROS_HPP #define PYTHONIC_NUMPY_ZEROS_HPP #include "pythonic/include/numpy/zeros.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::ndarray> zeros(pS const &shape, dtype d) { using T = typename dtype::type; // use calloc even if we have a non integer type. This looks ok on modern // architecture, although not really standard auto *buffer = (T *)calloc(sutils::sprod(shape), sizeof(T)); return {buffer, (sutils::shape_t)shape, types::ownership::owned}; } template types::ndarray> zeros(long size, dtype d) { return zeros(types::pshape(size), d); } template types::ndarray>> zeros(std::integral_constant, dtype d) { return zeros(types::pshape>({}), d); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/zeros_like.hpp000066400000000000000000000013131416264035500233160ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ZEROSLIKE_HPP #define PYTHONIC_NUMPY_ZEROSLIKE_HPP #include "pythonic/include/numpy/zeros_like.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/zeros.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto zeros_like(E const &expr, dtype d) -> decltype(zeros(sutils::getshape(expr), d)) { return zeros(sutils::getshape(expr), d); } template auto zeros_like(E const &expr, types::none_type) -> decltype(zeros(sutils::getshape(expr), types::dtype_t())) { return zeros(sutils::getshape(expr), types::dtype_t()); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/omp/000077500000000000000000000000001416264035500200645ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/omp/get_num_threads.hpp000066400000000000000000000005001416264035500237400ustar00rootroot00000000000000#ifndef PYTHONIC_OMP_GET_NUM_THREADS_HPP #define PYTHONIC_OMP_GET_NUM_THREADS_HPP #include "pythonic/include/omp/get_num_threads.hpp" #include #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_num_threads() { return omp_get_num_threads(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/omp/get_thread_num.hpp000066400000000000000000000004741416264035500235670ustar00rootroot00000000000000#ifndef PYTHONIC_OMP_GET_THREAD_NUM_HPP #define PYTHONIC_OMP_GET_THREAD_NUM_HPP #include "pythonic/include/omp/get_thread_num.hpp" #include #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_thread_num() { return omp_get_thread_num(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/omp/get_wtick.hpp000066400000000000000000000004421416264035500225550ustar00rootroot00000000000000#ifndef PYTHONIC_OMP_GET_WTICK_HPP #define PYTHONIC_OMP_GET_WTICK_HPP #include "pythonic/include/omp/get_wtick.hpp" #include #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_wtick() { return omp_get_wtick(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/omp/get_wtime.hpp000066400000000000000000000004431416264035500225620ustar00rootroot00000000000000#ifndef PYTHONIC_OMP_GET_WTIME_HPP #define PYTHONIC_OMP_GET_WTIME_HPP #include "pythonic/include/omp/get_wtime.hpp" #include #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { long get_wtime() { return omp_get_wtime(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/omp/in_parallel.hpp000066400000000000000000000004551416264035500230630ustar00rootroot00000000000000#ifndef PYTHONIC_OMP_IN_PARALLEL_HPP #define PYTHONIC_OMP_IN_PARALLEL_HPP 
#include "pythonic/include/omp/in_parallel.hpp" #include #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { bool in_parallel() { return omp_in_parallel(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/omp/set_nested.hpp000066400000000000000000000004631416264035500227350ustar00rootroot00000000000000#ifndef PYTHONIC_OMP_SET_NESTED_HPP #define PYTHONIC_OMP_SET_NESTED_HPP #include "pythonic/include/omp/set_nested.hpp" #include #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace omp { void set_nested(long val) { return omp_set_nested(val); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/000077500000000000000000000000001416264035500212635ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/operator_/__abs__.hpp000066400000000000000000000002461416264035500233370ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ABS__HPP #define PYTHONIC_OPERATOR_ABS__HPP #include "pythonic/include/operator_/__abs__.hpp" #include "pythonic/builtins/abs.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__add__.hpp000066400000000000000000000002471416264035500233230ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ADD__HPP #define PYTHONIC_OPERATOR_ADD__HPP #include "pythonic/include/operator_/__add__.hpp" #include "pythonic/operator_/add.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__and__.hpp000066400000000000000000000002501416264035500233270ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_AND__HPP #define PYTHONIC_OPERATOR_AND__HPP #include "pythonic/include/operator_/__and__.hpp" #include "pythonic/operator_/and_.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__concat__.hpp000066400000000000000000000002631416264035500240400ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_CONCAT__HPP #define PYTHONIC_OPERATOR_CONCAT__HPP #include "pythonic/include/operator_/__concat__.hpp" #include "pythonic/operator_/concat.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/__contains__.hpp000066400000000000000000000002731416264035500244100ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_CONTAINS__HPP #define PYTHONIC_OPERATOR_CONTAINS__HPP #include "pythonic/include/operator_/__contains__.hpp" #include "pythonic/operator_/contains.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__delitem__.hpp000066400000000000000000000002671416264035500242200ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_DELITEM__HPP #define PYTHONIC_OPERATOR_DELITEM__HPP #include "pythonic/include/operator_/__delitem__.hpp" #include "pythonic/operator_/delitem.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__div__.hpp000066400000000000000000000002471416264035500233550ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_DIV__HPP #define PYTHONIC_OPERATOR_DIV__HPP #include "pythonic/include/operator_/__div__.hpp" #include "pythonic/operator_/div.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__eq__.hpp000066400000000000000000000002431416264035500231740ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_EQ__HPP #define PYTHONIC_OPERATOR_EQ__HPP #include "pythonic/include/operator_/__eq__.hpp" #include "pythonic/operator_/eq.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__floordiv__.hpp000066400000000000000000000002731416264035500244160ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_FLOORDIV__HPP #define PYTHONIC_OPERATOR_FLOORDIV__HPP #include "pythonic/include/operator_/__floordiv__.hpp" #include "pythonic/operator_/floordiv.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__ge__.hpp000066400000000000000000000002431416264035500231620ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_GE__HPP #define PYTHONIC_OPERATOR_GE__HPP #include "pythonic/include/operator_/__ge__.hpp" #include "pythonic/operator_/ge.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/__getitem__.hpp000066400000000000000000000002671416264035500242330ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_GETITEM__HPP #define PYTHONIC_OPERATOR_GETITEM__HPP #include "pythonic/include/operator_/__getitem__.hpp" #include "pythonic/operator_/getitem.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__gt__.hpp000066400000000000000000000002431416264035500232010ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_GT__HPP #define PYTHONIC_OPERATOR_GT__HPP #include "pythonic/include/operator_/__gt__.hpp" #include "pythonic/operator_/gt.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__iadd__.hpp000066400000000000000000000002531416264035500234710ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IADD__HPP #define PYTHONIC_OPERATOR_IADD__HPP #include "pythonic/include/operator_/__iadd__.hpp" #include "pythonic/operator_/iadd.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__iand__.hpp000066400000000000000000000002531416264035500235030ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IAND__HPP #define PYTHONIC_OPERATOR_IAND__HPP #include "pythonic/include/operator_/__iand__.hpp" #include "pythonic/operator_/iand.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__iconcat__.hpp000066400000000000000000000002671416264035500242150ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ICONCAT__HPP #define PYTHONIC_OPERATOR_ICONCAT__HPP #include "pythonic/include/operator_/__iconcat__.hpp" #include "pythonic/operator_/iconcat.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__idiv__.hpp000066400000000000000000000002531416264035500235230ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IDIV__HPP #define PYTHONIC_OPERATOR_IDIV__HPP #include "pythonic/include/operator_/__idiv__.hpp" #include "pythonic/operator_/idiv.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/__ifloordiv__.hpp000066400000000000000000000002771416264035500245730ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IFLOORDIV__HPP #define PYTHONIC_OPERATOR_IFLOORDIV__HPP #include "pythonic/include/operator_/__ifloordiv__.hpp" #include "pythonic/operator_/ifloordiv.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__ilshift__.hpp000066400000000000000000000002671416264035500242370ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ILSHIFT__HPP #define PYTHONIC_OPERATOR_ILSHIFT__HPP #include "pythonic/include/operator_/__ilshift__.hpp" #include "pythonic/operator_/ilshift.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__imod__.hpp000066400000000000000000000002531416264035500235200ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IMOD__HPP #define PYTHONIC_OPERATOR_IMOD__HPP #include "pythonic/include/operator_/__imod__.hpp" #include "pythonic/operator_/imod.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__imul__.hpp000066400000000000000000000002531416264035500235360ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IMUL__HPP #define PYTHONIC_OPERATOR_IMUL__HPP #include "pythonic/include/operator_/__imul__.hpp" #include "pythonic/operator_/imul.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__inv__.hpp000066400000000000000000000002521416264035500233630ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_INV__HPP #define PYTHONIC_OPERATOR_INV__HPP #include "pythonic/include/operator_/__inv__.hpp" #include "pythonic/operator_/invert.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__invert__.hpp000066400000000000000000000002631416264035500241000ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_INVERT__HPP #define PYTHONIC_OPERATOR_INVERT__HPP #include "pythonic/include/operator_/__invert__.hpp" #include "pythonic/operator_/invert.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/__ior__.hpp000066400000000000000000000002471416264035500233640ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IOR__HPP #define PYTHONIC_OPERATOR_IOR__HPP #include "pythonic/include/operator_/__ior__.hpp" #include "pythonic/operator_/ior.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__ipow__.hpp000066400000000000000000000002531416264035500235460ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IPOW__HPP #define PYTHONIC_OPERATOR_IPOW__HPP #include "pythonic/include/operator_/__ipow__.hpp" #include "pythonic/operator_/ipow.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__irshift__.hpp000066400000000000000000000002671416264035500242450ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IRSHIFT__HPP #define PYTHONIC_OPERATOR_IRSHIFT__HPP #include "pythonic/include/operator_/__irshift__.hpp" #include "pythonic/operator_/irshift.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__isub__.hpp000066400000000000000000000002531416264035500235320ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ISUB__HPP #define PYTHONIC_OPERATOR_ISUB__HPP #include "pythonic/include/operator_/__isub__.hpp" #include "pythonic/operator_/isub.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__itruediv__.hpp000066400000000000000000000002731416264035500244250ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ITRUEDIV__HPP #define PYTHONIC_OPERATOR_ITRUEDIV__HPP #include "pythonic/include/operator_/__itruediv__.hpp" #include "pythonic/operator_/itruediv.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__ixor__.hpp000066400000000000000000000002531416264035500235510ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IXOR__HPP #define PYTHONIC_OPERATOR_IXOR__HPP #include "pythonic/include/operator_/__ixor__.hpp" #include "pythonic/operator_/ixor.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/__le__.hpp000066400000000000000000000002431416264035500231670ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_LE__HPP #define PYTHONIC_OPERATOR_LE__HPP #include "pythonic/include/operator_/__le__.hpp" #include "pythonic/operator_/le.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__lshift__.hpp000066400000000000000000000002631416264035500240620ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_LSHIFT__HPP #define PYTHONIC_OPERATOR_LSHIFT__HPP #include "pythonic/include/operator_/__lshift__.hpp" #include "pythonic/operator_/lshift.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__lt__.hpp000066400000000000000000000002431416264035500232060ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_LT__HPP #define PYTHONIC_OPERATOR_LT__HPP #include "pythonic/include/operator_/__lt__.hpp" #include "pythonic/operator_/lt.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__matmul__.hpp000066400000000000000000000002631416264035500240700ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_MATMUL__HPP #define PYTHONIC_OPERATOR_MATMUL__HPP #include "pythonic/include/operator_/__matmul__.hpp" #include "pythonic/operator_/matmul.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__mod__.hpp000066400000000000000000000002471416264035500233520ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_MOD__HPP #define PYTHONIC_OPERATOR_MOD__HPP #include "pythonic/include/operator_/__mod__.hpp" #include "pythonic/operator_/mod.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__mul__.hpp000066400000000000000000000002471416264035500233700ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_MUL__HPP #define PYTHONIC_OPERATOR_MUL__HPP #include "pythonic/include/operator_/__mul__.hpp" #include "pythonic/operator_/mul.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__ne__.hpp000066400000000000000000000002431416264035500231710ustar00rootroot00000000000000#ifndef 
PYTHONIC_OPERATOR_NE__HPP #define PYTHONIC_OPERATOR_NE__HPP #include "pythonic/include/operator_/__ne__.hpp" #include "pythonic/operator_/ne.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__neg__.hpp000066400000000000000000000002471416264035500233440ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_NEG__HPP #define PYTHONIC_OPERATOR_NEG__HPP #include "pythonic/include/operator_/__neg__.hpp" #include "pythonic/operator_/neg.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__not__.hpp000066400000000000000000000002501416264035500233650ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_NOT__HPP #define PYTHONIC_OPERATOR_NOT__HPP #include "pythonic/include/operator_/__not__.hpp" #include "pythonic/operator_/not_.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__or__.hpp000066400000000000000000000002441416264035500232100ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_OR__HPP #define PYTHONIC_OPERATOR_OR__HPP #include "pythonic/include/operator_/__or__.hpp" #include "pythonic/operator_/or_.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__pos__.hpp000066400000000000000000000002471416264035500233740ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_POS__HPP #define PYTHONIC_OPERATOR_POS__HPP #include "pythonic/include/operator_/__pos__.hpp" #include "pythonic/operator_/pos.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__rshift__.hpp000066400000000000000000000002631416264035500240700ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_RSHIFT__HPP #define PYTHONIC_OPERATOR_RSHIFT__HPP #include "pythonic/include/operator_/__rshift__.hpp" #include "pythonic/operator_/rshift.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__sub__.hpp000066400000000000000000000002471416264035500233640ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_SUB__HPP #define PYTHONIC_OPERATOR_SUB__HPP #include "pythonic/include/operator_/__sub__.hpp" #include "pythonic/operator_/sub.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/__truediv__.hpp000066400000000000000000000002671416264035500242570ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_TRUEDIV__HPP #define PYTHONIC_OPERATOR_TRUEDIV__HPP #include "pythonic/include/operator_/__truediv__.hpp" #include "pythonic/operator_/truediv.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/__xor__.hpp000066400000000000000000000002511416264035500233760ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_XOR__HPP #define PYTHONIC_OPERATOR_XOR__HPP #include "pythonic/include/operator_/__xor__.hpp" #include "pythonic//operator_/xor_.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/abs.hpp000066400000000000000000000002401416264035500225350ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ABS_HPP #define PYTHONIC_OPERATOR_ABS_HPP #include "pythonic/include/operator_/abs.hpp" #include "pythonic/builtins/abs.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/add.hpp000066400000000000000000000011701416264035500225230ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ADD_HPP #define PYTHONIC_OPERATOR_ADD_HPP #include "pythonic/include/operator_/add.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto add(A &&a, B &&b) -> decltype(std::forward(a) + std::forward(b)) { return std::forward(a) + std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL( add, +, (((b >= 0) ? 
(a <= std::numeric_limits::max() - b) : (std::numeric_limits::min() - b <= a)))) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/and_.hpp000066400000000000000000000007511416264035500227000ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_AND_HPP #define PYTHONIC_OPERATOR_AND_HPP #include "pythonic/include/operator_/and_.hpp" #include "pythonic/operator_/overloads.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto and_(A &&a, B &&b) -> decltype(std::forward(a) & std::forward(b)) { return std::forward(a) & std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL(and_, &, true) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/concat.hpp000066400000000000000000000006231416264035500232440ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_CONCAT_HPP #define PYTHONIC_OPERATOR_CONCAT_HPP #include "pythonic/include/operator_/concat.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto concat(A &&a, B &&b) -> decltype(std::forward(a) + std::forward(b)) { return std::forward(a) + std::forward(b); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/contains.hpp000066400000000000000000000007121416264035500236120ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_CONTAINS_HPP #define PYTHONIC_OPERATOR_CONTAINS_HPP #include "pythonic/include/operator_/contains.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/in.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto contains(A &&a, B &&b) -> decltype(in(std::forward(a), std::forward(b))) { return in(std::forward(a), std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/countOf.hpp000066400000000000000000000006011416264035500234060ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_COUNTOF_HPP #define PYTHONIC_OPERATOR_COUNTOF_HPP #include "pythonic/include/operator_/countOf.hpp" #include 
"pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { template long countOf(A &&a, B &&b) { return std::count(a.begin(), a.end(), std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/delitem.hpp000066400000000000000000000006561416264035500234260ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_DELITEM_HPP #define PYTHONIC_OPERATOR_DELITEM_HPP #include "pythonic/include/operator_/delitem.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/None.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template types::none_type delitem(A &&a, B &&b) { std::forward(a).remove(std::forward(b)); return builtins::None; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/div.hpp000066400000000000000000000013471416264035500225630ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_DIV_HPP #define PYTHONIC_OPERATOR_DIV_HPP #include "pythonic/include/operator_/div.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto div(A &&a, B &&b) // for ndarrays -> typename std::enable_if< !std::is_fundamental::type>::value || !std::is_fundamental::type>::value, decltype(std::forward(a) / std::forward(b))>::type { return std::forward(a) / std::forward(b); } double div(double a, double b) { assert(b != 0 && "divide by zero"); return a / b; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/eq.hpp000066400000000000000000000007231416264035500224030ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_EQ_HPP #define PYTHONIC_OPERATOR_EQ_HPP #include "pythonic/include/operator_/eq.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto eq(A &&a, B &&b) -> decltype(std::forward(a) == std::forward(b)) { return std::forward(a) == std::forward(b); } bool eq(char const *a, char const *b) { return strcmp(a, b) == 0; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/floordiv.hpp000066400000000000000000000002651416264035500236230ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_FLOORDIV_HPP #define PYTHONIC_OPERATOR_FLOORDIV_HPP #include "pythonic/include/operator_/floordiv.hpp" #include "pythonic/numpy/floor_divide.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/ge.hpp000066400000000000000000000007651416264035500223770ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_GE_HPP #define PYTHONIC_OPERATOR_GE_HPP #include "pythonic/include/operator_/ge.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { template auto ge(A &&a, B &&b) -> decltype(std::forward(a) >= std::forward(b)) { return std::forward(a) >= std::forward(b); } bool ge(char const *self, char const *other) { return strcmp(self, other) >= 0; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/getitem.hpp000066400000000000000000000006241416264035500234340ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_GETITEM_HPP #define PYTHONIC_OPERATOR_GETITEM_HPP #include "pythonic/include/operator_/getitem.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto getitem(A &&a, B &&b) -> decltype(std::forward(a)[std::forward(b)]) { return std::forward(a)[std::forward(b)]; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/gt.hpp000066400000000000000000000007621416264035500224130ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_GT_HPP #define PYTHONIC_OPERATOR_GT_HPP #include "pythonic/include/operator_/gt.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { template auto gt(A &&a, B &&b) -> decltype(std::forward(a) > std::forward(b)) { return std::forward(a) > std::forward(b); } bool gt(char const *self, char const *other) { return strcmp(self, other) > 0; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/iadd.hpp000066400000000000000000000014031416264035500226730ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IADD_HPP #define PYTHONIC_OPERATOR_IADD_HPP #include "pythonic/include/operator_/iadd.hpp" #define OPERATOR_NAME iadd #define OPERATOR_SYMBOL + #define OPERATOR_ISYMBOL += #include "pythonic/operator_/icommon.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/set.hpp" #include "pythonic/types/dict.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto iadd(types::empty_list, types::list const &b) -> decltype(b) { return b; } template auto iadd(types::empty_dict, types::dict const &b) -> decltype(b) { return b; } template auto iadd(types::empty_set, types::set const &b) -> decltype(b) { return b; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/iand.hpp000066400000000000000000000003731416264035500227120ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IAND_HPP #define PYTHONIC_OPERATOR_IAND_HPP #include "pythonic/include/operator_/iand.hpp" #define OPERATOR_NAME iand #define OPERATOR_SYMBOL & #define OPERATOR_ISYMBOL &= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/icommon.hpp000066400000000000000000000015461416264035500234430ustar00rootroot00000000000000#ifndef OPERATOR_NAME #error OPERATOR_NAME ! defined #endif #ifndef OPERATOR_SYMBOL #error OPERATOR_SYMBOL ! defined #endif #ifndef OPERATOR_ISYMBOL #error OPERATOR_ISYMBOL ! defined #endif #include "pythonic/utils/functor.hpp" #ifdef USE_XSIMD #include #endif PYTHONIC_NS_BEGIN namespace operator_ { template auto OPERATOR_NAME(bool, A &&a, B &&b, ...) 
-> decltype(std::forward(a) OPERATOR_SYMBOL std::forward(b)) { return std::forward(a) OPERATOR_SYMBOL std::forward(b); } template auto OPERATOR_NAME(bool, A &&a, B &&b, std::nullptr_t) -> decltype(std::forward(a) OPERATOR_ISYMBOL std::forward(b)) { return std::forward(a) OPERATOR_ISYMBOL std::forward(b); } } PYTHONIC_NS_END #undef OPERATOR_NAME #undef OPERATOR_SYMBOL #undef OPERATOR_ISYMBOL pythran-0.10.0+ds2/pythran/pythonic/operator_/iconcat.hpp000066400000000000000000000014051416264035500234140ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ICONCAT_HPP #define PYTHONIC_OPERATOR_ICONCAT_HPP #include "pythonic/include/operator_/iconcat.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/set.hpp" #include "pythonic/types/dict.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A iconcat(A a, B const &b) { return a += b; } template auto iconcat(types::empty_list a, types::list b) -> decltype(b) { return b; } template auto iconcat(types::empty_dict a, types::dict b) -> decltype(b) { return b; } template auto iconcat(types::empty_set a, types::set b) -> decltype(b) { return b; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/idiv.hpp000066400000000000000000000003731416264035500227320ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IDIV_HPP #define PYTHONIC_OPERATOR_IDIV_HPP #include "pythonic/include/operator_/idiv.hpp" #define OPERATOR_NAME idiv #define OPERATOR_SYMBOL / #define OPERATOR_ISYMBOL /= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/ifloordiv.hpp000066400000000000000000000010271416264035500237710ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IFLOORDIV_HPP #define PYTHONIC_OPERATOR_IFLOORDIV_HPP #include "pythonic/include/operator_/ifloordiv.hpp" #include "pythonic/operator_/mod.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A ifloordiv(A &&a, B &&b) { a -= mod(a, b); 
a /= b; return a; } template auto ifloordiv(A const &a, B const &b) -> decltype((a - mod(a, b)) / b) { return (a - mod(a, b)) / b; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/ilshift.hpp000066400000000000000000000004111416264035500234320ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ILSHIFT_HPP #define PYTHONIC_OPERATOR_ILSHIFT_HPP #include "pythonic/include/operator_/ilshift.hpp" #define OPERATOR_NAME ilshift #define OPERATOR_SYMBOL << #define OPERATOR_ISYMBOL <<= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/imatmul.hpp000066400000000000000000000011001416264035500234340ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IMATMUL_HPP #define PYTHONIC_OPERATOR_IMATMUL_HPP #include "pythonic/include/operator_/imatmul.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/dot.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A imatmul(A const &a, B &&b) { return numpy::functor::dot{}(a, std::forward(b)); } template A &imatmul(A &a, B &&b) { return a = numpy::functor::dot(a, std::forward(b)); // FIXME: improve that } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/imax.hpp000066400000000000000000000017411416264035500227350ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IMAX_HPP #define PYTHONIC_OPERATOR_IMAX_HPP #include "pythonic/include/operator_/imax.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/maximum.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto imax(A &&a, B &&b) -> typename std::enable_if< std::is_const::value || !std::is_assignable::value, decltype(numpy::functor::maximum{}(std::forward(a), std::forward(b)))>::type { return numpy::functor::maximum{}(std::forward(a), std::forward(b)); } template auto imax(A &&a, B &&b) -> typename std::enable_if< !std::is_const::value && std::is_assignable::value, decltype(a = numpy::functor::maximum{}(std::forward(a), std::forward(b)))>::type { return a = 
numpy::functor::maximum{}(a, std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/imin.hpp000066400000000000000000000017421416264035500227340ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IMIN_HPP #define PYTHONIC_OPERATOR_IMIN_HPP #include "pythonic/include/operator_/imin.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/minimum.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto imin(A &&a, B &&b) -> typename std::enable_if< std::is_const::value || !std::is_assignable::value, decltype(numpy::functor::minimum{}(std::forward(a), std::forward(b)))>::type { return numpy::functor::minimum{}(std::forward(a), std::forward(b)); } template auto imin(A &&a, B &&b) -> typename std::enable_if< !std::is_const::value && std::is_assignable::value, decltype(a = numpy::functor::minimum{}(std::forward(a), std::forward(b)))>::type { return a = numpy::functor::minimum{}(a, std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/imod.hpp000066400000000000000000000006501416264035500227250ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IMOD_HPP #define PYTHONIC_OPERATOR_IMOD_HPP #include "pythonic/include/operator_/imod.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A imod(A const &a, B &&b) { return a % std::forward(b); } template A &imod(A &a, B &&b) { return a %= std::forward(b); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/imul.hpp000066400000000000000000000003731416264035500227450ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IMUL_HPP #define PYTHONIC_OPERATOR_IMUL_HPP #include "pythonic/include/operator_/imul.hpp" #define OPERATOR_NAME imul #define OPERATOR_SYMBOL * #define OPERATOR_ISYMBOL *= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/indexOf.hpp000066400000000000000000000011671416264035500233750ustar00rootroot00000000000000#ifndef 
PYTHONIC_OPERATOR_INDEXOF_HPP #define PYTHONIC_OPERATOR_INDEXOF_HPP #include "pythonic/include/operator_/indexOf.hpp" #include "pythonic/builtins/str.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { template long indexOf(A &&a, B &&b) { auto where = std::find(a.begin(), a.end(), b); if (where == a.end()) throw types::ValueError(builtins::anonymous::str(b) + " is not in this sequence"); return where - a.begin(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/inv.hpp000066400000000000000000000002441416264035500225700ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_INV_HPP #define PYTHONIC_OPERATOR_INV_HPP #include "pythonic/include/operator_/inv.hpp" #include "pythonic/operator_/invert.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/invert.hpp000066400000000000000000000005331416264035500233040ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_INVERT_HPP #define PYTHONIC_OPERATOR_INVERT_HPP #include "pythonic/include/operator_/invert.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto invert(A &&a) -> decltype(~std::forward(a)) { return ~std::forward(a); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/ior.hpp000066400000000000000000000003671416264035500225730ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IOR_HPP #define PYTHONIC_OPERATOR_IOR_HPP #include "pythonic/include/operator_/ior.hpp" #define OPERATOR_NAME ior #define OPERATOR_SYMBOL | #define OPERATOR_ISYMBOL |= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/ipow.hpp000066400000000000000000000007541416264035500227600ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IPOW_HPP #define PYTHONIC_OPERATOR_IPOW_HPP #include "pythonic/include/operator_/ipow.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/pow.hpp" PYTHONIC_NS_BEGIN 
namespace operator_ { template A ipow(A const &a, B &&b) { return builtins::pow(a, std::forward(b)); } template A &ipow(A &a, B &&b) { return a = builtins::pow(a, std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/irshift.hpp000066400000000000000000000004111416264035500234400ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IRSHIFT_HPP #define PYTHONIC_OPERATOR_IRSHIFT_HPP #include "pythonic/include/operator_/irshift.hpp" #define OPERATOR_NAME irshift #define OPERATOR_SYMBOL >> #define OPERATOR_ISYMBOL >>= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/is_.hpp000066400000000000000000000010101416264035500225360ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IS_HPP #define PYTHONIC_OPERATOR_IS_HPP #include "pythonic/include/operator_/is_.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/id.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto is_(A &&a, B &&b) -> decltype(builtins::id(std::forward(a)) == builtins::id(std::forward(b))) { return builtins::id(std::forward(a)) == builtins::id(std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/is_not.hpp000066400000000000000000000007631416264035500232750ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ISNOT_HPP #define PYTHONIC_OPERATOR_ISNOT_HPP #include "pythonic/include/operator_/is_not.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto is_not(A &&a, B &&b) -> decltype(builtins::id(std::forward(a)) != builtins::id(std::forward(b))) { return builtins::id(std::forward(a)) != builtins::id(std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/isub.hpp000066400000000000000000000003731416264035500227410ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ISUB_HPP #define PYTHONIC_OPERATOR_ISUB_HPP #include "pythonic/include/operator_/isub.hpp" #define OPERATOR_NAME isub 
#define OPERATOR_SYMBOL - #define OPERATOR_ISYMBOL -= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/itemgetter.hpp000066400000000000000000000037751416264035500241610ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ITEMGETTER_HPP #define PYTHONIC_OPERATOR_ITEMGETTER_HPP #include "pythonic/include/operator_/itemgetter.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/int_.hpp" PYTHONIC_NS_BEGIN namespace operator_ { itemgetter_return::itemgetter_return(long const &item) : i(item) { } template auto itemgetter_return::operator()(A const &a) const -> decltype(a[i]) { return a[i]; } itemgetter_return itemgetter(long item) { return itemgetter_return(item); } template itemgetter_tuple_return::itemgetter_tuple_return(Types... items) : items(items...) { } template itemgetter_tuple_return::itemgetter_tuple_return() { } template template void itemgetter_tuple_return::helper(T &t, A const &a, utils::int_) const { std::get(t) = a[std::get(items)]; helper(t, a, utils::int_()); } template template void itemgetter_tuple_return::helper(T &t, A const &a, utils::int_<0>) const { std::get<0>(t) = a[std::get<0>(items)]; } template template auto itemgetter_tuple_return::operator()(A const &a) const -> std::tuple()])>::type>::type...> { std::tuple()])>::type>::type...> t; helper(t, a, utils::int_()); return t; } template itemgetter_tuple_return itemgetter(long const &item1, long const &item2, L... 
items) { return {item1, item2, items...}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/itruediv.hpp000066400000000000000000000016471416264035500236370ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_ITRUEDIV_HPP #define PYTHONIC_OPERATOR_ITRUEDIV_HPP #include "pythonic/include/operator_/itruediv.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/operator_/truediv.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto itruediv(A const &a, B &&b) -> decltype(truediv(a, std::forward(b))) { return truediv(a, std::forward(b)); } template auto itruediv(A &a, B &&b) -> typename std::enable_if< std::is_same(b)))>::value, A &>::type { return a = truediv(a, std::forward(b)); } template auto itruediv(A &a, B &&b) -> typename std::enable_if< !std::is_same(b)))>::value, decltype(truediv(a, std::forward(b)))>::type { return truediv(a, std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/ixor.hpp000066400000000000000000000003731416264035500227600ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_IXOR_HPP #define PYTHONIC_OPERATOR_IXOR_HPP #include "pythonic/include/operator_/ixor.hpp" #define OPERATOR_NAME ixor #define OPERATOR_SYMBOL ^ #define OPERATOR_ISYMBOL ^= #include "pythonic/operator_/icommon.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/le.hpp000066400000000000000000000007631416264035500224020ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_LE_HPP #define PYTHONIC_OPERATOR_LE_HPP #include "pythonic/include/operator_/le.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { template auto le(A &&a, B &&b) -> decltype(std::forward(a) <= std::forward(b)) { return std::forward(a) <= std::forward(b); } bool le(char const *self, char const *other) { return strcmp(self, other) <= 0; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/lshift.hpp000066400000000000000000000010671416264035500232710ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_LSHIFT_HPP #define PYTHONIC_OPERATOR_LSHIFT_HPP #include "pythonic/include/operator_/lshift.hpp" #include "pythonic/operator_/overloads.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto lshift(A &&a, B &&b) -> decltype(std::forward(a) << std::forward(b)) { return std::forward(a) << std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL( lshift, <<, (a <= (std::numeric_limits::max() >> b))) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/lt.hpp000066400000000000000000000007611416264035500224170ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_LT_HPP #define PYTHONIC_OPERATOR_LT_HPP #include "pythonic/include/operator_/lt.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace operator_ { template auto lt(A &&a, B &&b) -> decltype(std::forward(a) < std::forward(b)) { return std::forward(a) < std::forward(b); } bool lt(char const *self, char const *other) { return strcmp(self, other) < 0; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/matmul.hpp000066400000000000000000000007471416264035500233030ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_MATMUL_HPP #define PYTHONIC_OPERATOR_MATMUL_HPP #include "pythonic/include/operator_/matmul.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/numpy/dot.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto matmul(A &&a, B &&b) -> decltype(numpy::functor::dot{}(std::forward(a), std::forward(b))) { return numpy::functor::dot{}(std::forward(a), std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/mod.hpp000066400000000000000000000022411416264035500225520ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_MOD_HPP #define PYTHONIC_OPERATOR_MOD_HPP #include 
"pythonic/include/operator_/mod.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto mod(A &&a, B &&b) -> typename std::enable_if< std::is_fundamental::type>::value && std::is_fundamental::type>::value, decltype(std::forward(a) % std::forward(b))>::type { auto t = std::forward(a) % b; return t < 0 ? (t + b) : t; } inline double mod(double a, long b) { auto t = std::fmod(a, double(b)); return t < 0 ? (t + b) : t; } inline double mod(double a, double b) { auto t = std::fmod(a, b); return t < 0 ? (t + b) : t; } template auto mod(A &&a, B &&b) // for ndarrays -> typename std::enable_if< !std::is_fundamental::type>::value || !std::is_fundamental::type>::value, decltype(std::forward(a) % std::forward(b))>::type { return std::forward(a) % std::forward(b); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/mul.hpp000066400000000000000000000013061416264035500225710ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_MUL_HPP #define PYTHONIC_OPERATOR_MUL_HPP #include "pythonic/include/operator_/mul.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto mul(A &&a, B &&b) -> decltype(std::forward(a) * std::forward(b)) { return std::forward(a) * std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL( mul, *, (b == 0 || (a * b >= 0 && std::abs(a) <= std::numeric_limits::max() / std::abs(b)) || (a * b <= 0 && std::abs(a) >= std::numeric_limits::min() / std::abs(b)))) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/ne.hpp000066400000000000000000000007241416264035500224010ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_NE_HPP #define PYTHONIC_OPERATOR_NE_HPP #include "pythonic/include/operator_/ne.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto ne(A &&a, B &&b) -> decltype(std::forward(a) != std::forward(b)) { return std::forward(a) != std::forward(b); } 
bool ne(char const *a, char const *b) { return strcmp(a, b) != 0; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/neg.hpp000066400000000000000000000005171416264035500225500ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_NEG_HPP #define PYTHONIC_OPERATOR_NEG_HPP #include "pythonic/include/operator_/neg.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto neg(A &&a) -> decltype(-std::forward(a)) { return -std::forward(a); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/not_.hpp000066400000000000000000000006661416264035500227430ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_NOT_HPP #define PYTHONIC_OPERATOR_NOT_HPP #include "pythonic/include/operator_/not_.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto not_(T &&a) -> decltype(!std::forward(a)) { return !std::forward(a); } template bool not_(std::complex const &a) { return !a.real() && !a.imag(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/or_.hpp000066400000000000000000000007451416264035500225610ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_OR_HPP #define PYTHONIC_OPERATOR_OR_HPP #include "pythonic/include/operator_/or_.hpp" #include "pythonic/operator_/overloads.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto or_(A &&a, B &&b) -> decltype(std::forward(a) | std::forward(b)) { return std::forward(a) | std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL(or_, |, true) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/overloads.hpp000066400000000000000000000032171416264035500237750ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_OVERLOADS_HPP #define PYTHONIC_OPERATOR_OVERLOADS_HPP #include "pythonic/include/operator_/overloads.hpp" #include #define PYTHONIC_OPERATOR_OVERLOAD_IMPL(type, opname, op, overflow_check) \ type opname(type a, type b) \ { \ 
assert((overflow_check) && "overflow check"); \ return a op b; \ } // workaround the fact that char and short computations are done using int in C, // while they are done at their respective type in numpy #define DEFINE_ALL_OPERATOR_OVERLOADS_IMPL(opname, op, overflow_check) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(bool, opname, op, true) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(unsigned char, opname, op, true) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(signed char, opname, op, overflow_check) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(unsigned short, opname, op, true) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(signed short, opname, op, overflow_check) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(unsigned int, opname, op, true) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(signed int, opname, op, overflow_check) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(unsigned long, opname, op, true) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(signed long, opname, op, overflow_check) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(unsigned long long, opname, op, true) \ PYTHONIC_OPERATOR_OVERLOAD_IMPL(signed long long, opname, op, overflow_check) #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/pos.hpp000066400000000000000000000004361416264035500226000ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_POS_HPP #define PYTHONIC_OPERATOR_POS_HPP #include "pythonic/include/operator_/pos.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template A pos(A const &a) { return a; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/pow.hpp000066400000000000000000000002401416264035500225750ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_POW_HPP #define PYTHONIC_OPERATOR_POW_HPP #include "pythonic/include/operator_/pow.hpp" #include "pythonic/builtins/pow.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/rshift.hpp000066400000000000000000000007761416264035500233050ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_RSHIFT_HPP #define PYTHONIC_OPERATOR_RSHIFT_HPP #include 
"pythonic/include/operator_/rshift.hpp" #include "pythonic/operator_/overloads.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto rshift(A &&a, B &&b) -> decltype(std::forward(a) >> std::forward(b)) { return std::forward(a) >> std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL(rshift, >>, true) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/sub.hpp000066400000000000000000000011671416264035500225720ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_SUB_HPP #define PYTHONIC_OPERATOR_SUB_HPP #include "pythonic/include/operator_/sub.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/operator_/overloads.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto sub(A &&a, B &&b) -> decltype(std::forward(a) - std::forward(b)) { return std::forward(a) - std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL( sub, -, (((b < 0) ? (a <= std::numeric_limits::max() + b) : (std::numeric_limits::min() + b <= a)))) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/truediv.hpp000066400000000000000000000006561416264035500234650ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_TRUEDIV_HPP #define PYTHONIC_OPERATOR_TRUEDIV_HPP #include "pythonic/include/operator_/truediv.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto truediv(A &&a, B &&b) -> decltype(std::forward(a) / (double)std::forward(b)) { return std::forward(a) / ((double)std::forward(b)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/operator_/truth.hpp000066400000000000000000000004261416264035500231440ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_TRUTH_HPP #define PYTHONIC_OPERATOR_TRUTH_HPP #include "pythonic/include/operator_/truth.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { bool truth(bool const &a) { return a; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/operator_/xor_.hpp000066400000000000000000000007511416264035500227460ustar00rootroot00000000000000#ifndef PYTHONIC_OPERATOR_XOR_HPP #define PYTHONIC_OPERATOR_XOR_HPP #include "pythonic/include/operator_/xor_.hpp" #include "pythonic/operator_/overloads.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace operator_ { template auto xor_(A &&a, B &&b) -> decltype(std::forward(a) ^ std::forward(b)) { return std::forward(a) ^ std::forward(b); } DEFINE_ALL_OPERATOR_OVERLOADS_IMPL(xor_, ^, true) } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/os/000077500000000000000000000000001416264035500177125ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/os/path/000077500000000000000000000000001416264035500206465ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/os/path/join.hpp000066400000000000000000000027241416264035500223230ustar00rootroot00000000000000#ifndef PYTHONIC_OS_PATH_JOIN_HPP #define PYTHONIC_OS_PATH_JOIN_HPP #ifdef WIN32 #define OS_SEP '\\' #else #define OS_SEP '/' #endif #include "pythonic/include/os/path/join.hpp" #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace os { namespace path { namespace { template size_t sizeof_string(T const &s) { return s.size(); } template size_t sizeof_string(T const &s, Types &&... tail) { return s.size() + sizeof_string(std::forward(tail)...); } void _join(types::str &buffer) { } template void _join(types::str &buffer, T &&head, Types &&... tail) { if (((types::str)head)[0] == "/") buffer = std::forward(head); else if (!buffer || *buffer.chars().rbegin() == OS_SEP || *buffer.rbegin() == "/") buffer += std::forward(head); else { buffer.chars() += OS_SEP; buffer += std::forward(head); } _join(buffer, std::forward(tail)...); } } template T join(T &&head) { return head; } template types::str join(T &&head, Types &&... 
tail) { types::str p = head; p.reserve(sizeof_string(tail...)); _join(p, std::forward(tail)...); return p; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/patch/000077500000000000000000000000001416264035500203705ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/patch/README.rst000066400000000000000000000016471416264035500220670ustar00rootroot00000000000000================= Pythran std patch ================= The implementation of std::complex is very slow, due to the complex type not implementing the limited range (see `-fcx-limited-range`) optimization. Numpy does implement it, so we have to conform to numpy's version. the only way I (SG) found to fix this is to monkey-patch `std::complex`. Inheritance or defining a new class does not work because nt2 assumes we use std::complex. The original source is libcxx, the diff is rather small (mostly removed libcxx internal stuff and use numpy-compliant version of the multiply operator). The speedup is very interesting! GCC does provide the flag `-fcx-limited-range` to fix the issue in a more elegant way, but it is not supported by clang. The CPython impelmentation for complex division can be found in ``Objects/complexobject.c``, and the numpy implementation lies in ``numpy/core/src/umath/loops.c.src``, for those interested. pythran-0.10.0+ds2/pythran/pythonic/patch/complex000066400000000000000000001301301416264035500217600ustar00rootroot00000000000000// -*- C++ -*- //===--------------------------- complex ----------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. 
// //===----------------------------------------------------------------------===// #ifndef _LIBCPP_COMPLEX #define _LIBCPP_COMPLEX #if defined _GLIBCXX_COMPLEX #error monkey patching of failed #endif /* complex synopsis namespace std { template class complex { public: typedef T value_type; complex(const T& re = T(), const T& im = T()); // constexpr in C++14 complex(const complex&); // constexpr in C++14 template complex(const complex&); // constexpr in C++14 T real() const; // constexpr in C++14 T imag() const; // constexpr in C++14 void real(T); void imag(T); complex& operator= (const T&); complex& operator+=(const T&); complex& operator-=(const T&); complex& operator*=(const T&); complex& operator/=(const T&); complex& operator=(const complex&); template complex& operator= (const complex&); template complex& operator+=(const complex&); template complex& operator-=(const complex&); template complex& operator*=(const complex&); template complex& operator/=(const complex&); }; template<> class complex { public: typedef float value_type; constexpr complex(float re = 0.0f, float im = 0.0f); explicit constexpr complex(const complex&); explicit constexpr complex(const complex&); constexpr float real() const; void real(float); constexpr float imag() const; void imag(float); complex& operator= (float); complex& operator+=(float); complex& operator-=(float); complex& operator*=(float); complex& operator/=(float); complex& operator=(const complex&); template complex& operator= (const complex&); template complex& operator+=(const complex&); template complex& operator-=(const complex&); template complex& operator*=(const complex&); template complex& operator/=(const complex&); }; template<> class complex { public: typedef double value_type; constexpr complex(double re = 0.0, double im = 0.0); constexpr complex(const complex&); explicit constexpr complex(const complex&); constexpr double real() const; void real(double); constexpr double imag() const; void imag(double); 
complex& operator= (double); complex& operator+=(double); complex& operator-=(double); complex& operator*=(double); complex& operator/=(double); complex& operator=(const complex&); template complex& operator= (const complex&); template complex& operator+=(const complex&); template complex& operator-=(const complex&); template complex& operator*=(const complex&); template complex& operator/=(const complex&); }; template<> class complex { public: typedef long double value_type; constexpr complex(long double re = 0.0L, long double im = 0.0L); constexpr complex(const complex&); constexpr complex(const complex&); constexpr long double real() const; void real(long double); constexpr long double imag() const; void imag(long double); complex& operator=(const complex&); complex& operator= (long double); complex& operator+=(long double); complex& operator-=(long double); complex& operator*=(long double); complex& operator/=(long double); template complex& operator= (const complex&); template complex& operator+=(const complex&); template complex& operator-=(const complex&); template complex& operator*=(const complex&); template complex& operator/=(const complex&); }; // 26.3.6 operators: template complex operator+(const complex&, const complex&); template complex operator+(const complex&, const T&); template complex operator+(const T&, const complex&); template complex operator-(const complex&, const complex&); template complex operator-(const complex&, const T&); template complex operator-(const T&, const complex&); template complex operator*(const complex&, const complex&); template complex operator*(const complex&, const T&); template complex operator*(const T&, const complex&); template complex operator/(const complex&, const complex&); template complex operator/(const complex&, const T&); template complex operator/(const T&, const complex&); template complex operator+(const complex&); template complex operator-(const complex&); template bool operator==(const complex&, 
const complex&); // constexpr in C++14 template bool operator==(const complex&, const T&); // constexpr in C++14 template bool operator==(const T&, const complex&); // constexpr in C++14 template bool operator!=(const complex&, const complex&); // constexpr in C++14 template bool operator!=(const complex&, const T&); // constexpr in C++14 template bool operator!=(const T&, const complex&); // constexpr in C++14 template basic_istream& operator>>(basic_istream&, complex&); template basic_ostream& operator<<(basic_ostream&, const complex&); // 26.3.7 values: template T real(const complex&); // constexpr in C++14 long double real(long double); // constexpr in C++14 double real(double); // constexpr in C++14 template double real(T); // constexpr in C++14 float real(float); // constexpr in C++14 template T imag(const complex&); // constexpr in C++14 long double imag(long double); // constexpr in C++14 double imag(double); // constexpr in C++14 template double imag(T); // constexpr in C++14 float imag(float); // constexpr in C++14 template T abs(const complex&); template T arg(const complex&); long double arg(long double); double arg(double); template double arg(T); float arg(float); template T norm(const complex&); long double norm(long double); double norm(double); template double norm(T); float norm(float); template complex conj(const complex&); complex conj(long double); complex conj(double); template complex conj(T); complex conj(float); template complex proj(const complex&); complex proj(long double); complex proj(double); template complex proj(T); complex proj(float); template complex polar(const T&, const T& = 0); // 26.3.8 transcendentals: template complex acos(const complex&); template complex asin(const complex&); template complex atan(const complex&); template complex acosh(const complex&); template complex asinh(const complex&); template complex atanh(const complex&); template complex cos (const complex&); template complex cosh (const complex&); template 
complex exp (const complex&); template complex log (const complex&); template complex log10(const complex&); template complex pow(const complex&, const T&); template complex pow(const complex&, const complex&); template complex pow(const T&, const complex&); template complex sin (const complex&); template complex sinh (const complex&); template complex sqrt (const complex&); template complex tan (const complex&); template complex tanh (const complex&); template basic_istream& operator>>(basic_istream& is, complex& x); template basic_ostream& operator<<(basic_ostream& o, const complex& x); } // std */ // pythran #include <__config> #define _LIBCPP_BEGIN_NAMESPACE_STD \ namespace std \ { #define _LIBCPP_END_NAMESPACE_STD } #define _LIBCPP_TYPE_VIS_ONLY #define _VSTD std #define _LIBCPP_INLINE_VISIBILITY #define _LIBCPP_CONSTEXPR_AFTER_CXX11 constexpr #define _LIBCPP_CONSTEXPR #include #include #include #include #if defined(_LIBCPP_NO_EXCEPTIONS) #include #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) #pragma GCC system_header #endif _LIBCPP_BEGIN_NAMESPACE_STD template struct __promote { typedef decltype(std::declval() + std::declval()) type; }; template class _LIBCPP_TYPE_VIS_ONLY complex; template complex<_Tp> operator*(const complex<_Tp> &__z, const complex<_Tp> &__w); template complex<_Tp> operator/(const complex<_Tp> &__x, const complex<_Tp> &__y); template class _LIBCPP_TYPE_VIS_ONLY complex { public: typedef _Tp value_type; private: value_type __re_; value_type __im_; public: _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 complex(const value_type &__re = value_type(), const value_type &__im = value_type()) : __re_(__re), __im_(__im) { } template _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 complex(const complex<_Xp> &__c) : __re_(__c.real()), __im_(__c.imag()) { } _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 value_type real() const { return __re_; } _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 value_type 
imag() const { return __im_; } _LIBCPP_INLINE_VISIBILITY void real(value_type __re) { __re_ = __re; } _LIBCPP_INLINE_VISIBILITY void imag(value_type __im) { __im_ = __im; } _LIBCPP_INLINE_VISIBILITY complex &operator=(const value_type &__re) { __re_ = __re; __im_ = value_type(); return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator+=(const value_type &__re) { __re_ += __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator-=(const value_type &__re) { __re_ -= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator*=(const value_type &__re) { __re_ *= __re; __im_ *= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator/=(const value_type &__re) { __re_ /= __re; __im_ /= __re; return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator=(const complex<_Xp> &__c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator+=(const complex<_Xp> &__c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator-=(const complex<_Xp> &__c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator*=(const complex<_Xp> &__c) { *this = *this *complex(__c.real(), __c.imag()); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator/=(const complex<_Xp> &__c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; template <> class _LIBCPP_TYPE_VIS_ONLY complex; template <> class _LIBCPP_TYPE_VIS_ONLY complex; template <> class _LIBCPP_TYPE_VIS_ONLY complex { float __re_; float __im_; public: typedef float value_type; _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex(float __re = 0.0f, float __im = 0.0f) : __re_(__re), __im_(__im) { } explicit _LIBCPP_CONSTEXPR complex(const complex &__c); explicit _LIBCPP_CONSTEXPR complex(const complex &__c); _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR float real() const { return __re_; } _LIBCPP_INLINE_VISIBILITY 
_LIBCPP_CONSTEXPR float imag() const { return __im_; } _LIBCPP_INLINE_VISIBILITY void real(value_type __re) { __re_ = __re; } _LIBCPP_INLINE_VISIBILITY void imag(value_type __im) { __im_ = __im; } _LIBCPP_INLINE_VISIBILITY complex &operator=(float __re) { __re_ = __re; __im_ = value_type(); return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator+=(float __re) { __re_ += __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator-=(float __re) { __re_ -= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator*=(float __re) { __re_ *= __re; __im_ *= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator/=(float __re) { __re_ /= __re; __im_ /= __re; return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator=(const complex<_Xp> &__c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator+=(const complex<_Xp> &__c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator-=(const complex<_Xp> &__c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator*=(const complex<_Xp> &__c) { *this = *this *complex(__c.real(), __c.imag()); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator/=(const complex<_Xp> &__c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; template <> class _LIBCPP_TYPE_VIS_ONLY complex { double __re_; double __im_; public: typedef double value_type; _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex(double __re = 0.0, double __im = 0.0) : __re_(__re), __im_(__im) { } _LIBCPP_CONSTEXPR complex(const complex &__c); explicit _LIBCPP_CONSTEXPR complex(const complex &__c); _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR double real() const { return __re_; } _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR double imag() const { return __im_; } _LIBCPP_INLINE_VISIBILITY void real(value_type __re) { __re_ = __re; } 
_LIBCPP_INLINE_VISIBILITY void imag(value_type __im) { __im_ = __im; } _LIBCPP_INLINE_VISIBILITY complex &operator=(double __re) { __re_ = __re; __im_ = value_type(); return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator+=(double __re) { __re_ += __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator-=(double __re) { __re_ -= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator*=(double __re) { __re_ *= __re; __im_ *= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator/=(double __re) { __re_ /= __re; __im_ /= __re; return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator=(const complex<_Xp> &__c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator+=(const complex<_Xp> &__c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator-=(const complex<_Xp> &__c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator*=(const complex<_Xp> &__c) { *this = *this *complex(__c.real(), __c.imag()); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator/=(const complex<_Xp> &__c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; template <> class _LIBCPP_TYPE_VIS_ONLY complex { long double __re_; long double __im_; public: typedef long double value_type; _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex(long double __re = 0.0L, long double __im = 0.0L) : __re_(__re), __im_(__im) { } _LIBCPP_CONSTEXPR complex(const complex &__c); _LIBCPP_CONSTEXPR complex(const complex &__c); _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR long double real() const { return __re_; } _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR long double imag() const { return __im_; } _LIBCPP_INLINE_VISIBILITY void real(value_type __re) { __re_ = __re; } _LIBCPP_INLINE_VISIBILITY void imag(value_type __im) { __im_ = __im; } _LIBCPP_INLINE_VISIBILITY complex 
&operator=(long double __re) { __re_ = __re; __im_ = value_type(); return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator+=(long double __re) { __re_ += __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator-=(long double __re) { __re_ -= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator*=(long double __re) { __re_ *= __re; __im_ *= __re; return *this; } _LIBCPP_INLINE_VISIBILITY complex &operator/=(long double __re) { __re_ /= __re; __im_ /= __re; return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator=(const complex<_Xp> &__c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator+=(const complex<_Xp> &__c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator-=(const complex<_Xp> &__c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator*=(const complex<_Xp> &__c) { *this = *this *complex(__c.real(), __c.imag()); return *this; } template _LIBCPP_INLINE_VISIBILITY complex &operator/=(const complex<_Xp> &__c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex::complex(const complex &__c) : __re_(__c.real()), __im_(__c.imag()) { } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex::complex(const complex &__c) : __re_(__c.real()), __im_(__c.imag()) { } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex::complex(const complex &__c) : __re_(__c.real()), __im_(__c.imag()) { } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex::complex(const complex &__c) : __re_(__c.real()), __im_(__c.imag()) { } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex::complex(const complex &__c) : __re_(__c.real()), __im_(__c.imag()) { } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR complex::complex(const complex &__c) : __re_(__c.real()), __im_(__c.imag()) { } 
// 26.3.6 operators: template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator+(const complex<_Tp> &__x, const complex<_Tp> &__y) { complex<_Tp> __t(__x); __t += __y; return __t; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator+(const complex<_Tp> &__x, const _Tp &__y) { complex<_Tp> __t(__x); __t += __y; return __t; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator+(const _Tp &__x, const complex<_Tp> &__y) { complex<_Tp> __t(__y); __t += __x; return __t; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator-(const complex<_Tp> &__x, const complex<_Tp> &__y) { complex<_Tp> __t(__x); __t -= __y; return __t; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator-(const complex<_Tp> &__x, const _Tp &__y) { complex<_Tp> __t(__x); __t -= __y; return __t; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator-(const _Tp &__x, const complex<_Tp> &__y) { complex<_Tp> __t(-__y); __t += __x; return __t; } template complex<_Tp> operator*(const complex<_Tp> &__z, const complex<_Tp> &__w) { _Tp __a = __z.real(); _Tp __b = __z.imag(); _Tp __c = __w.real(); _Tp __d = __w.imag(); _Tp __ac = __a * __c; _Tp __bd = __b * __d; _Tp __ad = __a * __d; _Tp __bc = __b * __c; _Tp __x = __ac - __bd; _Tp __y = __ad + __bc; #if 0 // not required by numpy! if (isnan(__x) && isnan(__y)) { bool __recalc = false; if (isinf(__a) || isinf(__b)) { __a = copysign(isinf(__a) ? _Tp(1) : _Tp(0), __a); __b = copysign(isinf(__b) ? _Tp(1) : _Tp(0), __b); if (isnan(__c)) __c = copysign(_Tp(0), __c); if (isnan(__d)) __d = copysign(_Tp(0), __d); __recalc = true; } if (isinf(__c) || isinf(__d)) { __c = copysign(isinf(__c) ? _Tp(1) : _Tp(0), __c); __d = copysign(isinf(__d) ? 
_Tp(1) : _Tp(0), __d); if (isnan(__a)) __a = copysign(_Tp(0), __a); if (isnan(__b)) __b = copysign(_Tp(0), __b); __recalc = true; } if (!__recalc && (isinf(__ac) || isinf(__bd) || isinf(__ad) || isinf(__bc))) { if (isnan(__a)) __a = copysign(_Tp(0), __a); if (isnan(__b)) __b = copysign(_Tp(0), __b); if (isnan(__c)) __c = copysign(_Tp(0), __c); if (isnan(__d)) __d = copysign(_Tp(0), __d); __recalc = true; } if (__recalc) { __x = _Tp(INFINITY) * (__a * __c - __b * __d); __y = _Tp(INFINITY) * (__a * __d + __b * __c); } } #endif return complex<_Tp>(__x, __y); } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator*(const complex<_Tp> &__x, const _Tp &__y) { complex<_Tp> __t(__x); __t *= __y; return __t; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator*(const _Tp &__x, const complex<_Tp> &__y) { complex<_Tp> __t(__y); __t *= __x; return __t; } template complex<_Tp> operator/(const complex<_Tp> &__z, const complex<_Tp> &__w) { int __ilogbw = 0; _Tp __a = __z.real(); _Tp __b = __z.imag(); _Tp __c = __w.real(); _Tp __d = __w.imag(); #if 0 // not required by numpy _Tp __logbw = logb(fmax(fabs(__c), fabs(__d))); if (isfinite(__logbw)) { __ilogbw = static_cast(__logbw); __c = scalbn(__c, -__ilogbw); __d = scalbn(__d, -__ilogbw); } _Tp __denom = __c * __c + __d * __d; _Tp __x = scalbn((__a * __c + __b * __d) / __denom, -__ilogbw); _Tp __y = scalbn((__b * __c - __a * __d) / __denom, -__ilogbw); if (isnan(__x) && isnan(__y)) { if ((__denom == _Tp(0)) && (!isnan(__a) || !isnan(__b))) { __x = copysign(_Tp(INFINITY), __c) * __a; __y = copysign(_Tp(INFINITY), __c) * __b; } else if ((isinf(__a) || isinf(__b)) && isfinite(__c) && isfinite(__d)) { __a = copysign(isinf(__a) ? _Tp(1) : _Tp(0), __a); __b = copysign(isinf(__b) ? _Tp(1) : _Tp(0), __b); __x = _Tp(INFINITY) * (__a * __c + __b * __d); __y = _Tp(INFINITY) * (__b * __c - __a * __d); } else if (isinf(__logbw) && __logbw > _Tp(0) && isfinite(__a) && isfinite(__b)) { __c = copysign(isinf(__c) ? 
_Tp(1) : _Tp(0), __c); __d = copysign(isinf(__d) ? _Tp(1) : _Tp(0), __d); __x = _Tp(0) * (__a * __c + __b * __d); __y = _Tp(0) * (__b * __c - __a * __d); } } return complex<_Tp>(__x, __y); #else _Tp __e = __c * __c + __d * __d; return complex<_Tp>(__c * __a + __d * __b, __c * __b - __d * __a) / __e; #endif } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator/(const complex<_Tp> &__x, const _Tp &__y) { return complex<_Tp>(__x.real() / __y, __x.imag() / __y); } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator/(const _Tp &__x, const complex<_Tp> &__y) { complex<_Tp> __t(__x); __t /= __y; return __t; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator+(const complex<_Tp> &__x) { return __x; } template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> operator-(const complex<_Tp> &__x) { return complex<_Tp>(-__x.real(), -__x.imag()); } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 bool operator==(const complex<_Tp> &__x, const complex<_Tp> &__y) { return __x.real() == __y.real() && __x.imag() == __y.imag(); } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 bool operator==(const complex<_Tp> &__x, const _Tp &__y) { return __x.real() == __y && __x.imag() == 0; } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 bool operator==(const _Tp &__x, const complex<_Tp> &__y) { return __x == __y.real() && 0 == __y.imag(); } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 bool operator!=(const complex<_Tp> &__x, const complex<_Tp> &__y) { return !(__x == __y); } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 bool operator!=(const complex<_Tp> &__x, const _Tp &__y) { return !(__x == __y); } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 bool operator!=(const _Tp &__x, const complex<_Tp> &__y) { return !(__x == __y); } // 26.3.7 values: // real template inline _LIBCPP_INLINE_VISIBILITY 
_LIBCPP_CONSTEXPR_AFTER_CXX11 _Tp real(const complex<_Tp> &__c) { return __c.real(); } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 long double real(long double __re) { return __re; } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 double real(double __re) { return __re; } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 typename enable_if::value, double>::type real(_Tp __re) { return __re; } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 float real(float __re) { return __re; } // imag template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 _Tp imag(const complex<_Tp> &__c) { return __c.imag(); } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 long double imag(long double __re) { return 0; } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 double imag(double __re) { return 0; } template inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 typename enable_if::value, double>::type imag(_Tp __re) { return 0; } inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX11 float imag(float __re) { return 0; } // abs template inline _LIBCPP_INLINE_VISIBILITY _Tp abs(const complex<_Tp> &__c) { return hypot(__c.real(), __c.imag()); } // arg template inline _LIBCPP_INLINE_VISIBILITY _Tp arg(const complex<_Tp> &__c) { return atan2(__c.imag(), __c.real()); } inline _LIBCPP_INLINE_VISIBILITY long double arg(long double __re) { return atan2l(0.L, __re); } inline _LIBCPP_INLINE_VISIBILITY double arg(double __re) { return atan2(0., __re); } template inline _LIBCPP_INLINE_VISIBILITY typename enable_if::value, double>::type arg(_Tp __re) { return atan2(0., __re); } inline _LIBCPP_INLINE_VISIBILITY float arg(float __re) { return atan2f(0.F, __re); } // norm template inline _LIBCPP_INLINE_VISIBILITY _Tp norm(const complex<_Tp> &__c) { if (isinf(__c.real())) return abs(__c.real()); if (isinf(__c.imag())) return abs(__c.imag()); return __c.real() * __c.real() 
+ __c.imag() * __c.imag(); } inline _LIBCPP_INLINE_VISIBILITY long double norm(long double __re) { return __re * __re; } inline _LIBCPP_INLINE_VISIBILITY double norm(double __re) { return __re * __re; } template inline _LIBCPP_INLINE_VISIBILITY typename enable_if::value, double>::type norm(_Tp __re) { return (double)__re * __re; } inline _LIBCPP_INLINE_VISIBILITY float norm(float __re) { return __re * __re; } // conj template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> conj(const complex<_Tp> &__c) { return complex<_Tp>(__c.real(), -__c.imag()); } inline _LIBCPP_INLINE_VISIBILITY complex conj(long double __re) { return complex(__re); } inline _LIBCPP_INLINE_VISIBILITY complex conj(double __re) { return complex(__re); } template inline _LIBCPP_INLINE_VISIBILITY typename enable_if::value, complex>::type conj(_Tp __re) { return complex(__re); } inline _LIBCPP_INLINE_VISIBILITY complex conj(float __re) { return complex(__re); } // proj template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> proj(const complex<_Tp> &__c) { std::complex<_Tp> __r = __c; if (isinf(__c.real()) || isinf(__c.imag())) __r = complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag())); return __r; } inline _LIBCPP_INLINE_VISIBILITY complex proj(long double __re) { if (isinf(__re)) __re = abs(__re); return complex(__re); } inline _LIBCPP_INLINE_VISIBILITY complex proj(double __re) { if (isinf(__re)) __re = abs(__re); return complex(__re); } template inline _LIBCPP_INLINE_VISIBILITY typename enable_if::value, complex>::type proj(_Tp __re) { return complex(__re); } inline _LIBCPP_INLINE_VISIBILITY complex proj(float __re) { if (isinf(__re)) __re = abs(__re); return complex(__re); } // polar template complex<_Tp> polar(const _Tp &__rho, const _Tp &__theta = _Tp(0)) { if (isnan(__rho) || signbit(__rho)) return complex<_Tp>(_Tp(NAN), _Tp(NAN)); if (isnan(__theta)) { if (isinf(__rho)) return complex<_Tp>(__rho, __theta); return complex<_Tp>(__theta, __theta); } if (isinf(__theta)) { if (isinf(__rho)) return 
complex<_Tp>(__rho, _Tp(NAN)); return complex<_Tp>(_Tp(NAN), _Tp(NAN)); } _Tp __x = __rho * cos(__theta); if (isnan(__x)) __x = 0; _Tp __y = __rho * sin(__theta); if (isnan(__y)) __y = 0; return complex<_Tp>(__x, __y); } // log template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> log(const complex<_Tp> &__x) { return complex<_Tp>(log(abs(__x)), arg(__x)); } // log10 template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> log10(const complex<_Tp> &__x) { return log(__x) / log(_Tp(10)); } // sqrt template complex<_Tp> sqrt(const complex<_Tp> &__x) { if (isinf(__x.imag())) return complex<_Tp>(_Tp(INFINITY), __x.imag()); if (isinf(__x.real())) { if (__x.real() > _Tp(0)) return complex<_Tp>(__x.real(), isnan(__x.imag()) ? __x.imag() : copysign(_Tp(0), __x.imag())); return complex<_Tp>(isnan(__x.imag()) ? __x.imag() : _Tp(0), copysign(__x.real(), __x.imag())); } return polar(sqrt(abs(__x)), arg(__x) / _Tp(2)); } // exp template complex<_Tp> exp(const complex<_Tp> &__x) { _Tp __i = __x.imag(); if (isinf(__x.real())) { if (__x.real() < _Tp(0)) { if (!isfinite(__i)) __i = _Tp(1); } else if (__i == 0 || !isfinite(__i)) { if (isinf(__i)) __i = _Tp(NAN); return complex<_Tp>(__x.real(), __i); } } else if (isnan(__x.real()) && __x.imag() == 0) return __x; _Tp __e = exp(__x.real()); return complex<_Tp>(__e * cos(__i), __e * sin(__i)); } // pow template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> pow(const complex<_Tp> &__x, const complex<_Tp> &__y) { return exp(__y * log(__x)); } template inline _LIBCPP_INLINE_VISIBILITY complex::type> pow(const complex<_Tp> &__x, const complex<_Up> &__y) { typedef complex::type> result_type; return _VSTD::pow(result_type(__x), result_type(__y)); } template inline _LIBCPP_INLINE_VISIBILITY typename enable_if::value, complex::type>>::type pow(const complex<_Tp> &__x, const _Up &__y) { typedef complex::type> result_type; return _VSTD::pow(result_type(__x), result_type(__y)); } template inline _LIBCPP_INLINE_VISIBILITY typename enable_if::value, 
complex::type>>::type pow(const _Tp &__x, const complex<_Up> &__y) { typedef complex::type> result_type; return _VSTD::pow(result_type(__x), result_type(__y)); } // asinh template complex<_Tp> asinh(const complex<_Tp> &__x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.real())) { if (isnan(__x.imag())) return __x; if (isinf(__x.imag())) return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag())); return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag())); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(__x.imag(), __x.real()); if (__x.imag() == 0) return __x; return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>(copysign(__x.imag(), __x.real()), copysign(__pi / _Tp(2), __x.imag())); complex<_Tp> __z = log(__x + sqrt(pow(__x, _Tp(2)) + _Tp(1))); return complex<_Tp>(copysign(__z.real(), __x.real()), copysign(__z.imag(), __x.imag())); } // acosh template complex<_Tp> acosh(const complex<_Tp> &__x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.real())) { if (isnan(__x.imag())) return complex<_Tp>(abs(__x.real()), __x.imag()); if (isinf(__x.imag())) { if (__x.real() > 0) return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag())); else return complex<_Tp>(-__x.real(), copysign(__pi * _Tp(0.75), __x.imag())); } if (__x.real() < 0) return complex<_Tp>(-__x.real(), copysign(__pi, __x.imag())); return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag())); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(abs(__x.imag()), __x.real()); return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>(abs(__x.imag()), copysign(__pi / _Tp(2), __x.imag())); complex<_Tp> __z = log(__x + sqrt(pow(__x, _Tp(2)) - _Tp(1))); return complex<_Tp>(copysign(__z.real(), _Tp(0)), copysign(__z.imag(), __x.imag())); } // atanh template complex<_Tp> atanh(const complex<_Tp> &__x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.imag())) { return 
complex<_Tp>(copysign(_Tp(0), __x.real()), copysign(__pi / _Tp(2), __x.imag())); } if (isnan(__x.imag())) { if (isinf(__x.real()) || __x.real() == 0) return complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag()); return complex<_Tp>(__x.imag(), __x.imag()); } if (isnan(__x.real())) { return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.real())) { return complex<_Tp>(copysign(_Tp(0), __x.real()), copysign(__pi / _Tp(2), __x.imag())); } if (abs(__x.real()) == _Tp(1) && __x.imag() == _Tp(0)) { return complex<_Tp>(copysign(_Tp(INFINITY), __x.real()), copysign(_Tp(0), __x.imag())); } complex<_Tp> __z = log((_Tp(1) + __x) / (_Tp(1) - __x)) / _Tp(2); return complex<_Tp>(copysign(__z.real(), __x.real()), copysign(__z.imag(), __x.imag())); } // sinh template complex<_Tp> sinh(const complex<_Tp> &__x) { if (isinf(__x.real()) && !isfinite(__x.imag())) return complex<_Tp>(__x.real(), _Tp(NAN)); if (__x.real() == 0 && !isfinite(__x.imag())) return complex<_Tp>(__x.real(), _Tp(NAN)); if (__x.imag() == 0 && !isfinite(__x.real())) return __x; return complex<_Tp>(sinh(__x.real()) * cos(__x.imag()), cosh(__x.real()) * sin(__x.imag())); } // cosh template complex<_Tp> cosh(const complex<_Tp> &__x) { if (isinf(__x.real()) && !isfinite(__x.imag())) return complex<_Tp>(abs(__x.real()), _Tp(NAN)); if (__x.real() == 0 && !isfinite(__x.imag())) return complex<_Tp>(_Tp(NAN), __x.real()); if (__x.real() == 0 && __x.imag() == 0) return complex<_Tp>(_Tp(1), __x.imag()); if (__x.imag() == 0 && !isfinite(__x.real())) return complex<_Tp>(abs(__x.real()), __x.imag()); return complex<_Tp>(cosh(__x.real()) * cos(__x.imag()), sinh(__x.real()) * sin(__x.imag())); } // tanh template complex<_Tp> tanh(const complex<_Tp> &__x) { if (isinf(__x.real())) { if (!isfinite(__x.imag())) return complex<_Tp>(_Tp(1), _Tp(0)); return complex<_Tp>(_Tp(1), copysign(_Tp(0), sin(_Tp(2) * __x.imag()))); } if (isnan(__x.real()) && __x.imag() == 0) return __x; _Tp __2r(_Tp(2) * __x.real()); _Tp __2i(_Tp(2) * 
__x.imag()); _Tp __d(cosh(__2r) + cos(__2i)); _Tp __2rsh(sinh(__2r)); if (isinf(__2rsh) && isinf(__d)) return complex<_Tp>(__2rsh > _Tp(0) ? _Tp(1) : _Tp(-1), __2i > _Tp(0) ? _Tp(0) : _Tp(-0.)); return complex<_Tp>(__2rsh / __d, sin(__2i) / __d); } // asin template complex<_Tp> asin(const complex<_Tp> &__x) { complex<_Tp> __z = asinh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // acos template complex<_Tp> acos(const complex<_Tp> &__x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.real())) { if (isnan(__x.imag())) return complex<_Tp>(__x.imag(), __x.real()); if (isinf(__x.imag())) { if (__x.real() < _Tp(0)) return complex<_Tp>(_Tp(0.75) * __pi, -__x.imag()); return complex<_Tp>(_Tp(0.25) * __pi, -__x.imag()); } if (__x.real() < _Tp(0)) return complex<_Tp>(__pi, signbit(__x.imag()) ? -__x.real() : __x.real()); return complex<_Tp>(_Tp(0), signbit(__x.imag()) ? __x.real() : -__x.real()); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(__x.real(), -__x.imag()); return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>(__pi / _Tp(2), -__x.imag()); if (__x.real() == 0) return complex<_Tp>(__pi / _Tp(2), -__x.imag()); complex<_Tp> __z = log(__x + sqrt(pow(__x, _Tp(2)) - _Tp(1))); if (signbit(__x.imag())) return complex<_Tp>(abs(__z.imag()), abs(__z.real())); return complex<_Tp>(abs(__z.imag()), -abs(__z.real())); } // atan template complex<_Tp> atan(const complex<_Tp> &__x) { complex<_Tp> __z = atanh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // sin template complex<_Tp> sin(const complex<_Tp> &__x) { complex<_Tp> __z = sinh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // cos template inline _LIBCPP_INLINE_VISIBILITY complex<_Tp> cos(const complex<_Tp> &__x) { return cosh(complex<_Tp>(-__x.imag(), __x.real())); } // tan template complex<_Tp> tan(const complex<_Tp> &__x) { complex<_Tp> 
__z = tanh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } template basic_istream<_CharT, _Traits> &operator>>(basic_istream<_CharT, _Traits> &__is, complex<_Tp> &__x) { if (__is.good()) { ws(__is); if (__is.peek() == _CharT('(')) { __is.get(); _Tp __r; __is >> __r; if (!__is.fail()) { ws(__is); _CharT __c = __is.peek(); if (__c == _CharT(',')) { __is.get(); _Tp __i; __is >> __i; if (!__is.fail()) { ws(__is); __c = __is.peek(); if (__c == _CharT(')')) { __is.get(); __x = complex<_Tp>(__r, __i); } else __is.setstate(ios_base::failbit); } else __is.setstate(ios_base::failbit); } else if (__c == _CharT(')')) { __is.get(); __x = complex<_Tp>(__r, _Tp(0)); } else __is.setstate(ios_base::failbit); } else __is.setstate(ios_base::failbit); } else { _Tp __r; __is >> __r; if (!__is.fail()) __x = complex<_Tp>(__r, _Tp(0)); else __is.setstate(ios_base::failbit); } } else __is.setstate(ios_base::failbit); return __is; } template basic_ostream<_CharT, _Traits> &operator<<(basic_ostream<_CharT, _Traits> &__os, const complex<_Tp> &__x) { basic_ostringstream<_CharT, _Traits> __s; __s.flags(__os.flags()); __s.imbue(__os.getloc()); __s.precision(__os.precision()); __s << '(' << __x.real() << ',' << __x.imag() << ')'; return __os << __s.str(); } #if _LIBCPP_STD_VER > 11 // Literal suffix for complex number literals [complex.literals] inline namespace literals { inline namespace complex_literals { constexpr complex operator""il(long double __im) { return {0.0l, __im}; } constexpr complex operator""il(unsigned long long __im) { return {0.0l, static_cast(__im)}; } constexpr complex operator""i(long double __im) { return {0.0, static_cast(__im)}; } constexpr complex operator""i(unsigned long long __im) { return {0.0, static_cast(__im)}; } constexpr complex operator""if(long double __im) { return {0.0f, static_cast(__im)}; } constexpr complex operator""if(unsigned long long __im) { return {0.0f, static_cast(__im)}; } } } #endif 
_LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP_COMPLEX pythran-0.10.0+ds2/pythran/pythonic/python/000077500000000000000000000000001416264035500206125ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/python/core.hpp000066400000000000000000000116501416264035500222560ustar00rootroot00000000000000#ifndef PYTHONIC_PYTHON_CORE_HPP #define PYTHONIC_PYTHON_CORE_HPP #ifdef ENABLE_PYTHON_MODULE #include "Python.h" // Python defines this for windows, and it's not needed in C++ #undef copysign #include #include #include // Cython still uses the deprecated API, so we can't set this macro in this // case! #ifndef CYTHON_ABI #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #endif #include "numpy/arrayobject.h" PYTHONIC_NS_BEGIN template struct to_python; template struct from_python; PYTHONIC_NS_END template auto to_python(T &&value) -> decltype(pythonic::to_python< typename std::remove_cv::type>::type>:: convert(std::forward(value))) { return pythonic::to_python< typename std::remove_cv::type>::type>:: convert(std::forward(value)); } template T from_python(PyObject *obj) { return pythonic::from_python::convert(obj); } template bool is_convertible(PyObject *obj) { return pythonic::from_python::is_convertible(obj); } PYTHONIC_NS_BEGIN namespace python { #ifndef PyString_AS_STRING #define PyString_AS_STRING (char *) _PyUnicode_COMPACT_DATA #endif void PyObject_TypePrettyPrinter(std::ostream &oss, PyObject *obj) { if (PyTuple_Check(obj)) { oss << '('; for (long n = PyTuple_GET_SIZE(obj), i = 0; i < n; ++i) { PyObject_TypePrettyPrinter(oss, PyTuple_GET_ITEM(obj, i)); if (i != n - 1) oss << ", "; } oss << ')'; } else if (PyArray_Check(obj)) { auto *arr = (PyArrayObject *)obj; auto *descr = PyArray_DESCR(arr); auto *dtype = descr->typeobj; auto *repr = PyObject_GetAttrString((PyObject *)dtype, "__name__"); oss << PyString_AS_STRING(repr); Py_DECREF(repr); oss << '['; for (int i = 0, n = PyArray_NDIM(arr); i < n; ++i) { oss << ':'; if (i != n - 1) oss << ", "; } oss << ']'; 
if ((PyArray_FLAGS(arr) & NPY_ARRAY_F_CONTIGUOUS) && ((PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS) == 0) && (PyArray_NDIM(arr) > 1)) { oss << " (with unsupported column-major layout)"; } else if (PyArray_BASE(arr)) { oss << " (is a view)"; } else { auto const *stride = PyArray_STRIDES(arr); auto const *dims = PyArray_DIMS(arr); long current_stride = PyArray_ITEMSIZE(arr); for (long i = PyArray_NDIM(arr) - 1; i >= 0; i--) { if (stride[i] != current_stride) { oss << " (is strided)"; break; } current_stride *= dims[i]; } } } else if (PyList_Check(obj)) { if (PyObject_Not(obj)) { oss << "empty list"; } else { PyObject_TypePrettyPrinter(oss, PySequence_Fast_GET_ITEM(obj, 0)); oss << " list"; } } else if (PySet_Check(obj)) { PyObject *iterator = PyObject_GetIter(obj); if (PyObject *item = PyIter_Next(iterator)) { PyObject_TypePrettyPrinter(oss, item); Py_DECREF(item); Py_DECREF(iterator); oss << " set"; } else { Py_DECREF(iterator); oss << "empty set"; } } else if (PyDict_Check(obj)) { PyObject *key, *value; Py_ssize_t pos = 0; if (PyDict_Next(obj, &pos, &key, &value)) { PyObject_TypePrettyPrinter(oss, key); oss << ", "; PyObject_TypePrettyPrinter(oss, value); oss << " dict"; } else oss << "empty dict"; } else if (PyCapsule_CheckExact(obj)) { oss << PyCapsule_GetName(obj); } else { auto *repr = PyObject_GetAttrString((PyObject *)Py_TYPE(obj), "__name__"); oss << PyString_AS_STRING(repr); Py_DECREF(repr); } } std::nullptr_t raise_invalid_argument(char const name[], char const alternatives[], PyObject *args, PyObject *kwargs) { std::ostringstream oss; oss << "Invalid call to pythranized function `" << name << '('; for (long n = PyTuple_GET_SIZE(args), i = 0; i < n; ++i) { PyObject_TypePrettyPrinter(oss, PyTuple_GET_ITEM(args, i)); if (i != n - 1 || (kwargs && PyDict_Size(kwargs))) oss << ", "; } if (kwargs) { PyObject *key, *value; Py_ssize_t pos = 0; for (int next = PyDict_Next(kwargs, &pos, &key, &value); next;) { PyObject *vrepr = PyObject_GetAttrString((PyObject 
*)Py_TYPE(value), "__name__"); oss << PyString_AS_STRING(key) << '=' << PyString_AS_STRING(vrepr); Py_DECREF(vrepr); if ((next = PyDict_Next(kwargs, &pos, &key, &value))) oss << ", "; } } oss << ")'\nCandidates are:\n" << alternatives << "\n"; PyErr_SetString(PyExc_TypeError, oss.str().c_str()); return nullptr; } } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/python/exception_handler.hpp000066400000000000000000000241111416264035500250150ustar00rootroot00000000000000#ifndef PYTHONIC_PYTHON_EXCEPTION_HANDLER_HPP #define PYTHONIC_PYTHON_EXCEPTION_HANDLER_HPP #ifdef ENABLE_PYTHON_MODULE #include "Python.h" #include PYTHONIC_NS_BEGIN // This function have to be include after every others exceptions to have // correct exception macro defined. template PyObject *handle_python_exception(F &&f) { try { return f(); } #ifdef PYTHONIC_BUILTIN_SYNTAXWARNING_HPP catch (pythonic::types::SyntaxWarning &e) { PyErr_SetString(PyExc_SyntaxWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_RUNTIMEWARNING_HPP catch (pythonic::types::RuntimeWarning &e) { PyErr_SetString(PyExc_RuntimeWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_DEPRECATIONWARNING_HPP catch (pythonic::types::DeprecationWarning &e) { PyErr_SetString(PyExc_DeprecationWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_IMPORTWARNING_HPP catch (pythonic::types::ImportWarning &e) { PyErr_SetString(PyExc_ImportWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_UNICODEWARNING_HPP catch (pythonic::types::UnicodeWarning &e) { PyErr_SetString(PyExc_UnicodeWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_BYTESWARNING_HPP catch (pythonic::types::BytesWarning &e) { PyErr_SetString(PyExc_BytesWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef 
PYTHONIC_BUILTIN_USERWARNING_HPP catch (pythonic::types::UserWarning &e) { PyErr_SetString(PyExc_UserWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_FUTUREWARNING_HPP catch (pythonic::types::FutureWarning &e) { PyErr_SetString(PyExc_FutureWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_PENDINGDEPRECATIONWARNING_HPP catch (pythonic::types::PendingDeprecationWarning &e) { PyErr_SetString(PyExc_PendingDeprecationWarning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_WARNING_HPP catch (pythonic::types::Warning &e) { PyErr_SetString(PyExc_Warning, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_UNICODEERROR_HPP catch (pythonic::types::UnicodeError &e) { PyErr_SetString(PyExc_UnicodeError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_VALUEERROR_HPP catch (pythonic::types::ValueError &e) { PyErr_SetString(PyExc_ValueError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_TYPEERROR_HPP catch (pythonic::types::TypeError &e) { PyErr_SetString(PyExc_TypeError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_SYSTEMERROR_HPP catch (pythonic::types::SystemError &e) { PyErr_SetString(PyExc_SystemError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_TABERROR_HPP catch (pythonic::types::TabError &e) { PyErr_SetString(PyExc_TabError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_INDENTATIONERROR_HPP catch (pythonic::types::IndentationError &e) { PyErr_SetString(PyExc_IndentationError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_SYNTAXERROR_HPP catch (pythonic::types::SyntaxError &e) { PyErr_SetString(PyExc_SyntaxError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif 
#ifdef PYTHONIC_BUILTIN_NOTIMPLEMENTEDERROR_HPP catch (pythonic::types::NotImplementedError &e) { PyErr_SetString(PyExc_NotImplementedError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_RUNTIMEERROR_HPP catch (pythonic::types::RuntimeError &e) { PyErr_SetString(PyExc_RuntimeError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_REFERENCEERROR_HPP catch (pythonic::types::ReferenceError &e) { PyErr_SetString(PyExc_ReferenceError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_UNBOUNDLOCALERROR_HPP catch (pythonic::types::UnboundLocalError &e) { PyErr_SetString(PyExc_UnboundLocalError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_NAMEERROR_HPP catch (pythonic::types::NameError &e) { PyErr_SetString(PyExc_NameError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_MEMORYERROR_HPP catch (pythonic::types::MemoryError &e) { PyErr_SetString(PyExc_MemoryError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_KEYERROR_HPP catch (pythonic::types::KeyError &e) { PyErr_SetString(PyExc_KeyError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_INDEXERROR_HPP catch (pythonic::types::IndexError &e) { PyErr_SetString(PyExc_IndexError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_LOOKUPERROR_HPP catch (pythonic::types::LookupError &e) { PyErr_SetString(PyExc_LookupError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_IMPORTERROR_HPP catch (pythonic::types::ImportError &e) { PyErr_SetString(PyExc_ImportError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_EOFERROR_HPP catch (pythonic::types::EOFError &e) { PyErr_SetString(PyExc_EOFError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif 
#ifdef PYTHONIC_BUILTIN_OSERROR_HPP catch (pythonic::types::OSError &e) { PyErr_SetString(PyExc_OSError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_IOERROR_HPP catch (pythonic::types::IOError &e) { PyErr_SetString(PyExc_IOError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_ENVIRONMENTERROR_HPP catch (pythonic::types::EnvironmentError &e) { PyErr_SetString(PyExc_EnvironmentError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_ATTRIBUTEERROR_HPP catch (pythonic::types::AttributeError &e) { PyErr_SetString(PyExc_AttributeError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_ASSERTIONERROR_HPP catch (pythonic::types::AssertionError &e) { PyErr_SetString(PyExc_AssertionError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_ZERODIVISIONERROR_HPP catch (pythonic::types::ZeroDivisionError &e) { PyErr_SetString(PyExc_ZeroDivisionError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_OVERFLOWERROR_HPP catch (pythonic::types::OverflowError &e) { PyErr_SetString(PyExc_OverflowError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_FLOATINGPOINTERROR_HPP catch (pythonic::types::FloatingPointError &e) { PyErr_SetString(PyExc_FloatingPointError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_ARITHMETICERROR_HPP catch (pythonic::types::ArithmeticError &e) { PyErr_SetString(PyExc_ArithmeticError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_FILENOTFOUNDERROR_HPP catch (pythonic::types::FileNotFoundError &e) { PyErr_SetString(PyExc_FileNotFoundError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_BUFFERERROR_HPP catch (pythonic::types::BufferError &e) { PyErr_SetString(PyExc_BufferError, 
pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_STANDARDERROR_HPP catch (pythonic::types::StandardError &e) { PyErr_SetString(PyExc_StandardError, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_STOPITERATION_HPP catch (pythonic::types::StopIteration &e) { PyErr_SetString(PyExc_StopIteration, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_EXCEPTION_HPP catch (pythonic::types::Exception &e) { PyErr_SetString(PyExc_Exception, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_GENERATOREXIT_HPP catch (pythonic::types::GeneratorExit &e) { PyErr_SetString(PyExc_GeneratorExit, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_KEYBOARDINTERRUPT_HPP catch (pythonic::types::KeyboardInterrupt &e) { PyErr_SetString(PyExc_KeyboardInterrupt, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_SYSTEMEXIT_HPP catch (pythonic::types::SystemExit &e) { PyErr_SetString(PyExc_SystemExit, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif #ifdef PYTHONIC_BUILTIN_BASEEXCEPTION_HPP catch (pythonic::types::BaseException &e) { PyErr_SetString(PyExc_BaseException, pythonic::builtins::functor::str{}(e.args).c_str()); } #endif catch (...) 
{ PyErr_SetString(PyExc_RuntimeError, "Something happened on the way to heaven"); } return nullptr; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/random/000077500000000000000000000000001416264035500205515ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/random/choice.hpp000066400000000000000000000021661416264035500225210ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_CHOICE_HPP #define PYTHONIC_RANDOM_CHOICE_HPP #include "pythonic/include/random/choice.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/random/random.hpp" #include "pythonic/types/traits.hpp" PYTHONIC_NS_BEGIN namespace random { namespace details { template typename std::enable_if::value, typename Seq::value_type>::type choice(Seq const &seq) { auto tmp = seq.begin(); // std::advance not usable because it requires operator-- for (long n = random() * seq.size(); n; --n) ++tmp; return *tmp; } template typename std::enable_if::value, typename Seq::value_type>::type choice(Seq const &seq) { std::vector::type> tmp( seq.begin(), seq.end()); return tmp[long(random() * tmp.size())]; } } template typename Seq::value_type choice(Seq const &seq) { return details::choice(seq); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/expovariate.hpp000066400000000000000000000005741416264035500236170ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_EXPOVARIATE_HPP #define PYTHONIC_RANDOM_EXPOVARIATE_HPP #include "pythonic/include/random/expovariate.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { double expovariate(double l) { return std::exponential_distribution<>(l)(__random_generator); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/gauss.hpp000066400000000000000000000005671416264035500224140ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_GAUSS_HPP #define PYTHONIC_RANDOM_GAUSS_HPP #include "pythonic/include/random/gauss.hpp" #include 
"pythonic/utils/functor.hpp" #include "pythonic/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { double gauss(double mu, double sigma) { return std::normal_distribution<>(mu, sigma)(__random_generator); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/randint.hpp000066400000000000000000000006301416264035500227200ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_RANDINT_HPP #define PYTHONIC_RANDOM_RANDINT_HPP #include "pythonic/include/random/randint.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/random/randrange.hpp" PYTHONIC_NS_BEGIN namespace random { long randint(long a, long b) { // TODO: It should be implemented with an uniform_int_distribution return randrange(a, b + 1); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/random.hpp000066400000000000000000000006011416264035500225370ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_RANDOM_HPP #define PYTHONIC_RANDOM_RANDOM_HPP #include "pythonic/include/random/random.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace random { double random() { static std::uniform_real_distribution<> uniform_distrib(0.0, 1.0); return uniform_distrib(__random_generator); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/randrange.hpp000066400000000000000000000011231416264035500232200ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_RANDRANGE_HPP #define PYTHONIC_RANDOM_RANDRANGE_HPP #include "pythonic/include/random/randrange.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/random/random.hpp" #include PYTHONIC_NS_BEGIN namespace random { long randrange(long stop) { return long(random() * stop); } long randrange(long start, long stop) { return start + long(random() * (stop - start)); } long randrange(long start, long stop, long step) { return start + step * long((random() * (stop - start)) / std::abs(step)); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/random/sample.hpp000066400000000000000000000020011416264035500225340ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_SAMPLE_HPP #define PYTHONIC_RANDOM_SAMPLE_HPP #include "pythonic/include/random/sample.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/random/random.hpp" #include "pythonic/types/list.hpp" PYTHONIC_NS_BEGIN namespace random { template types::list::type>:: type::iterator>::value_type> sample(Iterable &&s, size_t k) { using value_type = typename std::iterator_traits::type>::type::iterator>:: value_type; types::list tmp(s.begin(), s.end()); std::vector indices(tmp.size()); std::iota(indices.begin(), indices.end(), 0); std::random_shuffle(indices.begin(), indices.end()); types::list out(k); for (size_t i = 0; i < k; i++) out[i] = tmp[indices[i]]; return out; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/seed.hpp000066400000000000000000000010021416264035500221730ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_SEED_HPP #define PYTHONIC_RANDOM_SEED_HPP #include "pythonic/include/random/seed.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/random/random.hpp" #include PYTHONIC_NS_BEGIN namespace random { types::none_type seed(long s) { __random_generator.seed(s); return builtins::None; } types::none_type seed() { __random_generator.seed(time(nullptr)); return builtins::None; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/shuffle.hpp000066400000000000000000000024421416264035500227200ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_SHUFFLE_HPP #define PYTHONIC_RANDOM_SHUFFLE_HPP #include "pythonic/include/random/shuffle.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/random/random.hpp" #include PYTHONIC_NS_BEGIN namespace random { template types::none_type shuffle(T &seq) { std::shuffle(seq.begin(), seq.end(), __random_generator); return 
builtins::None; } namespace details { template struct URG { URG(function &&f) : randf(f) { } typedef unsigned result_type; static constexpr result_type min() { return 0; } /* -1 because of the floor() operation performed by the float->unsigned * conversion */ static constexpr result_type max() { return std::numeric_limits::max() - 1; } result_type operator()() { return randf() * std::numeric_limits::max(); } function randf; }; } template types::none_type shuffle(T &seq, function &&randf) { std::shuffle(seq.begin(), seq.end(), details::URG(std::forward(randf))); return builtins::None; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/random/uniform.hpp000066400000000000000000000005261416264035500227440ustar00rootroot00000000000000#ifndef PYTHONIC_RANDOM_UNIFORM_HPP #define PYTHONIC_RANDOM_UNIFORM_HPP #include "pythonic/include/random/uniform.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/random/random.hpp" PYTHONIC_NS_BEGIN namespace random { double uniform(double a, double b) { return a + (b - a) * random(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/000077500000000000000000000000001416264035500204205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/scipy/special/000077500000000000000000000000001416264035500220405ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/scipy/special/binom.hpp000066400000000000000000000020101416264035500236460ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_BINOM_HPP #define PYTHONIC_SCIPY_SPECIAL_BINOM_HPP #include "pythonic/include/scipy/special/binom.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double binom(T0 n, T1 k) { static_assert(std::is_integral::value && std::is_integral::value, "only support integer case of 
scipy.special.binom"); using namespace boost::math::policies; return boost::math::binomial_coefficient( n, k, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME binom #define NUMPY_NARY_FUNC_SYM details::binom #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/gamma.hpp000066400000000000000000000007271416264035500236410ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_GAMMA_HPP #define PYTHONIC_SCIPY_SPECIAL_GAMMA_HPP #include "pythonic/include/scipy/special/gamma.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { #define NUMPY_NARY_FUNC_NAME gamma #define NUMPY_NARY_FUNC_SYM xsimd::tgamma #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/gammaln.hpp000066400000000000000000000007371416264035500241740ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_GAMMALN_HPP #define PYTHONIC_SCIPY_SPECIAL_GAMMALN_HPP #include "pythonic/include/scipy/special/gammaln.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace scipy { namespace special { #define NUMPY_NARY_FUNC_NAME gammaln #define NUMPY_NARY_FUNC_SYM xsimd::lgamma #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/hankel1.hpp000066400000000000000000000014361416264035500241000ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_HANKEL1_HPP #define PYTHONIC_SCIPY_SPECIAL_HANKEL1_HPP #include "pythonic/include/scipy/special/hankel1.hpp" #include "pythonic/types/complex.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include 
PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template std::complex hankel1(T0 x, T1 y) { return boost::math::cyl_hankel_1(x, y); } } #define NUMPY_NARY_FUNC_NAME hankel1 #define NUMPY_NARY_FUNC_SYM details::hankel1 #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/hankel2.hpp000066400000000000000000000014361416264035500241010ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_HANKEL2_HPP #define PYTHONIC_SCIPY_SPECIAL_HANKEL2_HPP #include "pythonic/include/scipy/special/hankel2.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/complex.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template std::complex hankel2(T0 x, T1 y) { return boost::math::cyl_hankel_2(x, y); } } #define NUMPY_NARY_FUNC_NAME hankel2 #define NUMPY_NARY_FUNC_SYM details::hankel2 #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/iv.hpp000066400000000000000000000015111416264035500231650ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_IV_HPP #define PYTHONIC_SCIPY_SPECIAL_IV_HPP #include "pythonic/include/scipy/special/iv.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double iv(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_bessel_i(x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME iv #define NUMPY_NARY_FUNC_SYM details::iv #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/scipy/special/ivp.hpp000066400000000000000000000014771416264035500233600ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_IVP_HPP #define PYTHONIC_SCIPY_SPECIAL_IVP_HPP #include "pythonic/include/scipy/special/ivp.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double ivp(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_bessel_i_prime( x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME ivp #define NUMPY_NARY_FUNC_SYM details::ivp #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/jv.hpp000066400000000000000000000015111416264035500231660ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_JV_HPP #define PYTHONIC_SCIPY_SPECIAL_JV_HPP #include "pythonic/include/scipy/special/jv.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double jv(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_bessel_j(x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME jv #define NUMPY_NARY_FUNC_SYM details::jv #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/jvp.hpp000066400000000000000000000014771416264035500233610ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_JVP_HPP #define PYTHONIC_SCIPY_SPECIAL_JVP_HPP #include "pythonic/include/scipy/special/jvp.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include 
"pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double jvp(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_bessel_j_prime( x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME jvp #define NUMPY_NARY_FUNC_SYM details::jvp #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/kv.hpp000066400000000000000000000015111416264035500231670ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_KV_HPP #define PYTHONIC_SCIPY_SPECIAL_KV_HPP #include "pythonic/include/scipy/special/kv.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double kv(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_bessel_k(x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME kv #define NUMPY_NARY_FUNC_SYM details::kv #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/kvp.hpp000066400000000000000000000014771416264035500233620ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_KVP_HPP #define PYTHONIC_SCIPY_SPECIAL_KVP_HPP #include "pythonic/include/scipy/special/kvp.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double kvp(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_bessel_k_prime( x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME kvp #define NUMPY_NARY_FUNC_SYM 
details::kvp #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/spherical_jn.hpp000066400000000000000000000023171416264035500252150ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_SPHERICAL_JN_HPP #define PYTHONIC_SCIPY_SPECIAL_SPHERICAL_JN_HPP #include "pythonic/include/scipy/special/spherical_jn.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double spherical_jn(T0 v, T1 x, bool derivative) { assert(v == (long)v && "only supported for integral value as first arg"); using namespace boost::math::policies; if (derivative) { return boost::math::sph_bessel_prime( v, x, make_policy(promote_double())); } else { return boost::math::sph_bessel(v, x, make_policy(promote_double())); } } } #define NUMPY_NARY_FUNC_NAME spherical_jn #define NUMPY_NARY_FUNC_SYM details::spherical_jn #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/spherical_yn.hpp000066400000000000000000000023221416264035500252300ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_SPHERICAL_YN_HPP #define PYTHONIC_SCIPY_SPECIAL_SPHERICAL_YN_HPP #include "pythonic/include/scipy/special/spherical_yn.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double spherical_yn(T0 v, T1 x, bool derivative) { assert(v == (long)v && "only supported for integral value as first arg"); using namespace boost::math::policies; if (derivative) { return boost::math::sph_neumann_prime( v, x, make_policy(promote_double())); } else { 
return boost::math::sph_neumann(v, x, make_policy(promote_double())); } } } #define NUMPY_NARY_FUNC_NAME spherical_yn #define NUMPY_NARY_FUNC_SYM details::spherical_yn #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/yv.hpp000066400000000000000000000015071416264035500232120ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_YV_HPP #define PYTHONIC_SCIPY_SPECIAL_YV_HPP #include "pythonic/include/scipy/special/yv.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double yv(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_neumann(x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME yv #define NUMPY_NARY_FUNC_SYM details::yv #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/scipy/special/yvp.hpp000066400000000000000000000014761416264035500233770ustar00rootroot00000000000000#ifndef PYTHONIC_SCIPY_SPECIAL_YVP_HPP #define PYTHONIC_SCIPY_SPECIAL_YVP_HPP #include "pythonic/include/scipy/special/yvp.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/numpy_traits.hpp" #define BOOST_MATH_THREAD_LOCAL thread_local #include PYTHONIC_NS_BEGIN namespace scipy { namespace special { namespace details { template double yvp(T0 x, T1 y) { using namespace boost::math::policies; return boost::math::cyl_neumann_prime( x, y, make_policy(promote_double())); } } #define NUMPY_NARY_FUNC_NAME yvp #define NUMPY_NARY_FUNC_SYM details::yvp #include "pythonic/types/numpy_nary_expr.hpp" } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/string/000077500000000000000000000000001416264035500205775ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/string/ascii_letters.hpp000066400000000000000000000002221416264035500241360ustar00rootroot00000000000000#ifndef PYTHONIC_STRING_ASCII_LETTERS_HPP #define PYTHONIC_STRING_ASCII_LETTERS_HPP #include "pythonic/include/string/ascii_letters.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/string/ascii_lowercase.hpp000066400000000000000000000002301416264035500244370ustar00rootroot00000000000000#ifndef PYTHONIC_STRING_ASCII_LOWERCASE_HPP #define PYTHONIC_STRING_ASCII_LOWERCASE_HPP #include "pythonic/include/string/ascii_lowercase.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/string/ascii_uppercase.hpp000066400000000000000000000002301416264035500244420ustar00rootroot00000000000000#ifndef PYTHONIC_STRING_ASCII_UPPERCASE_HPP #define PYTHONIC_STRING_ASCII_UPPERCASE_HPP #include "pythonic/include/string/ascii_uppercase.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/string/digits.hpp000066400000000000000000000001751416264035500225760ustar00rootroot00000000000000#ifndef PYTHONIC_STRING_DIGITS_HPP #define PYTHONIC_STRING_DIGITS_HPP #include "pythonic/include/string/digits.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/string/find.hpp000066400000000000000000000005521416264035500222320ustar00rootroot00000000000000#ifndef PYTHONIC_STRING_FIND_HPP #define PYTHONIC_STRING_FIND_HPP #include "pythonic/include/string/find.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/str.hpp" PYTHONIC_NS_BEGIN namespace string { template long find(types::str const &s, T &&val) { return s.find(std::forward(val)); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/string/hexdigits.hpp000066400000000000000000000002061416264035500232760ustar00rootroot00000000000000#ifndef PYTHONIC_STRING_HEXDIGITS_HPP #define PYTHONIC_STRING_HEXDIGITS_HPP #include "pythonic/include/string/hexdigits.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/string/octdigits.hpp000066400000000000000000000002061416264035500232770ustar00rootroot00000000000000#ifndef PYTHONIC_STRING_OCTDIGITS_HPP #define PYTHONIC_STRING_OCTDIGITS_HPP #include "pythonic/include/string/octdigits.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/time/000077500000000000000000000000001416264035500202275ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/time/sleep.hpp000066400000000000000000000006641416264035500220560ustar00rootroot00000000000000#ifndef PYTHONIC_TIME_SLEEP_HPP #define PYTHONIC_TIME_SLEEP_HPP #include "pythonic/include/time/sleep.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/builtins/None.hpp" #include #include PYTHONIC_NS_BEGIN namespace time { types::none_type sleep(double const value) { std::this_thread::sleep_for(std::chrono::duration(value)); return builtins::None; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/time/time.hpp000066400000000000000000000007441416264035500217030ustar00rootroot00000000000000#ifndef PYTHONIC_TIME_TIME_HPP #define PYTHONIC_TIME_TIME_HPP #include "pythonic/include/time/time.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace time { double time() { std::chrono::time_point tp = std::chrono::steady_clock::now(); return std::chrono::duration_cast( tp.time_since_epoch()).count() / 1000.; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/000077500000000000000000000000001416264035500204355ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/types/NoneType.hpp000066400000000000000000000202071416264035500227100ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NONE_HPP #define PYTHONIC_TYPES_NONE_HPP #include "pythonic/include/types/NoneType.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/builtins/id.hpp" #include "pythonic/builtins/bool_.hpp" #include "pythonic/operator_/mod.hpp" PYTHONIC_NS_BEGIN namespace types { /// none_type implementation 
none_type::none_type() { } intptr_t none_type::id() const { return NONE_ID; } /// none implementation /* Type adapator to simulate an option type * * see http://en.wikipedia.org/wiki/Option_type */ template none::none(none_type const &) : T(), is_none(true) { } template bool none::operator==(none_type const &) const { return is_none; } template template bool none::operator==(O const &t) const { return !is_none && static_cast(*this) == t; } template bool none::operator!=(none_type const &other) const { return !(*this == other); } template template bool none::operator!=(O const &other) const { return !(*this == other); } template none::operator bool() const { return !is_none && builtins::functor::bool_{}(static_cast(*this)); } template intptr_t none::id() const { return is_none ? NONE_ID : builtins::id(static_cast(*this)); } template std::ostream &operator<<(std::ostream &os, none const &v) { if (v.is_none) return os << none_type(); else return os << static_cast(v); } /* specialization of none for integral types we cannot derive from */ template none::none() : data(), is_none(false) { } template none::none(none_type const &) : data(), is_none(true) { } template none::none(T const &data) : data(data), is_none(false) { } template bool none::operator==(none_type const &) const { return is_none; } template template bool none::operator==(O const &t) const { return !is_none && data == t; } template bool none::operator!=(none_type const &other) const { return !(*this == other); } template template bool none::operator!=(O const &other) const { return !(*this == other); } template T &none::operator=(T const &t) { is_none = false; return data = t; } template intptr_t none::id() const { return is_none ? 
NONE_ID : reinterpret_cast(&data); } template T operator+(none const &t0, T const &t1) { return t0.data + t1; } template T operator+(T const &t0, none const &t1) { return t0 + t1.data; } template none operator+(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data + t1.data}; } template bool operator>(none const &t0, T const &t1) { return t0.data > t1; } template bool operator>(T const &t0, none const &t1) { return t0 > t1.data; } template none operator>(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data > t1.data}; } template bool operator>=(none const &t0, T const &t1) { return t0.data >= t1; } template bool operator>=(T const &t0, none const &t1) { return t0 >= t1.data; } template none operator>=(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data >= t1.data}; } template bool operator<(none const &t0, T const &t1) { return t0.data < t1; } template bool operator<(T const &t0, none const &t1) { return t0 < t1.data; } template none operator<(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data < t1.data}; } template bool operator<=(none const &t0, T const &t1) { return t0.data <= t1; } template bool operator<=(T const &t0, none const &t1) { return t0 <= t1.data; } template none operator<=(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data <= t1.data}; } template T operator-(none const &t0, T const &t1) { return t0.data - t1; } template T operator-(T const &t0, none const &t1) { return t0 - t1.data; } template none operator-(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data - t1.data}; } template T operator*(none const &t0, T const &t1) { return t0.data * t1; } template T operator*(T const &t0, none const &t1) { return t0 * t1.data; } template none 
operator*(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data * t1.data}; } template T operator/(none const &t0, T const &t1) { return t0.data / t1; } template T operator/(T const &t0, none const &t1) { return t0 / t1.data; } template none operator/(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {t0.data / t1.data}; } template decltype(operator_::mod(std::declval(), std::declval())) operator%(none const &t0, T1 const &t1) { return operator_::mod(t0.data, t1); } template decltype(operator_::mod(std::declval(), std::declval())) operator%(T0 const &t0, none const &t1) { return operator_::mod(t0, t1.data); } template none(), std::declval())), true> operator%(none const &t0, none const &t1) { if (t0.is_none && t1.is_none) return none_type{}; else return {operator_::mod(t0, t1.data)}; } template template none &none::operator+=(T1 other) { if (!is_none) data += other; return *this; } template template none &none::operator-=(T1 other) { if (!is_none) data -= other; return *this; } template template none &none::operator*=(T1 other) { if (!is_none) data *= other; return *this; } template template none &none::operator/=(T1 other) { if (!is_none) data /= other; return *this; } template std::ostream &operator<<(std::ostream &os, none const &v) { if (v.is_none) return os << none_type(); else return os << v.data; } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN bool from_python::is_convertible(PyObject *obj) { return obj == Py_None; } types::none_type from_python::convert(PyObject *obj) { return {}; } PyObject *to_python::convert(types::none_type) { Py_RETURN_NONE; } template PyObject *to_python>::convert(types::none const &n) { if (n.is_none) { Py_RETURN_NONE; } else { return ::to_python(static_cast(n)); } } PYTHONIC_NS_END #endif #endif 
pythran-0.10.0+ds2/pythran/pythonic/types/assignable.hpp000066400000000000000000000002061416264035500232540ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_ASSIGNABLE_HPP #define PYTHONIC_TYPES_ASSIGNABLE_HPP #include "pythonic/include/types/assignable.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/attr.hpp000066400000000000000000000001641416264035500221210ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_ATTR_HPP #define PYTHONIC_TYPES_ATTR_HPP #include "pythonic/include/types/attr.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/bool.hpp000066400000000000000000000007301416264035500221010ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_BOOL_HPP #define PYTHONIC_TYPES_BOOL_HPP #include "pythonic/include/types/bool.hpp" #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN PyObject *to_python::convert(bool b) { if (b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } bool from_python::is_convertible(PyObject *obj) { return obj == Py_True || obj == Py_False; } bool from_python::convert(PyObject *obj) { return obj == Py_True; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/cfun.hpp000066400000000000000000000022001416264035500220730ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_CFUN_HPP #define PYTHONIC_TYPES_CFUN_HPP #include "pythonic/include/types/cfun.hpp" PYTHONIC_NS_BEGIN namespace types { template cfun::cfun(ReturnType (*fun)(ArgsType...)) : ptr(fun) { } template ReturnType cfun::operator()(ArgsType... 
args) const { return (*ptr)(args...); } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN template PyObject * to_python>::convert(types::cfun const &v) { return PyCapsule_New(v.ptr, nullptr, nullptr); } template bool from_python>::is_convertible(PyObject *obj) { return PyCapsule_CheckExact(obj); } template types::cfun from_python>::convert(PyObject *obj) { void *ptr = PyCapsule_GetPointer( obj, PyCapsule_GetName(obj) /* avoid the string check*/); return {reinterpret_cast(ptr)}; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/combined.hpp000066400000000000000000000002571416264035500227320ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_COMBINED_HPP #define PYTHONIC_TYPES_COMBINED_HPP #include "pythonic/include/types/combined.hpp" #include "pythonic/types/variant_functor.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/complex.hpp000066400000000000000000000146251416264035500226250ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_COMPLEX_HPP #define PYTHONIC_TYPES_COMPLEX_HPP #include "pythonic/include/types/complex.hpp" #include "pythonic/numpy/complex64.hpp" #include "pythonic/numpy/complex128.hpp" #include "pythonic/numpy/complex256.hpp" #include "pythonic/types/attr.hpp" namespace std { template complex_broadcast_t operator+(std::complex self, S other) { return (complex_broadcast_t)self + (typename std::common_type::type)(other); } template complex_broadcast_t operator+(S self, std::complex other) { return (typename std::common_type::type)(self) + (complex_broadcast_t)other; } template complex_broadcast_t operator-(std::complex self, S other) { return (complex_broadcast_t)self - (typename std::common_type::type)(other); } template complex_broadcast_t operator-(S self, std::complex other) { return (typename std::common_type::type)(self) - (complex_broadcast_t)other; } template complex_broadcast_t operator*(std::complex self, S other) { return (complex_broadcast_t)self * (typename std::common_type::type)(other); } 
template complex_broadcast_t operator*(S self, std::complex other) { return (typename std::common_type::type)(self) * (complex_broadcast_t)other; } template complex_broadcast_t operator/(std::complex self, S other) { return (complex_broadcast_t)self / (typename std::common_type::type)(other); } template complex_broadcast_t operator/(S self, std::complex other) { return (typename std::common_type::type)(self) / (complex_broadcast_t)other; } template complex_bool_t operator==(std::complex self, S other) { return self == T(other); } template complex_bool_t operator==(S self, std::complex other) { return T(self) == other; } template complex_bool_t operator!=(std::complex self, S other) { return self != T(other); } template complex_bool_t operator!=(S self, std::complex other) { return T(self) != other; } template bool operator<(std::complex self, std::complex other) { return self.real() == other.real() ? self.imag() < other.imag() : self.real() < other.real(); } template bool operator<=(std::complex self, std::complex other) { return self.real() == other.real() ? self.imag() <= other.imag() : self.real() <= other.real(); } template bool operator>(std::complex self, std::complex other) { return self.real() == other.real() ? self.imag() > other.imag() : self.real() > other.real(); } template bool operator>=(std::complex self, std::complex other) { return self.real() == other.real() ? 
self.imag() >= other.imag() : self.real() >= other.real(); } template bool operator&&(std::complex self, std::complex other) { return (self.real() || self.imag()) && (other.real() || other.imag()); } template bool operator||(std::complex self, std::complex other) { return (self.real() || self.imag()) || (other.real() || other.imag()); } template bool operator!(std::complex self) { return !self.real() && !self.imag(); } template size_t hash>::operator()(std::complex const &x) const { return std::hash{}(x.real()) ^ std::hash{}(x.imag()); }; } PYTHONIC_NS_BEGIN namespace builtins { template T getattr(types::attr::REAL, std::complex const &self) { return std::real(self); } template T getattr(types::attr::IMAG, std::complex const &self) { return std::imag(self); } numpy::functor::complex64 getattr(types::attr::DTYPE, std::complex const &self) { return {}; } numpy::functor::complex128 getattr(types::attr::DTYPE, std::complex const &self) { return {}; } numpy::functor::complex256 getattr(types::attr::DTYPE, std::complex const &self) { return {}; } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" #include "numpy/arrayscalars.h" PYTHONIC_NS_BEGIN template <> PyObject *to_python>::convert( std::complex const &c) { return PyArray_Scalar(const_cast *>(&c), PyArray_DescrFromType(NPY_CLONGDOUBLE), nullptr); } template <> PyObject * to_python>::convert(std::complex const &c) { return PyComplex_FromDoubles(c.real(), c.imag()); } template <> PyObject *to_python>::convert(std::complex const &c) { return PyArray_Scalar(const_cast *>(&c), PyArray_DescrFromType(NPY_CFLOAT), nullptr); } template <> bool from_python>::is_convertible(PyObject *obj) { return PyArray_IsScalar(obj, CLongDouble); } template <> bool from_python>::is_convertible(PyObject *obj) { return PyComplex_Check(obj); } template <> bool from_python>::is_convertible(PyObject *obj) { return PyArray_IsScalar(obj, CFloat); } template <> std::complex from_python>::convert(PyObject *obj) { auto 
val = PyArrayScalar_VAL(obj, CLongDouble); return {val.real, val.imag}; } template <> std::complex from_python>::convert(PyObject *obj) { return {PyComplex_RealAsDouble(obj), PyComplex_ImagAsDouble(obj)}; } template <> std::complex from_python>::convert(PyObject *obj) { auto val = PyArrayScalar_VAL(obj, CFloat); return {val.real, val.imag}; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/complex128.hpp000066400000000000000000000002541416264035500230510ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_COMPLEX128_HPP #define PYTHONIC_TYPES_COMPLEX128_HPP #include "pythonic/include/types/complex128.hpp" #include "pythonic/types/complex.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/complex256.hpp000066400000000000000000000002541416264035500230530ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_COMPLEX256_HPP #define PYTHONIC_TYPES_COMPLEX256_HPP #include "pythonic/include/types/complex256.hpp" #include "pythonic/types/complex.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/complex64.hpp000066400000000000000000000002511416264035500227650ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_COMPLEX64_HPP #define PYTHONIC_TYPES_COMPLEX64_HPP #include "pythonic/include/types/complex64.hpp" #include "pythonic/types/complex.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/dict.hpp000066400000000000000000000331731416264035500221000ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_DICT_HPP #define PYTHONIC_TYPES_DICT_HPP #include "pythonic/include/types/dict.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/types/empty_iterator.hpp" #include "pythonic/utils/iterator.hpp" #include "pythonic/utils/reserve.hpp" #include "pythonic/builtins/None.hpp" #include "pythonic/utils/shared_ref.hpp" #include #include #include #include #include PYTHONIC_NS_BEGIN namespace types { /// item implementation template item_iterator_adaptator::item_iterator_adaptator(I const &i) : I(i) { } template typename 
item_iterator_adaptator::value_type item_iterator_adaptator:: operator*() const { auto &&tmp = I::operator*(); return make_tuple(tmp.first, tmp.second); } /// key_iterator_adaptator implementation template key_iterator_adaptator::key_iterator_adaptator() : I() { } template key_iterator_adaptator::key_iterator_adaptator(I const &i) : I(i) { } template typename key_iterator_adaptator::value_type key_iterator_adaptator:: operator*() const { return (*this)->first; } /// value_iterator_adaptator implementation template value_iterator_adaptator::value_iterator_adaptator() : I() { } template value_iterator_adaptator::value_iterator_adaptator(I const &i) : I(i) { } template typename value_iterator_adaptator::value_type value_iterator_adaptator:: operator*() const { return (*this)->second; } template dict_items::dict_items() { } template dict_items::dict_items(D const &d) : data(d) { } template typename dict_items::iterator dict_items::begin() const { return data.item_begin(); } template typename dict_items::iterator dict_items::end() const { return data.item_end(); } template long dict_items::size() const { return data.size(); } template dict_keys::dict_keys() { } template dict_keys::dict_keys(D const &d) : data(d) { } template typename dict_keys::iterator dict_keys::begin() const { return data.key_begin(); } template typename dict_keys::iterator dict_keys::end() const { return data.key_end(); } template long dict_keys::size() const { return data.size(); } template dict_values::dict_values() { } template dict_values::dict_values(D const &d) : data(d) { } template typename dict_values::iterator dict_values::begin() const { return data.value_begin(); } template typename dict_values::iterator dict_values::end() const { return data.value_end(); } template long dict_values::size() const { return data.size(); } template dict::dict() : data(utils::no_memory()) { } template dict::dict(empty_dict const &) : data(DEFAULT_DICT_CAPACITY) { } template dict::dict(std::initializer_list 
l) : data(l.begin(), l.end()) { } template dict::dict(dict const &other) : data(other.data) { } template template dict::dict(dict const &other) : data(other.data->begin(), other.data->end()) { } template template dict::dict(B begin, E end) : data(begin, end) { } // iterators template typename dict::iterator dict::begin() { return typename dict::iterator(data->begin()); } template typename dict::const_iterator dict::begin() const { return key_iterator_adaptator< typename dict::container_type::const_iterator>(data->begin()); } template typename dict::iterator dict::end() { return typename dict::iterator(data->end()); } template typename dict::const_iterator dict::end() const { return key_iterator_adaptator< typename dict::container_type::const_iterator>(data->end()); } template typename dict::item_iterator dict::item_begin() { return item_iterator_adaptator< typename dict::container_type::iterator>(data->begin()); } template typename dict::item_const_iterator dict::item_begin() const { return item_iterator_adaptator< typename dict::container_type::const_iterator>(data->begin()); } template typename dict::item_iterator dict::item_end() { return item_iterator_adaptator< typename dict::container_type::iterator>(data->end()); } template typename dict::item_const_iterator dict::item_end() const { return item_iterator_adaptator< typename dict::container_type::const_iterator>(data->end()); } template typename dict::key_iterator dict::key_begin() { return key_iterator_adaptator< typename dict::container_type::iterator>(data->begin()); } template typename dict::key_const_iterator dict::key_begin() const { return key_iterator_adaptator< typename dict::container_type::const_iterator>(data->begin()); } template typename dict::key_iterator dict::key_end() { return key_iterator_adaptator< typename dict::container_type::iterator>(data->end()); } template typename dict::key_const_iterator dict::key_end() const { return key_iterator_adaptator< typename 
dict::container_type::const_iterator>(data->end()); } template typename dict::value_iterator dict::value_begin() { return value_iterator_adaptator< typename dict::container_type::iterator>(data->begin()); } template typename dict::value_const_iterator dict::value_begin() const { return value_iterator_adaptator< typename dict::container_type::const_iterator>(data->begin()); } template typename dict::value_iterator dict::value_end() { return value_iterator_adaptator< typename dict::container_type::iterator>(data->end()); } template typename dict::value_const_iterator dict::value_end() const { return value_iterator_adaptator< typename dict::container_type::const_iterator>(data->end()); } // dict interface template dict::operator bool() { return !data->empty(); } template V &dict::operator[](K const &key) { return fast(key); } template V const &dict::operator[](K const &key) const { return fast(key); } template V &dict::fast(K const &key) { return (*data)[key]; } template V const &dict::fast(K const &key) const { return (*data)[key]; } template typename dict::item_const_iterator dict::find(K const &key) const { return item_iterator_adaptator< typename dict::container_type::const_iterator>(data->find(key)); } template void dict::clear() { return data->clear(); } template dict dict::copy() const { return dict(this->data->begin(), this->data->end()); } template template typename __combined::type dict::get(K const &key, W d) const { auto ivalue = data->find(key); if (ivalue != data->end()) return ivalue->second; else return d; } template none dict::get(K const &key) const { auto ivalue = data->find(key); if (ivalue != data->end()) return ivalue->second; else return builtins::None; } template template V &dict::setdefault(K const &key, W d) { auto ivalue = data->find(key); if (ivalue != data->end()) return ivalue->second; else return (*data)[key] = d; } template none &dict::setdefault(K const &key) { auto ivalue = data->find(key); if (ivalue != data->end()) return 
ivalue->second; else return (*data)[key] = builtins::None; } template template void dict::update(dict const &d) { for (auto kv : *d.data) (*data)[kv.first] = kv.second; } template template void dict::update(Iterable const &d) { for (auto kv : d) (*data)[std::get<0>(kv)] = std::get<1>(kv); } template template typename __combined::type dict::pop(K const &key, W d) { auto ivalue = data->find(key); if (ivalue != data->end()) { auto tmp = ivalue->second; data->erase(ivalue); return tmp; } else return d; } template V dict::pop(K const &key) { auto ivalue = data->find(key); if (ivalue != data->end()) { auto tmp = ivalue->second; data->erase(ivalue); return tmp; } else throw std::range_error("KeyError"); } template make_tuple_t dict::popitem() { auto b = data->begin(); if (b == data->end()) throw std::range_error("KeyError"); else { auto r = *b; data->erase(b); return make_tuple_t{r.first, r.second}; } } template long dict::size() const { return data->size(); } template dict_items> dict::items() const { return dict_items>(*this); } template dict_keys> dict::keys() const { return dict_keys>(*this); } template dict_values> dict::values() const { return dict_values>(*this); } // id interface template intptr_t dict::id() const { return reinterpret_cast(&(*data)); } template template bool dict::contains(T const &key) const { return data->find(key) != data->end(); } template dict empty_dict::operator+(dict const &s) { return s; } empty_dict empty_dict::operator+(empty_dict const &) { return empty_dict(); } empty_dict::operator bool() const { return false; } typename empty_dict::iterator empty_dict::begin() const { return empty_iterator(); } typename empty_dict::iterator empty_dict::end() const { return empty_iterator(); } template bool empty_dict::contains(V const &) const { return false; } template dict operator+(dict const &d, empty_dict) { return d; } } std::ostream &operator<<(std::ostream &os, types::empty_dict const &) { return os << "{}"; } template std::ostream 
&operator<<(std::ostream &os, std::pair const &p) { os << p.first << ": "; return os << p.second; } template std::ostream &operator<<(std::ostream &os, types::dict const &v) { os << '{'; auto iter = v.item_begin(); if (iter != v.item_end()) { auto niter = iter; ++niter; while (niter != v.item_end()) { os << *iter << ", "; ++niter, ++iter; } os << *iter; } return os << '}'; } PYTHONIC_NS_END /* overload std::get */ namespace std { template auto get(pythonic::types::dict &d) -> decltype(d[I]) { return d[I]; } template auto get(pythonic::types::dict const &d) -> decltype(d[I]) { return d[I]; } } #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template PyObject *to_python>::convert(types::dict const &v) { PyObject *ret = PyDict_New(); for (auto kv = v.item_begin(); kv != v.item_end(); ++kv) { PyObject *kobj = ::to_python(kv->first), *vobj = ::to_python(kv->second); PyDict_SetItem(ret, kobj, vobj); Py_DECREF(kobj); Py_DECREF(vobj); } return ret; } PyObject *to_python::convert(types::empty_dict) { return PyDict_New(); } template bool from_python>:: is_convertible(PyObject *obj) { if (PyDict_Check(obj)) { PyObject *key, *value; Py_ssize_t pos = 0; if (PyDict_Next(obj, &pos, &key, &value)) { return ::is_convertible(key) && ::is_convertible(value); } else return true; } return false; } template types::dict from_python>::convert(PyObject *obj) { types::dict v = types::empty_dict(); PyObject *key, *value; Py_ssize_t pos = 0; while (PyDict_Next(obj, &pos, &key, &value)) v[ ::from_python(key)] = ::from_python(value); return v; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/dynamic_tuple.hpp000066400000000000000000000071771416264035500240170ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_DYNAMIC_TUPLE_HPP #define PYTHONIC_TYPES_DYNAMIC_TUPLE_HPP #include "pythonic/include/types/dynamic_tuple.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/types/traits.hpp" #include "pythonic/types/nditerator.hpp" 
#include "pythonic/utils/int_.hpp" #include "pythonic/utils/seq.hpp" #include "pythonic/utils/shared_ref.hpp" #include "pythonic/utils/nested_container.hpp" #include #include PYTHONIC_NS_BEGIN namespace types { template template long dynamic_tuple::_flat_size(E const &e, utils::int_<1>) const { return e.size(); } template intptr_t dynamic_tuple::id() const { return reinterpret_cast(&(*data)); } template template long dynamic_tuple::_flat_size(E const &e, utils::int_) const { return e.size() * _flat_size(e.fast(0), utils::int_{}); } template long dynamic_tuple::flat_size() const { return _flat_size(*this, utils::int_{}); } template bool dynamic_tuple::operator==(dynamic_tuple const &other) const { return size() == other.size() && std::equal(begin(), end(), other.begin()); } template bool dynamic_tuple::operator!=(dynamic_tuple const &other) const { return !(*this == other); } template bool dynamic_tuple::operator<(dynamic_tuple const &other) const { return std::lexicographical_compare(begin(), end(), other.begin(), other.end(), std::less()); } template bool dynamic_tuple::operator<=(dynamic_tuple const &other) const { if (size() == other.size() && std::equal(begin(), end(), other.begin())) return true; return std::lexicographical_compare(begin(), end(), other.begin(), other.end(), std::less()); } template bool dynamic_tuple::operator>(dynamic_tuple const &other) const { return std::lexicographical_compare(begin(), end(), other.begin(), other.end(), std::greater()); } template bool dynamic_tuple::operator>=(dynamic_tuple const &other) const { if (size() == other.size() && std::equal(begin(), end(), other.begin())) return true; return std::lexicographical_compare(begin(), end(), other.begin(), other.end(), std::greater()); } template dynamic_tuple dynamic_tuple:: operator+(dynamic_tuple const &other) const { dynamic_tuple result(begin(), end()); result.data->resize(size() + other.size()); std::copy(other.begin(), other.end(), result.data->begin() + size()); return 
result; } } PYTHONIC_NS_END namespace std { template size_t hash>:: operator()(pythonic::types::dynamic_tuple const &l) const { std::hash hasher; size_t seed = 0x9e3779b9; for (auto &&v : l) seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); return seed; } } #ifdef ENABLE_PYTHON_MODULE #include "pythonic/include/utils/seq.hpp" #include "pythonic/include/utils/fwd.hpp" #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template PyObject * to_python>::convert(types::dynamic_tuple const &t) { size_t N = t.size(); PyObject *out = PyTuple_New(N); for (size_t i = 0; i < N; ++i) PyTuple_SET_ITEM(out, i, ::to_python(t.fast(i))); return out; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/empty_iterator.hpp000066400000000000000000000015401416264035500242150ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_EMPTY_ITERATOR_HPP #define PYTHONIC_TYPES_EMPTY_ITERATOR_HPP #include "pythonic/include/types/empty_iterator.hpp" PYTHONIC_NS_BEGIN namespace types { empty_iterator::empty_iterator() { } empty_iterator::empty_iterator(empty_iterator const &) { } bool empty_iterator::operator==(empty_iterator const &) const { return true; } bool empty_iterator::operator!=(empty_iterator const &) const { return false; } bool empty_iterator::operator<(empty_iterator const &) const { return false; } empty_iterator &empty_iterator::operator++() { return *this; } empty_iterator &empty_iterator::operator++(int) { return *this; } double empty_iterator::operator*() const { return {}; } void empty_iterator::operator->() const { return; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/exceptions.hpp000066400000000000000000000244521416264035500233360ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_EXCEPTIONS_HPP #define PYTHONIC_TYPES_EXCEPTIONS_HPP #include "pythonic/include/types/exceptions.hpp" #include "pythonic/types/str.hpp" #include "pythonic/types/dynamic_tuple.hpp" #include "pythonic/types/attr.hpp" #include 
"pythonic/builtins/None.hpp" #include "pythonic/builtins/str.hpp" #include PYTHONIC_NS_BEGIN namespace types { template BaseException::BaseException(Types const &... types) : args({builtins::functor::str{}(types)...}) { } // Use this to create a python exception class #define CLASS_EXCEPTION_IMPL(name, parent) CLASS_EXCEPTION_IMPL(SystemExit, BaseException); CLASS_EXCEPTION_IMPL(KeyboardInterrupt, BaseException); CLASS_EXCEPTION_IMPL(GeneratorExit, BaseException); CLASS_EXCEPTION_IMPL(Exception, BaseException); CLASS_EXCEPTION_IMPL(StopIteration, Exception); CLASS_EXCEPTION_IMPL(StandardError, Exception); CLASS_EXCEPTION_IMPL(Warning, Exception); CLASS_EXCEPTION_IMPL(BytesWarning, Warning); CLASS_EXCEPTION_IMPL(UnicodeWarning, Warning); CLASS_EXCEPTION_IMPL(ImportWarning, Warning); CLASS_EXCEPTION_IMPL(FutureWarning, Warning); CLASS_EXCEPTION_IMPL(UserWarning, Warning); CLASS_EXCEPTION_IMPL(SyntaxWarning, Warning); CLASS_EXCEPTION_IMPL(RuntimeWarning, Warning); CLASS_EXCEPTION_IMPL(PendingDeprecationWarning, Warning); CLASS_EXCEPTION_IMPL(DeprecationWarning, Warning); CLASS_EXCEPTION_IMPL(BufferError, StandardError); CLASS_EXCEPTION_IMPL(FileNotFoundError, StandardError); CLASS_EXCEPTION_IMPL(ArithmeticError, StandardError); CLASS_EXCEPTION_IMPL(AssertionError, StandardError); CLASS_EXCEPTION_IMPL(AttributeError, StandardError); CLASS_EXCEPTION_IMPL(EnvironmentError, StandardError); CLASS_EXCEPTION_IMPL(EOFError, StandardError); CLASS_EXCEPTION_IMPL(ImportError, StandardError); CLASS_EXCEPTION_IMPL(LookupError, StandardError); CLASS_EXCEPTION_IMPL(MemoryError, StandardError); CLASS_EXCEPTION_IMPL(NameError, StandardError); CLASS_EXCEPTION_IMPL(ReferenceError, StandardError); CLASS_EXCEPTION_IMPL(RuntimeError, StandardError); CLASS_EXCEPTION_IMPL(SyntaxError, StandardError); CLASS_EXCEPTION_IMPL(SystemError, StandardError); CLASS_EXCEPTION_IMPL(TypeError, StandardError); CLASS_EXCEPTION_IMPL(ValueError, StandardError); CLASS_EXCEPTION_IMPL(FloatingPointError, 
ArithmeticError); CLASS_EXCEPTION_IMPL(OverflowError, ArithmeticError); CLASS_EXCEPTION_IMPL(ZeroDivisionError, ArithmeticError); CLASS_EXCEPTION_IMPL(IOError, EnvironmentError); CLASS_EXCEPTION_IMPL(OSError, EnvironmentError); CLASS_EXCEPTION_IMPL(WindowsError, OSError); CLASS_EXCEPTION_IMPL(VMSError, OSError); CLASS_EXCEPTION_IMPL(IndexError, LookupError); CLASS_EXCEPTION_IMPL(KeyError, LookupError); CLASS_EXCEPTION_IMPL(UnboundLocalError, NameError); CLASS_EXCEPTION_IMPL(NotImplementedError, RuntimeError); CLASS_EXCEPTION_IMPL(IndentationError, SyntaxError); CLASS_EXCEPTION_IMPL(TabError, IndentationError); CLASS_EXCEPTION_IMPL(UnicodeError, ValueError); } PYTHONIC_NS_END #include "pythonic/utils/functor.hpp" #define PYTHONIC_EXCEPTION_IMPL(name) \ template \ types::name name(Types const &... args) \ { \ return types::name(args...); \ } /* pythran attribute system { */ #define IMPL_EXCEPTION_GETATTR(name) \ PYTHONIC_NS_BEGIN \ namespace builtins \ { \ types::none> \ getattr(types::attr::ARGS, types::name const &f) \ { \ return f.args; \ } \ } \ PYTHONIC_NS_END #define IMPL_EXCEPTION_GETATTR_FULL(name) \ PYTHONIC_NS_BEGIN \ namespace builtins \ { \ types::none> \ getattr(types::attr::ARGS, types::name const &e) \ { \ if (e.args.size() > 3 || e.args.size() < 2) \ return e.args; \ else \ return types::dynamic_tuple(e.args.begin(), \ e.args.begin() + 2); \ } \ types::none getattr(types::attr::ERRNO, types::name const &e) \ { \ if (e.args.size() > 3 || e.args.size() < 2) \ return builtins::None; \ else \ return e.args[0]; \ } \ types::none getattr(types::attr::STRERROR, \ types::name const &e) \ { \ if (e.args.size() > 3 || e.args.size() < 2) \ return builtins::None; \ else \ return e.args[1]; \ } \ types::none getattr(types::attr::FILENAME, \ types::name const &e) \ { \ if (e.args.size() != 3) \ return builtins::None; \ else \ return e.args[2]; \ } \ } \ PYTHONIC_NS_END IMPL_EXCEPTION_GETATTR(BaseException); IMPL_EXCEPTION_GETATTR(SystemExit); 
IMPL_EXCEPTION_GETATTR(KeyboardInterrupt); IMPL_EXCEPTION_GETATTR(GeneratorExit); IMPL_EXCEPTION_GETATTR(Exception); IMPL_EXCEPTION_GETATTR(StopIteration); IMPL_EXCEPTION_GETATTR(StandardError); IMPL_EXCEPTION_GETATTR(Warning); IMPL_EXCEPTION_GETATTR(BytesWarning); IMPL_EXCEPTION_GETATTR(UnicodeWarning); IMPL_EXCEPTION_GETATTR(ImportWarning); IMPL_EXCEPTION_GETATTR(FutureWarning); IMPL_EXCEPTION_GETATTR(UserWarning); IMPL_EXCEPTION_GETATTR(SyntaxWarning); IMPL_EXCEPTION_GETATTR(RuntimeWarning); IMPL_EXCEPTION_GETATTR(PendingDeprecationWarning); IMPL_EXCEPTION_GETATTR(DeprecationWarning); IMPL_EXCEPTION_GETATTR(BufferError); IMPL_EXCEPTION_GETATTR(FileNotFoundError); IMPL_EXCEPTION_GETATTR(ArithmeticError); IMPL_EXCEPTION_GETATTR(AssertionError); IMPL_EXCEPTION_GETATTR(AttributeError); IMPL_EXCEPTION_GETATTR(EOFError); IMPL_EXCEPTION_GETATTR(ImportError); IMPL_EXCEPTION_GETATTR(LookupError); IMPL_EXCEPTION_GETATTR(MemoryError); IMPL_EXCEPTION_GETATTR(NameError); IMPL_EXCEPTION_GETATTR(ReferenceError); IMPL_EXCEPTION_GETATTR(RuntimeError); IMPL_EXCEPTION_GETATTR(SyntaxError); IMPL_EXCEPTION_GETATTR(SystemError); IMPL_EXCEPTION_GETATTR(TypeError); IMPL_EXCEPTION_GETATTR(ValueError); IMPL_EXCEPTION_GETATTR(FloatingPointError); IMPL_EXCEPTION_GETATTR(OverflowError); IMPL_EXCEPTION_GETATTR(ZeroDivisionError); IMPL_EXCEPTION_GETATTR(IndexError); IMPL_EXCEPTION_GETATTR(KeyError); IMPL_EXCEPTION_GETATTR(UnboundLocalError); IMPL_EXCEPTION_GETATTR(NotImplementedError); IMPL_EXCEPTION_GETATTR(IndentationError); IMPL_EXCEPTION_GETATTR(TabError); IMPL_EXCEPTION_GETATTR(UnicodeError); IMPL_EXCEPTION_GETATTR_FULL(IOError); IMPL_EXCEPTION_GETATTR_FULL(EnvironmentError); IMPL_EXCEPTION_GETATTR_FULL(OSError); PYTHONIC_NS_BEGIN namespace types { std::ostream &operator<<(std::ostream &o, BaseException const &e) { return o << e.args; } /* @brief Convert EnvironmentError to a string. 
* * The number of arguments used when creating the EnvironmentError impact * the resulting "type" || formatting of the chain. We aim to mimic python * behavior of course: * - only one arg, then assume it can be converted to string, * - two args, then the first one is the errno, the next one a string, * - three args, like two args, adding "filename" as third one (after ':') * - four || more args, the "tuple" used to construct the exception * */ std::ostream &operator<<(std::ostream &o, EnvironmentError const &e) { if (e.args.size() == 1) return o << e.args[0]; if (e.args.size() == 2) return o << "[Errno " << e.args[0] << "] " << e.args[1]; else if (e.args.size() == 3) return o << "[Errno " << e.args[0] << "] " << e.args[1] << ": '" << e.args[2] << "'"; else { // Generate "('a', 'b', 'c', 'd') if a,b,c, && d are in e.args std::string listsep = ""; o << "("; for (auto &arg : e.args) { o << listsep << "'" << arg << "'"; listsep = ", "; } o << ")"; return o; } } } PYTHONIC_NS_END /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/types/file.hpp000066400000000000000000000171301416264035500220670ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_FILE_HPP #define PYTHONIC_TYPES_FILE_HPP #include "pythonic/include/types/file.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/utils/shared_ref.hpp" #include "pythonic/types/str.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/types/attr.hpp" #include "pythonic/builtins/IOError.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/builtins/RuntimeError.hpp" #include "pythonic/builtins/StopIteration.hpp" #include #include #include #include #include #include #ifdef _WIN32 #include #else #include #endif PYTHONIC_NS_BEGIN namespace types { /// _file implementation _file::_file() : f(nullptr) { } // TODO : no check on file existance? 
_file::_file(types::str const &filename, types::str const &strmode) : f(fopen(filename.c_str(), strmode.c_str())) { } FILE *_file::operator*() const { return f; } _file::~_file() { if (f) fclose(f); } /// file implementation // Constructors file::file() : file_iterator(), data(utils::no_memory()) { } file::file(types::str const &filename, types::str const &strmode) : file_iterator(), data(utils::no_memory()), mode(strmode), name(filename), newlines('\n') { open(filename, strmode); if (mode.find_first_of("r+") != -1) *(file_iterator *)this = file_iterator(*this); } // Iterators file::iterator file::begin() { return *this; } file::iterator file::end() { return {}; } // Modifiers void file::open(types::str const &filename, types::str const &strmode) { const char *smode = strmode.c_str(); // Python enforces that the mode, after stripping 'U', begins with 'r', // 'w' || 'a'. if (*smode == 'U') { ++smode; } // Not implemented yet data = utils::shared_ref(filename, smode); if (!**data) throw types::IOError("Couldn't open file " + filename); is_open = true; } void file::close() { fclose(**data); data->f = nullptr; is_open = false; } bool file::closed() const { return !is_open; } types::str const &file::getmode() const { return mode; } types::str const &file::getname() const { return name; } types::str const &file::getnewlines() const { // Python seems to always return none... 
Doing the same here return newlines; } bool file::eof() { return ::feof(**data); } void file::flush() { if (!is_open) throw ValueError("I/O operation on closed file"); fflush(**data); } long file::fileno() const { if (!is_open) throw ValueError("I/O operation on closed file"); return ::fileno(**data); } bool file::isatty() const { if (!is_open) throw ValueError("I/O operation on closed file"); return ::isatty(this->fileno()); } types::str file::read(long size) { if (!is_open) throw ValueError("I/O operation on closed file"); if (mode.find_first_of("r+") == -1) throw IOError("File not open for reading"); if (size == 0 || (feof(**data) && mode.find_first_of("ra") == -1)) return types::str(); long curr_pos = tell(); seek(0, SEEK_END); size = size < 0 ? tell() - curr_pos : size; seek(curr_pos); std::unique_ptr content{new char[size + 1]}; // This part needs a new implementation of types::str(char*) to avoid // unnecessary copy. types::str res(content.get(), fread(content.get(), sizeof(char), size, **data)); return res; } types::str file::readline(long size) { if (!is_open) throw ValueError("I/O operation on closed file"); if (mode.find_first_of("r+") == -1) throw IOError("File not open for reading"); constexpr static long BUFFER_SIZE = 1024; types::str res; char read_str[BUFFER_SIZE]; for (long i = 0; i < size; i += BUFFER_SIZE) { // +1 because we read the last chunk so we don't want to count \0 if (fgets(read_str, std::min(BUFFER_SIZE - 1, size - i) + 1, **data)) res += read_str; if (feof(**data) || res[res.size() - 1] == "\n") break; } return res; } types::list file::readlines(long sizehint) { // Official python doc specifies that sizehint is used as a max of chars // But it has not been implemented in the standard python interpreter... 
types::str str; types::list lst(0); while ((str = readline())) lst.push_back(str); return lst; } void file::seek(long offset, long whence) { if (!is_open) throw ValueError("I/O operation on closed file"); if (whence != SEEK_SET && whence != SEEK_CUR && whence != SEEK_END) throw IOError("file.seek() : Invalid argument."); fseek(**data, offset, whence); } long file::tell() const { if (!is_open) throw ValueError("I/O operation on closed file"); return ftell(**data); } void file::truncate(long size) { if (!is_open) throw ValueError("I/O operation on closed file"); if (mode.find_first_of("wa+") == -1) throw IOError("file.write() : File not open for writing."); if (size < 0) size = this->tell(); #ifdef _WIN32 long error = _chsize_s(fileno(), size); #else long error = ftruncate(fileno(), size); #endif if (error == -1) throw RuntimeError(strerror(errno)); } long file::write(types::str const &str) { if (!is_open) throw ValueError("I/O operation on closed file"); if (mode.find_first_of("wa+") == -1) throw IOError("file.write() : File not open for writing."); return fwrite(str.c_str(), sizeof(char), str.size(), **data); } template void file::writelines(T const &seq) { auto end = seq.end(); for (auto it = seq.begin(); it != end; ++it) write(*it); } /// file_iterator implementation // TODO : What if the file disappears before the end? // Like in : // for line in open("myfile"): // print line file_iterator::file_iterator(file &ref) : f(&ref), set(false), curr(), position(ref.tell()) { } file_iterator::file_iterator() : f(nullptr), set(false), curr(), position(std::numeric_limits::max()){}; bool file_iterator::operator==(file_iterator const &f2) const { return position == f2.position; } bool file_iterator::operator!=(file_iterator const &f2) const { return position != f2.position; } bool file_iterator::operator<(file_iterator const &f2) const { // Not really elegant... 
// Equivalent to 'return *this != f2;' return position < f2.position; } file_iterator &file_iterator::operator++() { if (f->eof()) return *this; operator*(); set = false; operator*(); position = f->eof() ? std::numeric_limits::max() : f->tell(); return *this; } types::str file_iterator::operator*() const { if (!set) { curr = f->readline(); set = true; } return curr.chars(); // to make a copy } } PYTHONIC_NS_END /* pythran attribute system { */ PYTHONIC_NS_BEGIN namespace builtins { bool getattr(types::attr::CLOSED, types::file const &f) { return f.closed(); } types::str const &getattr(types::attr::MODE, types::file const &f) { return f.getmode(); } types::str const &getattr(types::attr::NAME, types::file const &f) { return f.getname(); } // Python seems to always return none... Doing the same. types::none_type getattr(types::attr::NEWLINES, types::file const &f) { return builtins::None; } } PYTHONIC_NS_END /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/types/finfo.hpp000066400000000000000000000012621416264035500222500ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_FINFO_HPP #define PYTHONIC_TYPES_FINFO_HPP #include "pythonic/include/types/finfo.hpp" #include "pythonic/types/attr.hpp" #include PYTHONIC_NS_BEGIN namespace types { template T finfo>::eps() const { return std::numeric_limits::epsilon(); } template T finfo::eps() const { return std::numeric_limits::epsilon(); } } PYTHONIC_NS_END /* pythran attribute system { */ PYTHONIC_NS_BEGIN namespace builtins { template auto getattr(types::attr::EPS, pythonic::types::finfo const &f) -> decltype(f.eps()) { return f.eps(); } } PYTHONIC_NS_END /* } */ #endif pythran-0.10.0+ds2/pythran/pythonic/types/float.hpp000066400000000000000000000023651416264035500222610ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_FLOAT_HPP #define PYTHONIC_TYPES_FLOAT_HPP #include "pythonic/include/types/float.hpp" #include "pythonic/types/attr.hpp" #include #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" #include 
"numpy/arrayscalars.h" #include PYTHONIC_NS_BEGIN PyObject *to_python::convert(long double d) { return PyArray_Scalar(&d, PyArray_DescrFromType(NPY_LONGDOUBLE), nullptr); } PyObject *to_python::convert(double d) { return PyFloat_FromDouble(d); } PyObject *to_python::convert(float d) { return PyArray_Scalar(&d, PyArray_DescrFromType(NPY_FLOAT), nullptr); } bool from_python::is_convertible(PyObject *obj) { return PyArray_IsScalar(obj, LongDouble); } long double from_python::convert(PyObject *obj) { return PyArrayScalar_VAL(obj, LongDouble); } bool from_python::is_convertible(PyObject *obj) { return PyFloat_Check(obj); } double from_python::convert(PyObject *obj) { return PyFloat_AsDouble(obj); } bool from_python::is_convertible(PyObject *obj) { return PyArray_IsScalar(obj, Float); } float from_python::convert(PyObject *obj) { return PyArrayScalar_VAL(obj, Float); } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/float128.hpp000066400000000000000000000001771416264035500225130ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_FLOAT128_HPP #define PYTHONIC_TYPES_FLOAT128_HPP #include "pythonic/include/types/float64.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/float32.hpp000066400000000000000000000001751416264035500224230ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_FLOAT32_HPP #define PYTHONIC_TYPES_FLOAT32_HPP #include "pythonic/include/types/float32.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/float64.hpp000066400000000000000000000001751416264035500224300ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_FLOAT64_HPP #define PYTHONIC_TYPES_FLOAT64_HPP #include "pythonic/include/types/float64.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/generator.hpp000066400000000000000000000035321416264035500231370ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_GENERATOR_HPP #define PYTHONIC_TYPES_GENERATOR_HPP #include "pythonic/include/types/generator.hpp" #include "pythonic/builtins/StopIteration.hpp" #include 
PYTHONIC_NS_BEGIN namespace types { template generator_iterator::generator_iterator() : the_generator() { the_generator.__generator_state = -1; } // this represents the end template generator_iterator::generator_iterator(T const &a_generator) : the_generator(a_generator) { } template generator_iterator &generator_iterator::operator++() { try { the_generator.next(); } catch (types::StopIteration const &) { the_generator.__generator_state = -1; } return *this; } template typename T::result_type generator_iterator::operator*() const { return *the_generator; } template bool generator_iterator:: operator!=(generator_iterator const &other) const { assert(other.the_generator.__generator_state == -1 || the_generator.__generator_state == -1); return the_generator.__generator_state != other.the_generator.__generator_state; } template bool generator_iterator:: operator==(generator_iterator const &other) const { assert(other.the_generator.__generator_state == -1 || the_generator.__generator_state == -1); return the_generator.__generator_state == other.the_generator.__generator_state; } template bool generator_iterator:: operator<(generator_iterator const &other) const { assert(other.the_generator.__generator_state == -1 || the_generator.__generator_state == -1); return the_generator.__generator_state != other.the_generator.__generator_state; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/int.hpp000066400000000000000000000117521416264035500217460ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_INT_HPP #define PYTHONIC_TYPES_INT_HPP #include #include "pythonic/include/types/int.hpp" #include "pythonic/types/attr.hpp" PYTHONIC_NS_BEGIN namespace builtins { template typename std::enable_if::value, T>::type getattr(types::attr::REAL, T self) { return self; } template typename std::enable_if::value, T>::type getattr(types::attr::IMAG, T self) { return T(0); } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE #include "pythonic/python/core.hpp" #include 
"numpy/arrayobject.h" PYTHONIC_NS_BEGIN template struct c_type_to_numpy_type : c_type_to_numpy_type()())> { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type> : std::integral_constant { }; template <> struct c_type_to_numpy_type> : std::integral_constant { }; template <> struct c_type_to_numpy_type> : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; template <> struct c_type_to_numpy_type : std::integral_constant { }; #ifndef PyInt_FromLong #define PyInt_FromLong PyLong_FromLong #ifndef PyInt_CheckExact #define PyInt_CheckExact PyLong_CheckExact #endif #ifndef PyInt_AsLong #define PyInt_AsLong PyLong_AsLong #endif #endif #define PYTHONIC_INT_TO_PYTHON(TYPE) \ PyObject *to_python::convert(TYPE l) \ { \ return PyArray_Scalar( \ &l, PyArray_DescrFromType(c_type_to_numpy_type::value), \ nullptr); \ } PYTHONIC_INT_TO_PYTHON(char) PYTHONIC_INT_TO_PYTHON(unsigned char) PYTHONIC_INT_TO_PYTHON(signed char) PYTHONIC_INT_TO_PYTHON(unsigned short) PYTHONIC_INT_TO_PYTHON(signed short) PYTHONIC_INT_TO_PYTHON(unsigned int) 
PYTHONIC_INT_TO_PYTHON(signed int) PYTHONIC_INT_TO_PYTHON(unsigned long) PyObject *to_python::convert(signed long l) { return PyInt_FromLong(l); } PYTHONIC_INT_TO_PYTHON(unsigned long long) PYTHONIC_INT_TO_PYTHON(signed long long) #undef PYTHONIC_INT_TO_PYTHON #define PYTHONIC_INT_FROM_PYTHON(TYPE, NTYPE) \ bool from_python::is_convertible(PyObject *obj) \ { \ return PyInt_CheckExact(obj) || \ PyObject_TypeCheck(obj, &Py##NTYPE##ArrType_Type); \ } \ TYPE from_python::convert(PyObject *obj) \ { \ return PyInt_AsLong(obj); \ } PYTHONIC_INT_FROM_PYTHON(unsigned char, UByte) PYTHONIC_INT_FROM_PYTHON(signed char, Byte) PYTHONIC_INT_FROM_PYTHON(unsigned short, UShort) PYTHONIC_INT_FROM_PYTHON(signed short, Short) PYTHONIC_INT_FROM_PYTHON(unsigned int, UInt) PYTHONIC_INT_FROM_PYTHON(signed int, Int) PYTHONIC_INT_FROM_PYTHON(unsigned long, ULong) PYTHONIC_INT_FROM_PYTHON(signed long, Long) PYTHONIC_INT_FROM_PYTHON(unsigned long long, ULongLong) PYTHONIC_INT_FROM_PYTHON(signed long long, LongLong) #undef PYTHONIC_INT_FROM_PYTHON PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/int16.hpp000066400000000000000000000001671416264035500221130ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_INT16_HPP #define PYTHONIC_TYPES_INT16_HPP #include "pythonic/include/types/int16.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/int32.hpp000066400000000000000000000001671416264035500221110ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_INT32_HPP #define PYTHONIC_TYPES_INT32_HPP #include "pythonic/include/types/int32.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/int64.hpp000066400000000000000000000001671416264035500221160ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_INT64_HPP #define PYTHONIC_TYPES_INT64_HPP #include "pythonic/include/types/int64.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/int8.hpp000066400000000000000000000001641416264035500220310ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_INT8_HPP #define 
PYTHONIC_TYPES_INT8_HPP #include "pythonic/include/types/int8.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/intc.hpp000066400000000000000000000001641416264035500221040ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_INTC_HPP #define PYTHONIC_TYPES_INTC_HPP #include "pythonic/include/types/intc.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/intp.hpp000066400000000000000000000001641416264035500221210ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_INTP_HPP #define PYTHONIC_TYPES_INTP_HPP #include "pythonic/include/types/intp.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/list.hpp000066400000000000000000000523231416264035500221260ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_LIST_HPP #define PYTHONIC_TYPES_LIST_HPP #include "pythonic/include/types/list.hpp" #include "pythonic/types/nditerator.hpp" #include "pythonic/builtins/len.hpp" #include "pythonic/types/slice.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/types/bool.hpp" #include "pythonic/utils/shared_ref.hpp" #include "pythonic/utils/reserve.hpp" #include #include PYTHONIC_NS_BEGIN namespace types { /// Sliced list // Constructors template sliced_list::sliced_list() : data(utils::no_memory()) { } template sliced_list::sliced_list(sliced_list const &s) : data(s.data), slicing(s.slicing) { } template sliced_list::sliced_list(list const &other, S const &s) : data(other.data), slicing(s.normalize(other.size())) { } template template sliced_list::sliced_list(utils::shared_ref const &other, Sn const &s) : data(other), slicing(s) { } // iterators template typename sliced_list::iterator sliced_list::begin() { return {*this, 0}; } template typename sliced_list::const_iterator sliced_list::begin() const { return {*this, 0}; } template typename sliced_list::iterator sliced_list::end() { return {*this, size()}; } template typename sliced_list::const_iterator sliced_list::end() const { return {*this, size()}; } // size template long sliced_list::size() const { return 
slicing.size(); } template sliced_list::operator bool() const { return slicing.size(); } // accessor template typename sliced_list::const_reference sliced_list::fast(long i) const { assert(0 <= i && i < size()); auto const index = slicing.get(i); assert(0 <= index && index < (long)data->size()); return (*data)[index]; } template typename sliced_list::const_reference sliced_list:: operator[](long i) const { assert(i < size()); auto const index = slicing.get(i); assert(0 <= index && index < (long)data->size()); return (*data)[index]; } template typename sliced_list::reference sliced_list::operator[](long i) { assert(i < size()); auto const index = slicing.get(i); assert(0 <= index && index < (long)data->size()); return (*data)[index]; } template template typename std::enable_if< is_slice::value, sliced_list() * std::declval())>>::type sliced_list:: operator[](Sp s) const { return {data, slicing * s.normalize(this->size())}; } // io template std::ostream &operator<<(std::ostream &os, sliced_list const &v) { os << '['; auto iter = v.begin(); if (iter != v.end()) { while (iter + 1 != v.end()) { os << *iter << ", "; ++iter; } os << *iter; } return os << ']'; } // comparison template template bool sliced_list::operator==(list const &other) const { if (size() != other.size()) return false; return std::equal(begin(), end(), other.begin()); } template bool sliced_list::operator==(empty_list const &other) const { return size() == 0; } template inline sliced_list &sliced_list:: operator=(sliced_list const &s) { slicing = s.slicing; if (data != s.data) data = s.data; return *this; } template sliced_list &sliced_list::operator=(list const &seq) { if (slicing.step == 1) { data->erase(data->begin() + slicing.lower, data->begin() + slicing.upper); data->insert(data->begin() + slicing.lower, seq.begin(), seq.end()); } else assert(!"not implemented yet"); return *this; } template list sliced_list::operator+(list const &s) const { list out(size() + s.size()); std::copy(s.begin(), 
s.end(), std::copy(begin(), end(), out.begin())); return out; } template template list sliced_list::operator+(array_base const &s) const { list out(size() + s.size()); std::copy(s.begin(), s.end(), std::copy(begin(), end(), out.begin())); return out; } template template list::type> sliced_list:: operator+(sliced_list const &s) const { list::type> out(size() + s.size()); std::copy(s.begin(), s.end(), std::copy(begin(), end(), out.begin())); return out; } template list operator*(N n, list const &l) { return l * n; } #ifdef USE_XSIMD template template typename sliced_list::simd_iterator sliced_list::vbegin(vectorizer) const { return {data->data() + slicing.lower}; } template template typename sliced_list::simd_iterator sliced_list::vend(vectorizer) const { using vector_type = typename xsimd::simd_type; static const std::size_t vector_size = vector_type::size; return {data->data() + slicing.lower + long(size() / vector_size * vector_size)}; } #endif // other operations template template bool sliced_list::contains(V const &v) const { return std::find(data->begin(), data->end(), v) != data->end(); } template intptr_t sliced_list::id() const { // sharing is not implemented for sliced list return reinterpret_cast(this); } template long sliced_list::count(T const &x) const { return std::count(begin(), end(), x); } /// List // constructors template list::list() : data(utils::no_memory()) { } template template list::list(InputIterator start, InputIterator stop) : data() { if (std::is_same< typename std::iterator_traits::iterator_category, std::random_access_iterator_tag>::value) data->reserve(std::distance(start, stop)); else data->reserve(DEFAULT_LIST_CAPACITY); std::copy(start, stop, std::back_inserter(*data)); } template list::list(empty_list const &) : data(0) { } template list::list(size_type sz) : data(sz) { } template list::list(T const &value, single_value, size_type sz) : data(sz, value) { } template list::list(std::initializer_list l) : data(std::move(l)) { } 
template list::list(list &&other) : data(std::move(other.data)) { } template list::list(list const &other) : data(other.data) { } template template list::list(list const &other) : data(other.size()) { std::copy(other.begin(), other.end(), begin()); } template template list::list(sliced_list const &other) : data(other.begin(), other.end()) { } // operators template list &list::operator=(list &&other) { data = std::move(other.data); return *this; } template template list &list::operator=(list const &other) { data = utils::shared_ref{other.size()}; std::copy(other.begin(), other.end(), begin()); return *this; } template list &list::operator=(list const &other) { data = other.data; return *this; } template list &list::operator=(empty_list const &) { data = utils::shared_ref(); return *this; } template template list &list::operator=(array_base const &other) { data = utils::shared_ref(other.begin(), other.end()); return *this; } template template list &list::operator=(sliced_list const &other) { if (other.data == data) { auto it = std::copy(other.begin(), other.end(), data->begin()); data->resize(it - data->begin()); } else data = utils::shared_ref(other.begin(), other.end()); return *this; } template template list &list::operator+=(sliced_list const &other) { data->resize(size() + other.size()); std::copy(other.begin(), other.end(), data->begin()); return *this; } template template list list::operator+(sliced_list const &other) const { list new_list(begin(), end()); new_list.reserve(size() + other.size()); std::copy(other.begin(), other.end(), std::back_inserter(new_list)); return new_list; } template template list list::operator+(array_base const &other) const { list new_list(begin(), end()); new_list.reserve(size() + other.size()); std::copy(other.begin(), other.end(), std::back_inserter(new_list)); return new_list; } // io template std::ostream &operator<<(std::ostream &os, list const &v) { os << '['; auto iter = v.begin(); if (iter != v.end()) { while (iter + 1 != 
v.end()) os << *iter++ << ", "; os << *iter; } return os << ']'; } // comparison template template bool list::operator==(list const &other) const { if (size() != other.size()) return false; return std::equal(begin(), end(), other.begin()); } template bool list::operator==(empty_list const &) const { return size() == 0; } template template bool list::operator!=(list const &other) const { return !operator==(other); } template bool list::operator!=(empty_list const &) const { return size() != 0; } // iterators template typename list::iterator list::begin() { return data->begin(); } template typename list::const_iterator list::begin() const { return data->begin(); } template typename list::iterator list::end() { return data->end(); } template typename list::const_iterator list::end() const { return data->end(); } template typename list::reverse_iterator list::rbegin() { return data->rbegin(); } template typename list::const_reverse_iterator list::rbegin() const { return data->rbegin(); } template typename list::reverse_iterator list::rend() { return data->rend(); } template typename list::const_reverse_iterator list::rend() const { return data->rend(); } // comparison template bool list::operator<(list const &other) const { return std::lexicographical_compare(begin(), end(), other.begin(), other.end()); } template bool list::operator>(list const &other) const { return std::lexicographical_compare(other.begin(), other.end(), begin(), end()); } template bool list::operator<=(list const &other) const { return !(*this > other); } template bool list::operator>=(list const &other) const { return !(*this < other); } // element access #ifdef USE_XSIMD template template typename list::simd_iterator list::vbegin(vectorizer) const { return {data->data()}; } template template typename list::simd_iterator list::vend(vectorizer) const { using vector_type = typename xsimd::simd_type; static const std::size_t vector_size = vector_type::size; return {data->data() + long(size() / 
vector_size * vector_size)}; } #endif template typename list::reference list::fast(long n) { return (*data)[n]; } template typename list::reference list::operator[](long n) { if (n < 0) n += size(); assert(0 <= n && n < size()); return fast(n); } template typename list::const_reference list::fast(long n) const { assert(n < size()); return (*data)[n]; } template typename list::const_reference list::operator[](long n) const { if (n < 0) n += size(); assert(0 <= n && n < size()); return fast(n); } template template typename std::enable_if::value, sliced_list>::type list:: operator[](Sp const &s) const { return {*this, s}; } // modifiers template template void list::push_back(Tp &&x) { // FIXME: clang-3.4 doesn't support emplace_back for vector of bool data->push_back(std::forward(x)); } template template void list::insert(long i, Tp &&x) { if (i == size()) data->emplace_back(std::forward(x)); else data->insert(data->begin() + i, std::forward(x)); } template void list::reserve(size_t n) { data->reserve(n); } template void list::resize(size_t n) { data->resize(n); } template typename list::iterator list::erase(size_t n) { return data->erase(data->begin() + n); } template T list::pop(long x) { long sz = size(); x = x % sz; if (x < 0) x += sz; T res = fast(x); erase(x); return res; } // TODO: have to raise a valueError template none_type list::remove(T const &x) { erase(index(x)); return {}; } // Misc template long list::index(T const &x) const { return std::find(begin(), end(), x) - begin(); } // list interface template list::operator bool() const { return !data->empty(); } template template list::type> list:: operator+(list const &s) const { list::type> clone(size() + s.size()); std::copy(s.begin(), s.end(), std::copy(begin(), end(), clone.begin())); return clone; } template template list() + std::declval::value_type>())> list::operator+(sliced_list const &s) const { list() + std::declval::value_type>())> clone(size() + len(s)); std::copy(s.begin(), s.end(), 
std::copy(begin(), end(), clone.begin())); return clone; } template list list::operator+(empty_list const &) const { return list(begin(), end()); } template list list::operator*(long n) const { if (size() == 1) { return list(fast(0), single_value{}, n); } else { list r(size() * n); auto start = r.begin(); while (start != r.end()) start = std::copy(this->begin(), this->end(), start); return r; } } template list const &list::operator*=(long n) { if (size() == 1) { resize(n); std::fill(begin() + 1, end(), fast(0)); } else { auto const initial_size = size(); resize(n * initial_size); // FIXME: could use less calls to std::copy auto tgt = begin() + initial_size; for (long i = 1; i < n; ++i) tgt = std::copy(begin(), begin() + initial_size, tgt); } return *this; } template template list &list::operator+=(F const &s) { reserve(size() + s.size()); std::copy(s.begin(), s.end(), std::back_inserter(*this)); return *this; } template long list::size() const { return data->size(); } template template long list::_flat_size(E const &e, utils::int_<1>) const { return std::distance(e.begin(), e.end()); } template template long list::_flat_size(E const &e, utils::int_) const { return std::distance(e.begin(), e.end()) * _flat_size(e[0], utils::int_{}); } template long list::flat_size() const { return _flat_size(*this, utils::int_{}); } template template bool list::contains(V const &v) const { return std::find(data->begin(), data->end(), v) != data->end(); } template intptr_t list::id() const { return reinterpret_cast(&(*data)); } template long list::count(T const &x) const { return std::count(begin(), end(), x); } /// Empty list template list empty_list::operator+(list const &s) const { return s; } template sliced_list empty_list::operator+(sliced_list const &s) const { return s; } template static_list empty_list::operator+(array_base const &s) const { return s.template to_array(); } empty_list empty_list::operator+(empty_list const &) const { return empty_list(); } template typename 
std::enable_if::value, list>::type empty_list:: operator+(F s) const { return {s.begin(), s.end()}; } empty_list::operator bool() const { return false; } template empty_list::operator list() const { return list(0); } constexpr long empty_list::size() { return 0; } std::ostream &operator<<(std::ostream &os, empty_list const &) { return os << "[]"; } } namespace utils { template void reserve(types::list &l, From const &f, typename From::const_iterator *) { l.reserve(builtins::len(f)); } } PYTHONIC_NS_END /* overload std::get */ namespace std { template typename pythonic::types::list::reference get(pythonic::types::list &t) { return t[I]; } template typename pythonic::types::list::const_reference get(pythonic::types::list const &t) { return t[I]; } template typename pythonic::types::list::value_type get(pythonic::types::list &&t) { return std::move(t)[I]; } template typename pythonic::types::sliced_list::reference get(pythonic::types::sliced_list &t) { return t[I]; } template typename pythonic::types::sliced_list::const_reference get(pythonic::types::sliced_list const &t) { return t[I]; } template typename pythonic::types::sliced_list::value_type get(pythonic::types::sliced_list &&t) { return std::move(t)[I]; } } #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN PyObject *to_python::reference>::convert( typename std::vector::reference const &v) { return ::to_python((bool)v); } PyObject *to_python::const_reference>::value, phantom_type, typename std::vector::const_reference>::type>:: convert(typename std::vector::const_reference const &v) { return ::to_python((bool)v); } template PyObject *to_python>::convert(types::list const &v) { Py_ssize_t n = v.size(); PyObject *ret = PyList_New(n); for (Py_ssize_t i = 0; i < n; i++) PyList_SET_ITEM(ret, i, ::to_python(v[i])); return ret; } template PyObject * to_python>::convert(types::sliced_list const &v) { Py_ssize_t n = v.size(); PyObject *ret = PyList_New(n); for (Py_ssize_t i = 0; i < n; i++) PyList_SET_ITEM(ret, i, 
::to_python(v[i])); return ret; } PyObject *to_python::convert(types::empty_list const &) { return PyList_New(0); } template bool from_python>::is_convertible(PyObject *obj) { return PyList_Check(obj) && (PyObject_Not(obj) || ::is_convertible(PySequence_Fast_GET_ITEM(obj, 0))); } template types::list from_python>::convert(PyObject *obj) { Py_ssize_t l = PySequence_Fast_GET_SIZE(obj); types::list v(l); PyObject **core = PySequence_Fast_ITEMS(obj); std::transform(core, core + l, v.begin(), [](PyObject *o) { return ::from_python(o); }); return v; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/ndarray.hpp000066400000000000000000001540241416264035500226140ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NDARRAY_HPP #define PYTHONIC_TYPES_NDARRAY_HPP #include "pythonic/include/types/ndarray.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/types/empty_iterator.hpp" #include "pythonic/types/attr.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/utils/nested_container.hpp" #include "pythonic/utils/shared_ref.hpp" #include "pythonic/utils/reserve.hpp" #include "pythonic/utils/int_.hpp" #include "pythonic/utils/broadcast_copy.hpp" #include "pythonic/types/slice.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/types/list.hpp" #include "pythonic/types/raw_array.hpp" #include "pythonic/numpy/bool_.hpp" #include "pythonic/numpy/uint8.hpp" #include "pythonic/numpy/int8.hpp" #include "pythonic/numpy/uint16.hpp" #include "pythonic/numpy/int16.hpp" #include "pythonic/numpy/uint32.hpp" #include "pythonic/numpy/int32.hpp" #include "pythonic/numpy/uint64.hpp" #include "pythonic/numpy/int64.hpp" #include "pythonic/numpy/float32.hpp" #include "pythonic/numpy/float64.hpp" #include "pythonic/numpy/complex64.hpp" #include "pythonic/numpy/complex128.hpp" #include "pythonic/types/vectorizable_type.hpp" #include "pythonic/types/numpy_op_helper.hpp" #include "pythonic/types/numpy_expr.hpp" #include 
"pythonic/types/numpy_texpr.hpp" #include "pythonic/types/numpy_iexpr.hpp" #include "pythonic/types/numpy_gexpr.hpp" #include "pythonic/types/numpy_vexpr.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/utils/array_helper.hpp" #include "pythonic/builtins/len.hpp" #include "pythonic/operator_/iadd.hpp" #include "pythonic/operator_/iand.hpp" #include "pythonic/operator_/idiv.hpp" #include "pythonic/operator_/imul.hpp" #include "pythonic/operator_/ior.hpp" #include "pythonic/operator_/ixor.hpp" #include "pythonic/operator_/isub.hpp" #include #include #include #include #include #include #if !defined(HAVE_SSIZE_T) || !HAVE_SSIZE_T #if defined(_MSC_VER) #include typedef SSIZE_T ssize_t; #endif #endif PYTHONIC_NS_BEGIN namespace types { template array::value> make_strides(pS const &shape, utils::index_sequence) { array::value> out; out[std::tuple_size::value - 1] = 1; (void)std::initializer_list{ (out[std::tuple_size::value - Is - 2] = out[std::tuple_size::value - Is - 1] * std::get::value - Is - 1>(shape))...}; return out; } template array::value> make_strides(pS const &shape) { return make_strides( shape, utils::make_index_sequence::value - 1>()); } template typename type_helper>::iterator type_helper>::make_iterator(ndarray &n, long i) { return {n, i}; } template typename type_helper>::const_iterator type_helper>::make_iterator(ndarray const &n, long i) { return {n, i}; } template template T *type_helper>::initialize_from_iterable(S &shape, T *from, Iter &&iter) { return type_helper const &>::initialize_from_iterable( shape, from, std::forward(iter)); } template numpy_iexpr> type_helper>::get(ndarray &&self, long i) { return {std::move(self), i}; } template typename type_helper const &>::iterator type_helper const &>::make_iterator(ndarray &n, long i) { return {n, i}; } template typename type_helper const &>::const_iterator type_helper const &>::make_iterator(ndarray const &n, long i) { return {n, i}; } template template T *type_helper const 
&>::initialize_from_iterable(S &shape, T *from, Iter &&iter) { sutils::assign( std::get::value - std::tuple_size::value>(shape), iter.size()); for (auto content : iter) from = type_helper> const &>:: initialize_from_iterable(shape, from, content); return from; } template numpy_iexpr const &> type_helper const &>::get(ndarray const &self, long i) { return numpy_iexpr const &>(self, i); } template typename type_helper>>::iterator type_helper>>::make_iterator(ndarray> &n, long i) { return n.buffer + i; } template typename type_helper>>::const_iterator type_helper>>::make_iterator( ndarray> const &n, long i) { return n.buffer + i; } template template T *type_helper>>::initialize_from_iterable(S &shape, T *from, Iter &&iter) { sutils::assign(std::get::value - 1>(shape), iter.size()); return std::copy(iter.begin(), iter.end(), from); } template typename type_helper>>::type type_helper>>::get(ndarray> &&self, long i) { return self.buffer[i]; } template typename type_helper> const &>::iterator type_helper> const &>::make_iterator( ndarray> &n, long i) { return n.buffer + i; } template typename type_helper> const &>::const_iterator make_iterator(ndarray> const &n, long i) { return n.buffer + i; } template template T *type_helper> const &>::initialize_from_iterable( S &shape, T *from, Iter &&iter) { sutils::assign(std::get::value - 1>(shape), iter.size()); return std::copy(iter.begin(), iter.end(), from); } template typename type_helper> const &>::type & type_helper> const &>::get( ndarray> const &self, long i) { return self.buffer[i]; } template typename type_helper>>::iterator type_helper>>::make_iterator( ndarray> &n, long i) { return n.buffer + i; } template typename type_helper>>::const_iterator type_helper>>::make_iterator( ndarray> const &n, long i) { return n.buffer + i; } template template T *type_helper>>::initialize_from_iterable( S &shape, T *from, Iter &&iter) { sutils::assign(std::get::value - 1>(shape), iter.size()); return std::copy(iter.begin(), iter.end(), 
from); } template typename type_helper>>::type type_helper>>::get( ndarray> &&self, long i) { return self.buffer[i]; } template typename type_helper> const &>::iterator type_helper> const &>::make_iterator( ndarray> &n, long i) { return n.buffer + i; } template typename type_helper> const &>::const_iterator make_iterator(ndarray> const &n, long i) { return n.buffer + i; } template template T *type_helper> const &>::initialize_from_iterable( S &shape, T *from, Iter &&iter) { sutils::assign(std::get::value - 1>(shape), iter.size()); return std::copy(iter.begin(), iter.end(), from); } template typename type_helper> const &>::type & type_helper> const &>::get( ndarray> const &self, long i) { return self.buffer[i]; } template long patch_index(long index, S const &) { return index; } long patch_index(long index, std::integral_constant const &) { return 0; } template template long noffset::operator()(S const &strides, array const &indices) const { auto index = patch_index( indices[M - L], typename std::tuple_element::type()); return noffset{}(strides, indices) + strides.template strides() * index; } template template long noffset::operator()(S const &strides, array const &indices, pS const &shape) const { auto index = patch_index( indices[M - L], typename std::tuple_element::type()); if (index < 0) index += std::get(shape); assert(0 <= index and index < std::get(shape)); return noffset{}(strides, indices, shape) + strides.template strides() * ((index < 0) ? 
index + std::get(shape) : index); } template <> template long noffset<1>::operator()(S const &strides, array const &indices) const { auto index = patch_index( indices[M - 1], typename std::tuple_element::type()); return strides.template strides() * index; } template <> template long noffset<1>::operator()(S const &strides, array const &indices, pS const &shape) const { auto index = patch_index( indices[M - 1], typename std::tuple_element::type()); if (index < 0) index += std::get(shape); assert(0 <= index && index < std::get(shape)); return strides.template strides() * ((index < 0) ? index + std::get(shape) : index); } /* constructors */ template ndarray::ndarray() : mem(utils::no_memory()), buffer(nullptr), _shape(), _strides() { } /* from other memory */ template ndarray::ndarray(utils::shared_ref> const &mem, pS const &shape) : mem(mem), buffer(mem->data), _shape(shape), _strides(make_strides(shape)) { } template ndarray::ndarray(utils::shared_ref> &&mem, pS const &shape) : mem(std::move(mem)), buffer(this->mem->data), _shape(shape), _strides(make_strides(shape)) { } /* from other array */ template template ndarray::ndarray(ndarray const &other) : mem(other.flat_size()), buffer(mem->data), _shape(other._shape), _strides(other._strides) { static_assert(std::tuple_size::value == std::tuple_size::value, "compatible shapes"); std::copy(other.fbegin(), other.fend(), fbegin()); } template template ndarray::ndarray(ndarray const &other) : mem(other.mem), buffer(mem->data), _shape(other._shape), _strides(other._strides) { static_assert(std::tuple_size::value == std::tuple_size::value, "compatible shapes"); } /* from a seed */ template ndarray::ndarray(pS const &shape, none_type init) : mem(sutils::sprod(shape)), buffer(mem->data), _shape(shape), _strides(make_strides(shape)) { } template ndarray::ndarray(pS const &shape, T init) : ndarray(shape, none_type()) { std::fill(fbegin(), fend(), init); } /* from a foreign pointer */ template template ndarray::ndarray(T *data, S 
const *pshape, ownership o) : mem(data, o), buffer(mem->data), _shape(pshape) { _strides = make_strides(_shape); } template ndarray::ndarray(T *data, pS const &pshape, ownership o) : mem(data, o), buffer(mem->data), _shape(pshape) { _strides = make_strides(_shape); } #ifdef ENABLE_PYTHON_MODULE template template ndarray::ndarray(T *data, S const *pshape, PyObject *obj_ptr) : ndarray(data, pshape, ownership::external) { mem.external(obj_ptr); // mark memory as external to decref at the end of // its lifetime } template ndarray::ndarray(T *data, pS const &pshape, PyObject *obj_ptr) : ndarray(data, pshape, ownership::external) { mem.external(obj_ptr); // mark memory as external to decref at the end of // its lifetime } #endif template template ndarray::ndarray(Iterable &&iterable) : mem(utils::nested_container_size::flat_size( std::forward(iterable))), buffer(mem->data), _shape() { type_helper::initialize_from_iterable( _shape, mem->data, std::forward(iterable)); _strides = make_strides(_shape); } /* from a numpy expression */ template template void ndarray::initialize_from_expr(E const &expr) { assert(buffer); utils::broadcast_copy::value>( *this, expr); } template template ndarray::ndarray(numpy_expr const &expr) : mem(expr.flat_size()), buffer(mem->data), _shape(sutils::getshape(expr)), _strides(make_strides(_shape)) { initialize_from_expr(expr); } template template ndarray::ndarray(numpy_texpr const &expr) : mem(expr.flat_size()), buffer(mem->data), _shape(sutils::getshape(expr)), _strides(make_strides(_shape)) { initialize_from_expr(expr); } template template ndarray::ndarray(numpy_texpr_2 const &expr) : mem(expr.flat_size()), buffer(mem->data), _shape(sutils::getshape(expr)), _strides(make_strides(_shape)) { initialize_from_expr(expr); } template template ndarray::ndarray(numpy_gexpr const &expr) : mem(expr.flat_size()), buffer(mem->data), _shape(sutils::getshape(expr)), _strides(make_strides(_shape)) { initialize_from_expr(expr); } template template 
ndarray::ndarray(numpy_iexpr const &expr) : mem(expr.flat_size()), buffer(mem->data), _shape(sutils::getshape(expr)), _strides(make_strides(_shape)) { initialize_from_expr(expr); } template template ndarray::ndarray(numpy_vexpr const &expr) : mem(expr.flat_size()), buffer(mem->data), _shape(sutils::getshape(expr)), _strides(make_strides(_shape)) { initialize_from_expr(expr); } /* update operators */ template template ndarray &ndarray::update_(Expr const &expr) { using BExpr = typename std::conditional::value, broadcast, Expr const &>::type; BExpr bexpr = expr; utils::broadcast_update< Op, ndarray &, BExpr, value, value - (std::is_scalar::value + utils::dim_of::value), is_vectorizable && types::is_vectorizable::type>::type>::value && std::is_same::type>::type>::value>(*this, bexpr); return *this; } template template ndarray &ndarray::operator+=(Expr const &expr) { return update_(expr); } template template ndarray &ndarray::operator-=(Expr const &expr) { return update_(expr); } template template ndarray &ndarray::operator*=(Expr const &expr) { return update_(expr); } template template ndarray &ndarray::operator/=(Expr const &expr) { return update_(expr); } template template ndarray &ndarray::operator&=(Expr const &expr) { return update_(expr); } template template ndarray &ndarray::operator|=(Expr const &expr) { return update_(expr); } template template ndarray &ndarray::operator^=(Expr const &expr) { return update_(expr); } /* element indexing * differentiate const from non const, && r-value from l-value * */ template template typename std::enable_if::value, T &>::type ndarray::fast(array const &indices) { assert(inbound_indices(indices)); return *(buffer + noffset::value>{}(*this, indices)); } template template typename std::enable_if::value, T>::type ndarray::fast(array const &indices) const { assert(inbound_indices(indices)); return *(buffer + noffset::value>{}(*this, indices)); } template template auto ndarray::fast(array const &indices) const & -> typename 
std::enable_if::value, decltype(nget().fast(*this, indices))>::type { return nget().fast(*this, indices); } template template auto ndarray::fast(array const &indices) && -> typename std::enable_if::value, decltype(nget().fast(std::move(*this), indices))>::type { return nget().fast(std::move(*this), indices); } template template typename std::enable_if::value, T const &>::type ndarray:: operator[](array const &indices) const { return *(buffer + noffset::value>{}(*this, indices, _shape)); } template template typename std::enable_if::value, T &>::type ndarray:: operator[](array const &indices) { return *(buffer + noffset::value>{}(*this, indices, _shape)); } template template auto ndarray::operator[](array const &indices) const & -> typename std::enable_if::value, decltype(nget()(*this, indices))>::type { return nget()(*this, indices); } template template auto ndarray::operator[](array const &indices) && -> typename std::enable_if::value, decltype(nget()(std::move(*this), indices))>::type { return nget()(std::move(*this), indices); } #ifdef USE_XSIMD template template typename ndarray::simd_iterator ndarray::vbegin(vectorizer) const { return {buffer}; } template template typename ndarray::simd_iterator ndarray::vend(vectorizer) const { using vector_type = typename xsimd::simd_type; static const std::size_t vector_size = vector_type::size; return {buffer + long(std::get<0>(_shape) / vector_size * vector_size)}; } #endif /* slice indexing */ template ndarray>> ndarray::operator[](none_type) const { sutils::push_front_t> new_shape; sutils::copy_shape<1, -1>( new_shape, *this, utils::make_index_sequence::value>()); return reshape(new_shape); } template template typename std::enable_if< is_slice::value, numpy_gexpr const &, normalize_t>>::type ndarray:: operator[](S const &s) const & { return make_gexpr(*this, s); } template template typename std::enable_if::value, numpy_gexpr, normalize_t>>::type ndarray:: operator[](S const &s) && { return make_gexpr(std::move(*this), 
s); } template long ndarray::size() const { return std::get<0>(_shape); } /* extended slice indexing */ template template auto ndarray::operator()(S0 const &s0, S const &... s) const & -> decltype(extended_slice::value>{}((*this), s0, s...)) { return extended_slice::value>{}((*this), s0, s...); } template template auto ndarray::operator()(S0 const &s0, S const &... s) & -> decltype(extended_slice::value>{}((*this), s0, s...)) { return extended_slice::value>{}((*this), s0, s...); } template template auto ndarray::operator()(S0 const &s0, S const &... s) && -> decltype(extended_slice::value>{}( std::move(*this), s0, s...)) { return extended_slice::value>{}(std::move(*this), s0, s...); } /* element filtering */ template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr, ndarray>>>::type ndarray::fast(F const &filter) const { long sz = filter.template shape<0>(); long *raw = (long *)malloc(sz * sizeof(long)); long n = 0; for (long i = 0; i < sz; ++i) if (filter.fast(i)) raw[n++] = i; // realloc(raw, n * sizeof(long)); return this->fast(ndarray>(raw, pshape(n), types::ownership::owned)); } template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr, ndarray>>>::type ndarray:: operator[](F const &filter) const { return fast(filter); } template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr>, ndarray>>>::type ndarray::fast(F const &filter) const { return flat()[ndarray(filter) .flat()]; } template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr>, 
ndarray>>>::type ndarray:: operator[](F const &filter) const { return fast(filter); } template template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, F>>::type ndarray:: operator[](F const &filter) const { return {*this, filter}; } template template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, F>>::type ndarray::fast(F const &filter) const { return {*this, filter}; } template template auto ndarray:: operator[](std::tuple const &indices) const -> typename std::enable_if< is_numexpr_arg::value, decltype(this->_fwdindex( indices, utils::make_index_sequence<2 + sizeof...(Tys)>()))>::type { return _fwdindex(indices, utils::make_index_sequence<2 + sizeof...(Tys)>()); } /* through iterators */ template typename ndarray::iterator ndarray::begin() { return type_helper::make_iterator(*this, 0); } template typename ndarray::const_iterator ndarray::begin() const { return type_helper::make_iterator(*this, 0); } template typename ndarray::iterator ndarray::end() { return type_helper::make_iterator(*this, std::get<0>(_shape)); } template typename ndarray::const_iterator ndarray::end() const { return type_helper::make_iterator(*this, std::get<0>(_shape)); } template typename ndarray::const_flat_iterator ndarray::fbegin() const { return buffer; } template typename ndarray::const_flat_iterator ndarray::fend() const { return buffer + flat_size(); } template typename ndarray::flat_iterator ndarray::fbegin() { return buffer; } template typename ndarray::flat_iterator ndarray::fend() { return buffer + flat_size(); } /* member functions */ template long ndarray::flat_size() const { return sutils::prod(*this); } template bool ndarray::may_overlap(ndarray const &expr) const { return id() == expr.id(); } template template ndarray 
ndarray::reshape(qS const &shape) const & { return {mem, shape}; } template template ndarray ndarray::reshape(qS const &shape) && { return {std::move(mem), shape}; } template ndarray::operator bool() const { if (sutils::any_of(*this, [](long n) { return n != 1; })) throw ValueError("The truth value of an array with more than one element " "is ambiguous. Use a.any() or a.all()"); return *buffer; } template ndarray> ndarray::flat() const { return {mem, pshape{{flat_size()}}}; } template ndarray ndarray::copy() const { ndarray res(_shape, builtins::None); std::copy(fbegin(), fend(), res.fbegin()); return res; } template intptr_t ndarray::id() const { return reinterpret_cast(&(*mem)); } /* pretty printing { */ namespace impl { template size_t get_spacing(ndarray const &e) { std::ostringstream oss; if (e.flat_size()) { oss << *std::max_element(e.fbegin(), e.fend()); size_t s = oss.str().length(); for (auto iter = e.fbegin(), end = e.fend(); iter != end; ++iter) { oss.str(""); oss.width(s); oss << *iter; size_t ts = oss.str().length(); if (ts > s) s = ts; } return s; } return 0; } template size_t get_spacing(ndarray, pS> const &e) { std::ostringstream oss; if (e.flat_size()) oss << *e.fbegin(); return oss.str().length() + 2; } } template std::ostream &operator<<(std::ostream &os, ndarray const &e) { std::array::value> strides; auto shape = sutils::getshape(e); strides[std::tuple_size::value - 1] = std::get::value - 1>(shape); if (strides[std::tuple_size::value - 1] == 0) return os << "[]"; std::transform(strides.rbegin(), strides.rend() - 1, shape.rbegin() + 1, strides.rbegin() + 1, std::multiplies()); size_t depth = std::tuple_size::value; int step = -1; size_t size = impl::get_spacing(e); auto iter = e.fbegin(); int max_modulo = 1000; os << "["; if (std::get<0>(shape) != 0) do { if (depth == 1) { os.width(size); os << *iter++; for (int i = 1; i < std::get::value - 1>(shape); i++) { os.width(size + 1); os << *iter++; } step = 1; depth++; max_modulo = std::lower_bound( 
strides.begin(), strides.end(), iter - e.buffer, [](int comp, int val) { return val % comp != 0; }) - strides.begin(); } else if (max_modulo + depth == std::tuple_size::value + 1) { depth--; step = -1; os << "]"; for (size_t i = 0; i < depth; i++) os << std::endl; for (size_t i = 0; i < std::tuple_size::value - depth; i++) os << " "; os << "["; } else { depth += step; if (step == 1) os << "]"; else os << "["; } } while (depth != std::tuple_size::value + 1); return os << "]"; } template typename std::enable_if::value, std::ostream &>::type operator<<(std::ostream &os, E const &e) { return os << ndarray{e}; } /* } */ template template list &list::operator=(ndarray> const &other) { data = utils::shared_ref(other.begin(), other.end()); return *this; } } PYTHONIC_NS_END /* std::get overloads */ namespace std { template auto get(E &&a) -> typename std::enable_if< pythonic::types::is_array::type>::type>::value, decltype(std::forward(a)[I])>::type { return std::forward(a)[I]; } } /* pythran attribute system { */ #include "pythonic/numpy/transpose.hpp" PYTHONIC_NS_BEGIN namespace builtins { namespace details { template template auto _build_gexpr::operator()(E const &a, S const &... slices) -> decltype(_build_gexpr{}(a, types::contiguous_slice(), slices...)) { return _build_gexpr{}(a, types::contiguous_slice(0, a.size()), slices...); } template types::numpy_gexpr...> _build_gexpr<1>:: operator()(E const &a, S const &... slices) { return E(a)(slices...); } template E _make_real(E const &a, utils::int_<0>) { return a; } template auto _make_real(E const &a, utils::int_<1>) -> decltype(_build_gexpr{}( types::ndarray::type, types::array>{}, types::slice())) { using stype = typename types::is_complex::type; auto new_shape = sutils::getshape(a); std::get(new_shape) *= 2; // this is tricky && dangerous! 
auto translated_mem = reinterpret_cast> const &>( a.mem); types::ndarray> translated{ translated_mem, new_shape}; return _build_gexpr{}( translated, types::slice{0, std::get(new_shape), 2}); } template auto _make_real(types::numpy_expr const &a, utils::int_<1>) -> decltype(_make_real( types::ndarray::dtype, typename types::numpy_expr::shape_t>(a), utils::int_<1>{})) { return _make_real( types::ndarray::dtype, typename types::numpy_expr::shape_t>(a), utils::int_<1>{}); } template types::ndarray _make_imag(E const &a, utils::int_<0>) { // cannot use numpy.zero: forward declaration issue return { (typename E::dtype *)calloc(a.flat_size(), sizeof(typename E::dtype)), sutils::getshape(a), types::ownership::owned}; } template auto _make_imag(types::numpy_expr const &a, utils::int_<1>) -> decltype(_make_imag( types::ndarray::dtype, typename types::numpy_expr::shape_t>(a), utils::int_<1>{})) { return _make_imag( types::ndarray::dtype, typename types::numpy_expr::shape_t>(a), utils::int_<1>{}); } template auto _make_imag(E const &a, utils::int_<1>) -> decltype(_build_gexpr{}( types::ndarray::type, types::array>{}, types::slice())) { using stype = typename types::is_complex::type; auto new_shape = sutils::getshape(a); std::get(new_shape) *= 2; // this is tricky && dangerous! 
auto translated_mem = reinterpret_cast> const &>( a.mem); types::ndarray> translated{ translated_mem, new_shape}; return _build_gexpr{}( translated, types::slice{1, std::get(new_shape), 2}); } } template types::array getattr(types::attr::SHAPE, E const &a) { return sutils::getshape(a); } template long getattr(types::attr::NDIM, E const &a) { return E::value; } template types::array getattr(types::attr::STRIDES, E const &a) { types::array strides; strides[E::value - 1] = sizeof(typename E::dtype); auto shape = sutils::getshape(a); std::transform(strides.rbegin(), strides.rend() - 1, shape.rbegin(), strides.rbegin() + 1, std::multiplies()); return strides; } template long getattr(types::attr::SIZE, E const &a) { return a.flat_size(); } template long getattr(types::attr::ITEMSIZE, E const &a) { return sizeof(typename E::dtype); } template long getattr(types::attr::NBYTES, E const &a) { return a.flat_size() * sizeof(typename E::dtype); } template auto getattr(types::attr::FLAT, E const &a) -> decltype(a.flat()) { return a.flat(); } template auto getattr(types::attr::REAL, types::ndarray const &a) -> decltype( details::_make_real(a, utils::int_::value>{})) { return details::_make_real(a, utils::int_::value>{}); } template auto getattr(types::attr::REAL, types::numpy_expr const &a) -> decltype(details::_make_real( a, utils::int_::dtype>::value>{})) { return details::_make_real( a, utils::int_::dtype>::value>{}); } template auto getattr(types::attr::REAL, types::numpy_texpr const &a) -> decltype( types::numpy_texpr{ getattr(types::attr::REAL{}, a.arg)}) { auto ta = getattr(types::attr::REAL{}, a.arg); return types::numpy_texpr{ta}; } template auto getattr(types::attr::IMAG, types::ndarray const &a) -> decltype( details::_make_imag(a, utils::int_::value>{})) { return details::_make_imag(a, utils::int_::value>{}); } template auto getattr(types::attr::IMAG, types::numpy_expr const &a) -> decltype(details::_make_imag( a, utils::int_::dtype>::value>{})) { return 
details::_make_imag( a, utils::int_::dtype>::value>{}); } template auto getattr(types::attr::IMAG, types::numpy_texpr const &a) -> decltype( types::numpy_texpr{ getattr(types::attr::IMAG{}, a.arg)}) { auto ta = getattr(types::attr::IMAG{}, a.arg); return types::numpy_texpr{ta}; } template types::dtype_t::type> getattr(types::attr::DTYPE, E const &a) { return {}; } } PYTHONIC_NS_END /* } */ #include "pythonic/types/numpy_operators.hpp" #ifdef ENABLE_PYTHON_MODULE #include "pythonic/types/int.hpp" PYTHONIC_NS_BEGIN /* wrapper around Python array creation * its purpose is to hide the difference between the shape stored in pythran * (aka long) && the shape stored in numpy (aka npy_intp) * it should work (with an extra copy) on 32 bit architecture && without copy * on 64 bits architecture */ template struct pyarray_new { static_assert(!std::is_same::value, "correctly specialized"); PyObject *from_descr(PyTypeObject *subtype, PyArray_Descr *descr, T *dims, void *data, int flags, PyObject *obj) { npy_intp shape[N]; std::copy(dims, dims + N, shape); return pyarray_new{}.from_descr(subtype, descr, shape, data, flags, obj); } PyObject *from_data(T *dims, int typenum, void *data) { npy_intp shape[N]; std::copy(dims, dims + N, shape); return pyarray_new{}.from_data(shape, typenum, data); } }; template struct pyarray_new { PyObject *from_descr(PyTypeObject *subtype, PyArray_Descr *descr, npy_intp *dims, void *data, int flags, PyObject *obj) { return PyArray_NewFromDescr(subtype, descr, N, dims, nullptr, data, flags, obj); } PyObject *from_data(npy_intp *dims, int typenum, void *data) { return PyArray_SimpleNewFromData(N, dims, typenum, data); } }; template PyObject * to_python>::convert(types::ndarray const &cn, bool transpose) { types::ndarray &n = const_cast &>(cn); if (PyObject *p = n.mem.get_foreign()) { PyArrayObject *arr = reinterpret_cast(p); auto const *pshape = PyArray_DIMS(arr); Py_INCREF(p); // handle complex trick :-/ if ((long)sizeof(T) != 
PyArray_ITEMSIZE((PyArrayObject *)(arr))) { arr = (PyArrayObject *)PyArray_View( (PyArrayObject *)(arr), PyArray_DescrFromType(c_type_to_numpy_type::value), nullptr); } if (sutils::equals(n, pshape)) { if (transpose && !(PyArray_FLAGS(arr) & NPY_ARRAY_F_CONTIGUOUS)) { PyObject *Transposed = PyArray_Transpose(arr, nullptr); Py_DECREF(arr); return Transposed; } else return p; } else if (sutils::requals(n, pshape)) { if (transpose) return p; else { PyObject *Transposed = PyArray_Transpose(arr, nullptr); Py_DECREF(arr); return Transposed; } } else { Py_INCREF(PyArray_DESCR(arr)); auto array = sutils::array(n._shape); auto *res = pyarray_new::value>{}.from_descr( Py_TYPE(arr), PyArray_DESCR(arr), array.data(), PyArray_DATA(arr), PyArray_FLAGS(arr) & ~NPY_ARRAY_OWNDATA, p); if (transpose && (PyArray_FLAGS(arr) & NPY_ARRAY_F_CONTIGUOUS)) { PyObject *Transposed = PyArray_Transpose(reinterpret_cast(arr), nullptr); Py_DECREF(arr); return Transposed; } else return res; } } else { auto array = sutils::array(n._shape); PyObject *result = pyarray_new::value>{}.from_data( array.data(), c_type_to_numpy_type::value, n.buffer); n.mark_memory_external(result); Py_INCREF(result); // because it's going to be decrefed when n is destroyed if (!result) return nullptr; PyArray_ENABLEFLAGS(reinterpret_cast(result), NPY_ARRAY_OWNDATA); if (transpose) { PyObject *Transposed = PyArray_Transpose(reinterpret_cast(result), nullptr); Py_DECREF(result); return Transposed; } else return result; } } template PyObject * to_python>::convert(types::numpy_iexpr const &v, bool transpose) { PyObject *res = ::to_python(types::ndarray::dtype, typename types::numpy_iexpr::shape_t>(v)); if (transpose) { PyObject *Transposed = PyArray_Transpose(reinterpret_cast(res), nullptr); Py_DECREF(res); return Transposed; } else return res; } template PyObject *to_python>::convert( types::numpy_gexpr const &v, bool transpose) { PyObject *slices = (sizeof...(S) == 1) ? 
::to_python(std::get<0>(v.slices)) : ::to_python(v.slices); PyObject *base = ::to_python(v.arg); PyObject *res = PyObject_GetItem(base, slices); Py_DECREF(base); if (transpose) { PyObject *Transposed = PyArray_Transpose(reinterpret_cast(res), nullptr); Py_DECREF(res); return Transposed; } else return res; } namespace impl { template struct is_integral_constant : std::false_type { }; template struct is_integral_constant> : std::true_type { }; template bool check_shape(T const *dims, utils::index_sequence) { types::array dims_match = { (is_integral_constant::type>::value ? (dims[Is] == std::conditional< is_integral_constant< typename std::tuple_element::type>::value, typename std::tuple_element::type, std::integral_constant>::type::value) : true)...}; return std::find(dims_match.begin(), dims_match.end(), false) == dims_match.end(); } template PyArrayObject *check_array_type_and_dims(PyObject *obj) { if (!PyArray_Check(obj)) return nullptr; // the array must have the same dtype && the same number of dimensions PyArrayObject *arr = reinterpret_cast(obj); if (PyArray_TYPE(arr) != c_type_to_numpy_type::value) return nullptr; if (PyArray_NDIM(arr) != std::tuple_size::value) return nullptr; return arr; } template void fill_slice(Slice &slice, long const *strides, long const *offsets, S const *dims, utils::int_<0>) { } void set_slice(types::contiguous_normalized_slice &cs, long lower, long upper, long step) { cs.lower = lower; cs.upper = upper; assert(cs.step == step && "consistent steps"); } void set_slice(types::normalized_slice &s, long lower, long upper, long step) { s.lower = lower; s.upper = upper; s.step = step; } template void fill_slice(Slice &slice, long const *strides, long const *offsets, S const *dims, utils::int_) { set_slice(std::get::value - N>(slice), *offsets / sizeof(T), *offsets / sizeof(T) + *dims * *strides / sizeof(T), *strides / sizeof(T)); fill_slice(slice, strides + 1, offsets + 1, dims + 1, utils::int_()); } } template bool 
from_python>::is_convertible(PyObject *obj) { PyArrayObject *arr = impl::check_array_type_and_dims(obj); if (!arr) return false; auto const *stride = PyArray_STRIDES(arr); auto const *dims = PyArray_DIMS(arr); long current_stride = PyArray_ITEMSIZE(arr); if (PyArray_SIZE(arr)) { for (long i = std::tuple_size::value - 1; i >= 0; i--) { if (stride[i] == 0 && dims[i] == 1) { // happens when a new dim is added though None/newaxis } else if (stride[i] != current_stride && dims[i] > 1) { return false; } current_stride *= dims[i]; } // this is supposed to be a texpr if ((PyArray_FLAGS(arr) & NPY_ARRAY_F_CONTIGUOUS) && ((PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS) == 0) && (std::tuple_size::value > 1)) { return false; } } // check if dimension size match return impl::check_shape( dims, utils::make_index_sequence::value>()); } template types::ndarray from_python>::convert(PyObject *obj) { PyArrayObject *arr = reinterpret_cast(obj); types::ndarray r((T *)PyArray_BYTES(arr), PyArray_DIMS(arr), obj); Py_INCREF(obj); return r; } template bool from_python, S...>>::is_convertible(PyObject *obj) { PyArrayObject *arr = impl::check_array_type_and_dims(obj); if (!arr) return false; if ((PyArray_FLAGS(arr) & NPY_ARRAY_F_CONTIGUOUS) && ((PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS) == 0) && (std::tuple_size::value > 1)) { return false; } PyObject *base_obj = PyArray_BASE(arr); if (!base_obj || !PyArray_Check(base_obj)) return false; PyArrayObject *base_arr = reinterpret_cast(base_obj); auto const *stride = PyArray_STRIDES(arr); auto const *dims = PyArray_DIMS(arr); /* FIXME If we have at least one stride, we convert the whole * array to a numpy_gexpr, without trying to be smarter with * contiguous slices */ long current_stride = PyArray_ITEMSIZE(arr); bool at_least_one_stride = false; for (long i = std::tuple_size::value - 1; i >= 0; i--) { if (stride[i] < 0) { return false; } if (stride[i] == 0 && dims[i] == 1) { // happens when a new dim is added though None/newaxis } else if 
(stride[i] != current_stride) { at_least_one_stride = true; break; } current_stride *= dims[i]; } if (at_least_one_stride) { if (PyArray_NDIM(base_arr) != std::tuple_size::value) { return false; } return true; } else return false; } template types::numpy_gexpr, S...> from_python, S...>>::convert( PyObject *obj) { PyArrayObject *arr = reinterpret_cast(obj); PyArrayObject *base_arr = reinterpret_cast(PyArray_BASE(arr)); /* from the base array pointer && this array pointer, we can recover the * full slice informations * unfortunately, the PyArray representation is different from our. * - PyArray_BYTES gives the start of the base pointer * - PyArray_Dims give the dimension array (the shape) * - PyArray_STRIDES gives the stride information, but relative to the * base * pointer && ! relative to the lower dimension */ long offsets[std::tuple_size::value]; long strides[std::tuple_size::value]; auto const *base_dims = PyArray_DIMS(base_arr); auto full_offset = PyArray_BYTES(arr) - PyArray_BYTES(base_arr); auto const *arr_strides = PyArray_STRIDES(arr); long accumulated_dim = 1; offsets[std::tuple_size::value - 1] = full_offset % base_dims[std::tuple_size::value - 1]; strides[std::tuple_size::value - 1] = arr_strides[std::tuple_size::value - 1]; for (ssize_t i = std::tuple_size::value - 2; i >= 0; --i) { accumulated_dim *= base_dims[i + 1]; offsets[i] = full_offset / accumulated_dim; strides[i] = arr_strides[i] / accumulated_dim; } types::ndarray base_array((T *)PyArray_BYTES(base_arr), PyArray_DIMS(base_arr), (PyObject *)base_arr); std::tuple slices; impl::fill_slice(slices, strides, offsets, PyArray_DIMS(arr), utils::int_()); types::numpy_gexpr, S...> r(base_array, slices); Py_INCREF(base_arr); return r; } template bool from_python>:: is_convertible(PyObject *obj) { constexpr auto N = E::value; PyArrayObject *arr = impl::check_array_type_and_dims( obj); if (!arr) return false; // check strides. 
Note that because it's a texpr, the check is done in the // opposite direction compared to ndarrays auto const *stride = PyArray_STRIDES(arr); auto const *dims = PyArray_DIMS(arr); long current_stride = PyArray_ITEMSIZE(arr); for (size_t i = 0; i < N; i++) { if (stride[i] != current_stride) return false; current_stride *= dims[i]; } return PyArray_FLAGS(arr) & NPY_ARRAY_F_CONTIGUOUS && N > 1; } template types::numpy_texpr from_python>::convert(PyObject *obj) { constexpr size_t N = E::value; using T = typename E::dtype; PyArrayObject *arr = reinterpret_cast(obj); typename E::shape_t shape; auto const *dims = PyArray_DIMS(arr); static_assert(N == 2, "only support texpr of matrices"); sutils::assign(std::get<0>(shape), std::get<1>(dims)); sutils::assign(std::get<1>(shape), std::get<0>(dims)); PyObject *tobj = PyArray_Transpose(arr, nullptr); types::ndarray base_array((T *)PyArray_BYTES(arr), shape, tobj); types::numpy_texpr> r(base_array); return r; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/nditerator.hpp000066400000000000000000000151411416264035500233230ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NDITERATOR_HPP #define PYTHONIC_TYPES_NDITERATOR_HPP #include "pythonic/include/types/nditerator.hpp" #include PYTHONIC_NS_BEGIN namespace types { // FIXME: should use the same structure as the numpy_expr iterators /* Iterator over whatever provides a fast(long) method to access its element */ template nditerator::nditerator(E &data, long index) : data(data), index(index) { } template auto nditerator::operator*() -> decltype(data.fast(index)) { return data.fast(index); } template auto nditerator::operator*() const -> decltype(data.fast(index)) { return data.fast(index); } template nditerator &nditerator::operator++() { ++index; return *this; } template nditerator &nditerator::operator--() { --index; return *this; } template nditerator &nditerator::operator+=(long i) { index += i; return *this; } template nditerator 
&nditerator::operator-=(long i) { index -= i; return *this; } template nditerator nditerator::operator+(long i) const { nditerator other(*this); other += i; return other; } template nditerator nditerator::operator-(long i) const { nditerator other(*this); other -= i; return other; } template long nditerator::operator-(nditerator const &other) const { return index - other.index; } template bool nditerator::operator!=(nditerator const &other) const { return index != other.index; } template bool nditerator::operator==(nditerator const &other) const { return index == other.index; } template bool nditerator::operator<(nditerator const &other) const { return index < other.index; } template nditerator &nditerator::operator=(nditerator const &other) { assert(&data == &other.data); index = other.index; return *this; } /* Const iterator over whatever provides a fast(long) method to access its * element */ template const_nditerator::const_nditerator(E const &data, long index) : data(data), index(index) { } template auto const_nditerator::operator*() const -> decltype(data.fast(index)) { return data.fast(index); } template const_nditerator &const_nditerator::operator++() { ++index; return *this; } template const_nditerator &const_nditerator::operator--() { --index; return *this; } template const_nditerator &const_nditerator::operator+=(long i) { index += i; return *this; } template const_nditerator &const_nditerator::operator-=(long i) { index -= i; return *this; } template const_nditerator const_nditerator::operator+(long i) const { const_nditerator other(*this); other += i; return other; } template const_nditerator const_nditerator::operator-(long i) const { const_nditerator other(*this); other -= i; return other; } template long const_nditerator::operator-(const_nditerator const &other) const { return index - other.index; } template bool const_nditerator::operator!=(const_nditerator const &other) const { return index != other.index; } template bool 
const_nditerator::operator==(const_nditerator const &other) const { return index == other.index; } template bool const_nditerator::operator<(const_nditerator const &other) const { return index < other.index; } template const_nditerator &const_nditerator:: operator=(const_nditerator const &other) { index = other.index; return *this; } #ifdef USE_XSIMD template const_simd_nditerator::const_simd_nditerator(typename E::dtype const *data) : data(data) { } template auto const_simd_nditerator::operator*() const -> decltype(xsimd::load_unaligned(data)) { return xsimd::load_unaligned(data); } template void const_simd_nditerator::store( xsimd::simd_type const &val) { val.store_unaligned(const_cast(data)); } template const_simd_nditerator &const_simd_nditerator::operator++() { data += vector_size; return *this; } template const_simd_nditerator &const_simd_nditerator::operator+=(long i) { data += vector_size * i; return *this; } template const_simd_nditerator const_simd_nditerator::operator+(long i) { return {data + vector_size * i}; } template const_simd_nditerator &const_simd_nditerator::operator--() { data -= vector_size; return *this; } template long const_simd_nditerator:: operator-(const_simd_nditerator const &other) const { return (data - other.data) / vector_size; } template bool const_simd_nditerator:: operator!=(const_simd_nditerator const &other) const { return data != other.data; } template bool const_simd_nditerator:: operator==(const_simd_nditerator const &other) const { return data == other.data; } template bool const_simd_nditerator:: operator<(const_simd_nditerator const &other) const { return data < other.data; } template const_simd_nditerator &const_simd_nditerator:: operator=(const_simd_nditerator const &other) { data = other.data; return *this; } #endif // build an iterator over T, selecting a raw pointer if possible template template auto make_nditerator::operator()(T &self, long i) -> decltype(nditerator(self, i)) const { return nditerator(self, i); } 
template typename T::dtype *make_nditerator::operator()(T &self, long i) const { return self.buffer + i; } template template auto make_const_nditerator::operator()(T const &self, long i) -> decltype(const_nditerator(self, i)) const { return const_nditerator(self, i); } template typename T::dtype const *make_const_nditerator:: operator()(T const &self, long i) const { return self.buffer + i; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_binary_op.hpp000066400000000000000000000012721416264035500243620ustar00rootroot00000000000000#ifndef NUMPY_BINARY_FUNC_NAME #error NUMPY_BINARY_FUNC_NAME undefined #endif #ifndef NUMPY_BINARY_FUNC_SYM #error NUMPY_BINARY_FUNC_SYM undefined #endif template typename std::enable_if< types::valid_numop_parameters::type, typename std::decay::type>::value, types::numpy_expr::type, typename types::adapt_type::type>>::type NUMPY_BINARY_FUNC_NAME(E0 &&self, E1 &&other) { return {std::forward(self), std::forward(other)}; } #undef NUMPY_BINARY_FUNC_NAME #undef NUMPY_BINARY_FUNC_SYM pythran-0.10.0+ds2/pythran/pythonic/types/numpy_broadcast.hpp000066400000000000000000000056441416264035500243510ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_BROADCAST_HPP #define PYTHONIC_TYPES_NUMPY_BROADCAST_HPP #include "pythonic/include/types/numpy_broadcast.hpp" #include "pythonic/types/vectorizable_type.hpp" #include "pythonic/types/nditerator.hpp" #include "pythonic/types/slice.hpp" #include "pythonic/types/tuple.hpp" PYTHONIC_NS_BEGIN namespace types { template T const &broadcasted::operator[](long i) const { return ref; } template T const &broadcasted::fast(long i) const { return ref; } #ifdef USE_XSIMD template template typename broadcasted::simd_iterator broadcasted::vbegin(vectorizer) const { return {*this}; } template template typename broadcasted::simd_iterator broadcasted::vend(vectorizer) const { return {*this}; // should ! 
happen anyway } #endif template template auto broadcasted::operator()(long arg0, Arg1 &&arg1, Args &&... args) const -> decltype(ref(std::forward(arg1), std::forward(args)...)) { return ref(std::forward(arg1), std::forward(args)...); } template template auto broadcasted::operator()(S arg0, Arg1 &&arg1, Args &&... args) const -> decltype(ref((arg0.step, std::forward(arg1)), std::forward(args)...)) { return {ref(std::forward(arg1), std::forward(args)...)}; } template long broadcasted::flat_size() const { return 0; } template template broadcast_base::broadcast_base(V v) : _value(v) { } #ifdef USE_XSIMD template template broadcast_base::broadcast_base(V v) : _value(v), _splated(xsimd::simd_type(_value)) { } #endif template template broadcast::broadcast(V v) : _base(v) { } template typename broadcast::dtype broadcast::operator[](long) const { return _base._value; } template template typename broadcast::dtype broadcast:: operator[](array) const { return _base._value; } template typename broadcast::dtype broadcast::fast(long) const { return _base._value; } template template typename broadcast::dtype broadcast:: operator()(Args &&... args) const { return _base._value; } template template std::integral_constant broadcast::shape() const { return {}; } template long broadcast::flat_size() const { return 0; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_expr.hpp000066400000000000000000000305661416264035500233660ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_EXPR_HPP #define PYTHONIC_TYPES_NUMPY_EXPR_HPP #include "pythonic/include/types/numpy_expr.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/types/nditerator.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace types { namespace details { long best_of() { return 1; } template long best_of(V0 v0, Vs... vs) { long vtail = best_of(vs...); return ((long)v0 == vtail) ? 
v0 : (v0 * vtail); } template long init_shape_element(Args const &args, utils::index_sequence) { return best_of(std::get(args).template shape()...); } } template numpy_expr::numpy_expr(Args const &... args) : args(args...) { } template template typename numpy_expr::const_iterator numpy_expr::_begin(utils::index_sequence) const { return {{make_step(size(), std::get(args).template shape<0>())...}, const_cast::type const &>( std::get(args)).begin()...}; } template typename numpy_expr::const_iterator numpy_expr::begin() const { return _begin(utils::make_index_sequence{}); } template template typename numpy_expr::const_iterator numpy_expr::_end(utils::index_sequence) const { return {{make_step(size(), std::get(args).template shape<0>())...}, const_cast::type const &>( std::get(args)).end()...}; } template typename numpy_expr::const_iterator numpy_expr::end() const { return _end(utils::make_index_sequence{}); } template typename numpy_expr::const_fast_iterator numpy_expr::begin(types::fast) const { return {*this, 0}; } template typename numpy_expr::const_fast_iterator numpy_expr::end(types::fast) const { return {*this, size()}; } template template bool numpy_expr::_no_broadcast(utils::index_sequence) const { bool child_broadcast = false; (void)std::initializer_list{ (child_broadcast |= !utils::no_broadcast(std::get(args)))...}; if (child_broadcast) return false; bool same_shape = true; (void)std::initializer_list{ (same_shape &= (is_trivial_broadcast() || std::get(args).template shape<0>() == size()))...}; return same_shape; } template template bool numpy_expr::_no_broadcast_ex( utils::index_sequence) const { bool child_broadcast = false; (void)std::initializer_list{ (child_broadcast |= !utils::no_broadcast_ex(std::get(args)))...}; if (child_broadcast) return false; bool same_shape = true; auto shp = sutils::getshape(*this); (void)std::initializer_list{ (same_shape &= (is_trivial_broadcast() || sutils::getshape(std::get(args)) == shp))...}; return same_shape; } template 
template bool numpy_expr::_no_broadcast_vectorize( utils::index_sequence) const { bool child_broadcast = false; (void)std::initializer_list{ (child_broadcast |= !utils::no_broadcast(std::get(args)))...}; if (child_broadcast) return false; bool same_shape = true; (void)std::initializer_list{ (same_shape &= ((long)std::get(args).template shape<0>() == size()))...}; return same_shape; } template bool numpy_expr::no_broadcast() const { return _no_broadcast(utils::make_index_sequence{}); } template bool numpy_expr::no_broadcast_ex() const { return _no_broadcast_ex(utils::make_index_sequence{}); } template bool numpy_expr::no_broadcast_vectorize() const { return _no_broadcast_vectorize( utils::make_index_sequence{}); } template template typename numpy_expr::iterator numpy_expr::_begin(utils::index_sequence) { return {{make_step(size(), std::get(args).template shape<0>())...}, const_cast::type &>(std::get(args)) .begin()...}; } template typename numpy_expr::iterator numpy_expr::begin() { return _begin(utils::make_index_sequence{}); } template template typename numpy_expr::iterator numpy_expr::_end(utils::index_sequence) { return {{make_step(size(), std::get(args).template shape<0>())...}, const_cast::type &>(std::get(args)) .end()...}; } template typename numpy_expr::iterator numpy_expr::end() { return _end(utils::make_index_sequence{}); } template auto numpy_expr::fast(long i) const -> decltype(this->_fast(i, utils::make_index_sequence{})) { return _fast(i, utils::make_index_sequence{}); } template template auto numpy_expr::map_fast(Indices... 
indices) const -> decltype( this->_map_fast(array{{indices...}}, utils::make_index_sequence{})) { static_assert(sizeof...(Indices) == sizeof...(Args), "compatible call"); return _map_fast(array{{indices...}}, utils::make_index_sequence{}); } template auto numpy_expr::operator[](long i) const -> decltype(this->fast(i)) { if (i < 0) i += size(); return fast(i); } #ifdef USE_XSIMD template template typename numpy_expr::simd_iterator numpy_expr::_vbegin(vectorize, utils::index_sequence) const { return {{make_step(size(), std::get(args).template shape<0>())...}, std::make_tuple(const_cast::type const &>( std::get(args)).begin()...), std::get(args).vbegin(vectorize{})...}; } template typename numpy_expr::simd_iterator numpy_expr::vbegin(vectorize) const { return _vbegin(vectorize{}, utils::make_index_sequence{}); } template template typename numpy_expr::simd_iterator numpy_expr::_vend(vectorize, utils::index_sequence) const { return {{make_step(size(), std::get(args).template shape<0>())...}, std::make_tuple(const_cast::type const &>( std::get(args)).end()...), std::get(args).vend(vectorize{})...}; } template typename numpy_expr::simd_iterator numpy_expr::vend(vectorize) const { return _vend(vectorize{}, utils::make_index_sequence{}); } template template typename numpy_expr::simd_iterator_nobroadcast numpy_expr::_vbegin(vectorize_nobroadcast, utils::index_sequence) const { return {std::get(args).vbegin(vectorize_nobroadcast{})...}; } template typename numpy_expr::simd_iterator_nobroadcast numpy_expr::vbegin(vectorize_nobroadcast) const { return _vbegin(vectorize_nobroadcast{}, utils::make_index_sequence{}); } template template typename numpy_expr::simd_iterator_nobroadcast numpy_expr::_vend(vectorize_nobroadcast, utils::index_sequence) const { return {std::get(args).vend(vectorize_nobroadcast{})...}; } template typename numpy_expr::simd_iterator_nobroadcast numpy_expr::vend(vectorize_nobroadcast) const { return _vend(vectorize_nobroadcast{}, 
utils::make_index_sequence{}); } #endif template template auto numpy_expr::operator()(S const &... s) const -> decltype(this->_get(utils::make_index_sequence{}, s...)) { return _get(utils::make_index_sequence{}, s...); } template template typename std::enable_if< is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr, ndarray>>>::type numpy_expr::fast(F const &filter) const { long sz = filter.template shape<0>(); long *raw = (long *)malloc(sz * sizeof(long)); long n = 0; for (long i = 0; i < sz; ++i) if (filter.fast(i)) raw[n++] = i; // realloc(raw, n * sizeof(long)); long shp[1] = {n}; return this->fast( ndarray>(raw, shp, types::ownership::owned)); } template template typename std::enable_if< is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr, ndarray>>>::type numpy_expr:: operator[](F const &filter) const { return fast(filter); } template template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, F>>::type numpy_expr:: operator[](F const &filter) const { return {*this, filter}; } template template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, F>>::type numpy_expr::fast(F const &filter) const { return {*this, filter}; } template numpy_expr::operator bool() const { if (sutils::any_of(*this, [](long n) { return n != 1; })) throw ValueError("The truth value of an array with more than one element " "is ambiguous. 
Use a.any() or a.all()"); array first = {0}; return operator[](first); } template long numpy_expr::flat_size() const { return prod_helper(*this, utils::make_index_sequence()); } template long numpy_expr::size() const { return this->template shape<0>(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_gexpr.hpp000066400000000000000000000637051416264035500235360ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_GEXPR_HPP #define PYTHONIC_TYPES_NUMPY_GEXPR_HPP #include "pythonic/include/types/numpy_gexpr.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/utils/meta.hpp" #include "pythonic/operator_/iadd.hpp" #include "pythonic/operator_/isub.hpp" #include "pythonic/operator_/imul.hpp" #include "pythonic/operator_/idiv.hpp" PYTHONIC_NS_BEGIN namespace types { template bool slices_may_overlap(S0 const &s0, S1 const &s1) { if (s0.step >= 0 && s1.step >= 0) return s0.lower > s1.lower; else return s0.lower < s1.lower; } template bool slices_may_overlap(S const &s, long const &i) { return s.lower <= i && i < s.upper; } template bool slices_may_overlap(long const &i, S const &s) { return s.lower <= i && i < s.upper; } template bool may_overlap(E0 const &, E1 const &) { return true; } template bool may_overlap(E0 const &, broadcast const &) { return false; } template typename std::enable_if::value, bool>::type may_overlap(numpy_gexpr const &gexpr, E1 const &) { return false; } template bool may_overlap_helper(numpy_gexpr const &gexpr, Tuple const &args, utils::index_sequence) { bool overlaps[] = {may_overlap(gexpr, std::get(args))...}; return std::any_of(std::begin(overlaps), std::end(overlaps), [](bool b) { return b; }); } template bool may_overlap(numpy_gexpr const &gexpr, numpy_expr const &expr) { return may_overlap_helper(gexpr, expr.args, utils::make_index_sequence{}); } template bool may_gexpr_overlap(E0 const &gexpr, E1 const &expr) { if (!std::is_same::value) { return false; } if (std::tuple_size::value != 
std::tuple_size::value) { return false; } if (gexpr.arg.id() != expr.arg.id()) { return false; } if (!slices_may_overlap(std::get<0>(gexpr.slices), std::get<0>(expr.slices))) return false; return true; } template bool may_overlap(numpy_gexpr const &, S...> const &gexpr, numpy_gexpr const &, Sp...> const &expr) { return may_gexpr_overlap(gexpr, expr); } template bool may_overlap(numpy_gexpr &, S...> const &gexpr, numpy_gexpr &, Sp...> const &expr) { return may_gexpr_overlap(gexpr, expr); } template bool may_overlap(numpy_gexpr &, S...> const &gexpr, numpy_gexpr const &, Sp...> const &expr) { return may_gexpr_overlap(gexpr, expr); } template bool may_overlap(numpy_gexpr const &, S...> const &gexpr, numpy_gexpr &, Sp...> const &expr) { return may_gexpr_overlap(gexpr, expr); } template T to_slice::operator()(T value) { return value; } fast_contiguous_slice to_slice::operator()(none_type) { return {0, 1}; } template T to_normalized_slice::operator()(T value) { return value; } contiguous_normalized_slice to_normalized_slice:: operator()(none_type) { return {0, 1}; } /* helper to build a new shape out of a shape and a slice with new axis */ template auto make_reshape(pS const &shape, IsNewAxis is_new_axis) -> decltype(sutils::copy_new_axis(shape, is_new_axis)) { return sutils::copy_new_axis(shape, is_new_axis); } /* helper to build an extended slice aka numpy_gexpr out of a subscript */ namespace details { template std::tuple<> merge_gexpr, std::tuple<>>::run( S const &, std::tuple<> const &t0, std::tuple<> const &) { return t0; } template template std::tuple merge_gexpr, std::tuple<>>::run( S const &, std::tuple const &t0, std::tuple<>) { return t0; } template constexpr long count_new_axis_helper(utils::index_sequence) { return count_new_axis::type...>::value; } template auto normalize_all(S const &s, T const &t, utils::index_sequence) -> decltype(std::make_tuple(normalize( std::get(t), s.template shape( utils::make_index_sequence<1 + Is>())>())...)) { return 
std::make_tuple(normalize( std::get(t), s.template shape( utils::make_index_sequence<1 + Is>())>())...); } template template std::tuple...> merge_gexpr, std::tuple>::run( S const &s, std::tuple<>, std::tuple const &t1) { return normalize_all(s, t1, utils::make_index_sequence()); } template typename std::enable_if::value == 0, numpy_gexpr>::type _make_gexpr(Arg arg, std::tuple const &t) { return {arg, t}; } template numpy_gexpr::type>::type...> _make_gexpr_helper(Arg arg, S const &s, utils::index_sequence) { return {arg, to_normalized_slice::type>{}( std::get(s))...}; } template auto _make_gexpr(Arg arg, std::tuple const &s) -> typename std::enable_if< count_new_axis::value != 0, decltype(_make_gexpr_helper( arg.reshape(make_reshape::value>( arg, std::tuple::is_new_axis>...>())), s, utils::make_index_sequence()))>::type { return _make_gexpr_helper( arg.reshape(make_reshape::value>( arg, std::tuple::is_new_axis>...>())), s, utils::make_index_sequence()); } template template numpy_gexpr...> make_gexpr:: operator()(Arg arg, std::tuple s, utils::index_sequence) { return {arg, normalize(std::get(s), arg.template shape())...}; } template numpy_gexpr...> make_gexpr:: operator()(Arg arg, S const &... s) { return operator()(arg, std::tuple(s...), utils::make_index_sequence()); } } template auto make_gexpr(Arg &&arg, S const &... 
s) -> decltype(details::make_gexpr{}(std::forward(arg), s...)) { return details::make_gexpr{}(std::forward(arg), s...); } template numpy_gexpr::numpy_gexpr() : buffer(nullptr) { } template template // not using the default one, to make it possible to // accept reference && non reference version of Argp numpy_gexpr::numpy_gexpr(numpy_gexpr const &other) : arg(other.arg), slices(other.slices), _shape(other._shape), buffer(other.buffer), _strides(other._strides) { static_assert(std::is_same::type, typename returnable::type>::value, "this constructor is only here to adapt reference / non " "reference type, nothing else"); assert(buffer); } template template typename std::enable_if< std::is_same::value || std::is_same::value, void>::type numpy_gexpr::init_shape(Slice const &s, utils::int_<1>, utils::int_) { buffer += s.lower * arg.template strides(); sutils::assign(std::get(_strides), s.step * arg.template strides()); sutils::assign(std::get(_shape), std::get(slices).size()); } template template typename std::enable_if< std::is_same::value || std::is_same::value, void>::type numpy_gexpr::init_shape(Slice const &s, utils::int_, utils::int_) { sutils::assign(std::get(_shape), std::get(slices).size()); buffer += s.lower * arg.template strides(); sutils::assign(std::get(_strides), s.step * arg.template strides()); init_shape(std::get(slices), utils::int_(), utils::int_()); } template template void numpy_gexpr::init_shape(long cs, utils::int_<1>, utils::int_) { assert(cs >= 0 && "normalized"); buffer += cs * arg.template strides(); } template template void numpy_gexpr::init_shape(long cs, utils::int_, utils::int_) { assert(cs >= 0 && "normalized"); buffer += cs * arg.template strides(); init_shape(std::get(slices), utils::int_(), utils::int_()); } template numpy_gexpr::numpy_gexpr(Arg const &arg, std::tuple const &values) : arg(arg), slices(values), buffer(const_cast(this->arg.buffer)) { assert(buffer); init_shape(std::get<0>(slices), utils::int_(), utils::int_<0>()); 
sutils::copy_shape::value, count_long::value>( _shape, arg, utils::make_index_sequence::value)>()); sutils::copy_strides::value, count_long::value>( _strides, arg, utils::make_index_sequence::value)>()); } template numpy_gexpr::numpy_gexpr(Arg const &arg, S const &... s) : numpy_gexpr(arg, std::tuple(s...)) { } template template numpy_gexpr::numpy_gexpr(numpy_gexpr const &expr, Arg arg) : arg(arg), slices(tuple_pop(expr.slices)), buffer(expr.buffer) { assert(buffer); sutils::copy_shape<0, 1>(_shape, expr, utils::make_index_sequence()); buffer += arg.buffer - expr.arg.buffer; sutils::copy_strides<0, 1>(_strides, expr, utils::make_index_sequence()); } template template numpy_gexpr::numpy_gexpr(G const &expr, Arg &&arg) : arg(std::forward(arg)), slices(tuple_pop(expr.slices)), buffer(expr.buffer) { assert(buffer); sutils::copy_shape<0, 1>(_shape, expr, utils::make_index_sequence()); buffer += (arg.buffer - expr.arg.buffer); sutils::copy_strides<0, 1>(_strides, expr, utils::make_index_sequence()); } template template typename std::enable_if::value, numpy_gexpr &>::type numpy_gexpr::_copy(E const &expr) { static_assert(value >= utils::dim_of::value, "dimensions match"); /* at this point, we could not statically check that there is not an * aliasing issue that would require an extra copy because of the vector * assignment * perform a fuzzy alias check dynamically! 
*/ assert(buffer); constexpr bool vectorize = is_vectorizable && std::is_same::type>::value && is_vectorizable_array::value; if (may_overlap(*this, expr)) { return utils::broadcast_copy< numpy_gexpr &, ndarray, value, value - utils::dim_of::value, vectorize>( *this, ndarray(expr)); } else { // 100% sure there's no overlap return utils::broadcast_copy::value, vectorize>( *this, expr); } } template template typename std::enable_if::value, numpy_gexpr &>::type numpy_gexpr::_copy(E const &expr) { constexpr bool vectorize = is_vectorizable && std::is_same::type>::value && is_vectorizable_array::value; static_assert(value >= utils::dim_of::value, "dimensions match"); assert(buffer); return utils::broadcast_copy::value, vectorize>( *this, expr); } template template numpy_gexpr &numpy_gexpr::operator=(E const &expr) { return _copy(expr); } template numpy_gexpr &numpy_gexpr:: operator=(numpy_gexpr const &expr) { if (buffer == nullptr) { // arg = expr.arg; const_cast::type &>(arg) = expr.arg; slices = expr.slices; assert(expr.buffer); buffer = arg.buffer + (expr.buffer - expr.arg.buffer); _shape = expr._shape; _strides = expr._strides; assert(sutils::getshape(*this) == sutils::getshape(expr) && "compatible sizes"); return *this; } else { return _copy(expr); } } template template numpy_gexpr &numpy_gexpr:: operator=(numpy_gexpr const &expr) { if (buffer == nullptr) { // arg = expr.arg; const_cast::type &>(arg) = expr.arg; slices = expr.slices; assert(expr.buffer); buffer = arg.buffer + (expr.buffer - expr.arg.buffer); _shape = expr._shape; _strides = expr._strides; return *this; } else { return _copy(expr); } } template template typename std::enable_if::value, numpy_gexpr &>::type numpy_gexpr::update_(E const &expr) { using BExpr = typename std::conditional::value, broadcast, E const &>::type; BExpr bexpr = expr; // 100% sure there's no overlap return utils::broadcast_update < Op, numpy_gexpr &, BExpr, value, value - (std::is_scalar::value + utils::dim_of::value), 
is_vectorizable && types::is_vectorizable::type>::type>::value && std::is_same::type>::type>::value > (*this, bexpr); } template template typename std::enable_if::value, numpy_gexpr &>::type numpy_gexpr::update_(E const &expr) { using BExpr = typename std::conditional::value, broadcast, E const &>::type; BExpr bexpr = expr; if (may_overlap(*this, expr)) { using NBExpr = ndarray::type::dtype, typename std::remove_reference::type::shape_t>; return utils::broadcast_update < Op, numpy_gexpr &, NBExpr, value, value - (std::is_scalar::value + utils::dim_of::value), is_vectorizable && types::is_vectorizable::value && std::is_same::type::dtype>::value > (*this, NBExpr(bexpr)); } else { // 100% sure there's no overlap return utils::broadcast_update < Op, numpy_gexpr &, BExpr, value, value - (std::is_scalar::value + utils::dim_of::value), is_vectorizable && types::is_vectorizable::value && std::is_same::type::dtype>::value > (*this, bexpr); } } template template numpy_gexpr &numpy_gexpr::operator+=(E const &expr) { return update_(expr); } template numpy_gexpr &numpy_gexpr:: operator+=(numpy_gexpr const &expr) { return update_(expr); } template template numpy_gexpr &numpy_gexpr::operator-=(E const &expr) { return update_(expr); } template numpy_gexpr &numpy_gexpr:: operator-=(numpy_gexpr const &expr) { return update_(expr); } template template numpy_gexpr &numpy_gexpr::operator*=(E const &expr) { return update_(expr); } template numpy_gexpr &numpy_gexpr:: operator*=(numpy_gexpr const &expr) { return update_(expr); } template template numpy_gexpr &numpy_gexpr::operator/=(E const &expr) { return update_(expr); } template numpy_gexpr &numpy_gexpr:: operator/=(numpy_gexpr const &expr) { return update_(expr); } template template numpy_gexpr &numpy_gexpr::operator|=(E const &expr) { return update_(expr); } template numpy_gexpr &numpy_gexpr:: operator|=(numpy_gexpr const &expr) { return update_(expr); } template template numpy_gexpr &numpy_gexpr::operator&=(E const &expr) { return 
update_(expr); } template numpy_gexpr &numpy_gexpr:: operator&=(numpy_gexpr const &expr) { return update_(expr); } template template numpy_gexpr &numpy_gexpr::operator^=(E const &expr) { return update_(expr); } template numpy_gexpr &numpy_gexpr:: operator^=(numpy_gexpr const &expr) { return update_(expr); } template typename numpy_gexpr::const_iterator numpy_gexpr::begin() const { return make_const_nditerator < is_strided || value != 1 > ()(*this, 0); } template typename numpy_gexpr::const_iterator numpy_gexpr::end() const { return make_const_nditerator < is_strided || value != 1 > ()(*this, size()); } template typename numpy_gexpr::iterator numpy_gexpr::begin() { return make_nditerator < is_strided || value != 1 > ()(*this, 0); } template typename numpy_gexpr::iterator numpy_gexpr::end() { return make_nditerator < is_strided || value != 1 > ()(*this, size()); } #ifdef USE_XSIMD template template typename numpy_gexpr::simd_iterator numpy_gexpr::vbegin(vectorizer) const { return {buffer}; } template template typename numpy_gexpr::simd_iterator numpy_gexpr::vend(vectorizer) const { using vector_type = typename xsimd::simd_type; static const std::size_t vector_size = vector_type::size; return {buffer + long(size() / vector_size * vector_size)}; } #endif template auto numpy_gexpr::operator[](long i) const -> decltype(this->fast(i)) { if (i < 0) i += std::get<0>(_shape); return fast(i); } template auto numpy_gexpr::operator[](long i) -> decltype(this->fast(i)) { if (i < 0) i += std::get<0>(_shape); return fast(i); } template template auto numpy_gexpr::operator()(Sp const &... 
s) const -> decltype(make_gexpr(*this, s...)) { return make_gexpr(*this, s...); } template template auto numpy_gexpr::operator[](Sp const &s) const -> typename std::enable_if::value, decltype(make_gexpr(*this, (s.lower, s)))>::type { return make_gexpr(*this, s); } template template auto numpy_gexpr::fast(array const &indices) const & -> decltype(nget().fast(*this, indices)) { return nget().fast(*this, indices); } template template auto numpy_gexpr::fast(array const &indices) && -> decltype(nget().fast(std::move(*this), indices)) { return nget().fast(std::move(*this), indices); } template template auto numpy_gexpr::operator[](array const &indices) const & -> decltype(nget()(*this, indices)) { return nget()(*this, indices); } template template auto numpy_gexpr::operator[](array const &indices) && -> decltype(nget()(std::move(*this), indices)) { return nget()(std::move(*this), indices); } template template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr, ndarray>>>::type numpy_gexpr::fast(F const &filter) const { long sz = filter.template shape<0>(); long *raw = (long *)malloc(sz * sizeof(long)); long n = 0; for (long i = 0; i < sz; ++i) if (filter.fast(i)) raw[n++] = i; // realloc(raw, n * sizeof(long)); long shp[1] = {n}; return this->fast( ndarray>(raw, shp, types::ownership::owned)); } template template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr, ndarray>>>::type numpy_gexpr:: operator[](F const &filter) const { return fast(filter); } template numpy_gexpr::operator bool() const { if (sutils::any_of(*this, [](long n) { return n != 1; })) throw ValueError("The truth value of an array with more than one element " "is ambiguous. 
Use a.any() or a.all()"); return *buffer; } template long numpy_gexpr::flat_size() const { return sutils::prod(*this); } template long numpy_gexpr::size() const { return std::get<0>(_shape); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_iexpr.hpp000066400000000000000000000352741416264035500235400ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_IEXPR_HPP #define PYTHONIC_TYPES_NUMPY_IEXPR_HPP #include "pythonic/include/types/numpy_iexpr.hpp" #include "pythonic/types/nditerator.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/utils/array_helper.hpp" #include "pythonic/utils/broadcast_copy.hpp" #include "pythonic/include/types/raw_array.hpp" #include "pythonic/types/ndarray.hpp" // we should remove that dep during a refactoring :-) #include "pythonic/operator_/iadd.hpp" #include "pythonic/operator_/iand.hpp" #include "pythonic/operator_/idiv.hpp" #include "pythonic/operator_/imul.hpp" #include "pythonic/operator_/ior.hpp" #include "pythonic/operator_/ixor.hpp" #include "pythonic/operator_/isub.hpp" #include PYTHONIC_NS_BEGIN namespace types { template numpy_iexpr::numpy_iexpr() : buffer(nullptr) { } template template // not using the default one, to make it possible to // accept reference and non reference version of Argp numpy_iexpr::numpy_iexpr(numpy_iexpr const &other) : arg(other.arg), buffer(other.buffer) { assert(buffer); } template template // not using the default one, to make it possible to // accept reference and non reference version of Argp numpy_iexpr::numpy_iexpr(numpy_iexpr const &other) : arg(const_cast::type &>(other.arg)), buffer(other.buffer) { assert(buffer); } template numpy_iexpr::numpy_iexpr(Arg const &arg, long index) : arg(arg), buffer(arg.buffer + index * arg.template strides<0>()) { assert(buffer); } template long numpy_iexpr::size() const { return arg.template shape<1>(); } template template numpy_iexpr &numpy_iexpr::operator=(E const &expr) { assert(buffer); return utils::broadcast_copy < 
numpy_iexpr &, E, value, value - utils::dim_of::value, is_vectorizable && std::is_same::type>::value && types::is_vectorizable::value > (*this, expr); } template template numpy_iexpr &numpy_iexpr::operator=(numpy_iexpr const &expr) { if (!buffer) { this->~numpy_iexpr(); return *new (this) numpy_iexpr(expr); } assert(buffer); return utils::broadcast_copy < numpy_iexpr &, numpy_iexpr const &, value, value - utils::dim_of::value, is_vectorizable && numpy_iexpr::is_vectorizable && std::is_same::dtype>::value > (*this, expr); } template numpy_iexpr &numpy_iexpr::operator=(numpy_iexpr const &expr) { if (!buffer) { this->~numpy_iexpr(); return *new (this) numpy_iexpr(expr); } assert(buffer); return utils::broadcast_copy < numpy_iexpr &, numpy_iexpr const &, value, value - utils::dim_of::value, is_vectorizable && numpy_iexpr::is_vectorizable && std::is_same::dtype>::value > (*this, expr); } template template numpy_iexpr &numpy_iexpr::update_(Expr const &expr) { using BExpr = typename std::conditional::value, broadcast, Expr const &>::type; assert(buffer); BExpr bexpr = expr; utils::broadcast_update< Op, numpy_iexpr &, BExpr, value, value - (std::is_scalar::value + utils::dim_of::value), is_vectorizable && types::is_vectorizable::type>::type>::value && std::is_same::type>::type>::value>(*this, bexpr); return *this; } template template numpy_iexpr &numpy_iexpr::operator+=(E const &expr) { return update_(expr); } template numpy_iexpr &numpy_iexpr::operator+=(numpy_iexpr const &expr) { return update_(expr); } template template numpy_iexpr &numpy_iexpr::operator-=(E const &expr) { return update_(expr); } template numpy_iexpr &numpy_iexpr::operator-=(numpy_iexpr const &expr) { return update_(expr); } template template numpy_iexpr &numpy_iexpr::operator*=(E const &expr) { return update_(expr); } template numpy_iexpr &numpy_iexpr::operator*=(numpy_iexpr const &expr) { return update_(expr); } template template numpy_iexpr &numpy_iexpr::operator/=(E const &expr) { return 
update_(expr); } template numpy_iexpr &numpy_iexpr::operator/=(numpy_iexpr const &expr) { return update_(expr); } template template numpy_iexpr &numpy_iexpr::operator&=(E const &expr) { return update_(expr); } template numpy_iexpr &numpy_iexpr::operator&=(numpy_iexpr const &expr) { return update_(expr); } template template numpy_iexpr &numpy_iexpr::operator|=(E const &expr) { return update_(expr); } template numpy_iexpr &numpy_iexpr::operator|=(numpy_iexpr const &expr) { return update_(expr); } template template numpy_iexpr &numpy_iexpr::operator^=(E const &expr) { return update_(expr); } template numpy_iexpr &numpy_iexpr::operator^=(numpy_iexpr const &expr) { return update_(expr); } template typename numpy_iexpr::const_iterator numpy_iexpr::begin() const { return make_const_nditerator < is_strided || value != 1 > ()(*this, 0); } template typename numpy_iexpr::const_iterator numpy_iexpr::end() const { return make_const_nditerator < is_strided || value != 1 > ()(*this, size()); } template typename numpy_iexpr::iterator numpy_iexpr::begin() { return make_nditerator < is_strided || value != 1 > ()(*this, 0); } template typename numpy_iexpr::iterator numpy_iexpr::end() { return make_nditerator < is_strided || value != 1 > ()(*this, size()); } template typename numpy_iexpr::dtype const *numpy_iexpr::fbegin() const { return buffer; } template typename numpy_iexpr::dtype const *numpy_iexpr::fend() const { return buffer + flat_size(); } template typename numpy_iexpr::dtype *numpy_iexpr::fbegin() { return buffer; } template typename numpy_iexpr::dtype const *numpy_iexpr::fend() { return buffer + flat_size(); } template size_t compute_fast_offset(size_t offset, long mult, T0 const &indices, T1 const &shape, std::integral_constant) { return offset; } template size_t compute_fast_offset(size_t offset, long mult, T0 const &indices, T1 const &shape, std::integral_constant) { return compute_fast_offset(offset + std::get(indices) * mult, mult * shape.template shape(), indices, 
shape, std::integral_constant()); } template typename numpy_iexpr::dtype const & numpy_iexpr::fast(array const &indices) const { return buffer[compute_fast_offset( indices[value - 1], arg.template shape(), indices, arg, std::integral_constant())]; } template typename numpy_iexpr::dtype & numpy_iexpr::fast(array const &indices) { return const_cast( const_cast(*this).fast(indices)); } template template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr, ndarray>>>::type numpy_iexpr::fast(F const &filter) const { long sz = filter.template shape<0>(); long *raw = (long *)malloc(sz * sizeof(long)); long n = 0; for (long i = 0; i < sz; ++i) if (filter.fast(i)) raw[n++] = i; // realloc(raw, n * sizeof(long)); long shp[1] = {n}; return this->fast( ndarray>(raw, shp, types::ownership::owned)); } #ifdef USE_XSIMD template template typename numpy_iexpr::simd_iterator numpy_iexpr::vbegin(vectorizer) const { return {buffer}; } template template typename numpy_iexpr::simd_iterator numpy_iexpr::vend(vectorizer) const { using vector_type = typename xsimd::simd_type; static const std::size_t vector_size = vector_type::size; return {buffer + long(size() / vector_size * vector_size)}; } #endif template auto numpy_iexpr::operator[](long i) const & -> decltype(this->fast(i)) { if (i < 0) i += size(); return fast(i); } template auto numpy_iexpr::operator[](long i) & -> decltype(this->fast(i)) { if (i < 0) i += size(); return fast(i); } template auto numpy_iexpr::operator[](long i) && -> decltype(std::move(*this).fast(i)) { if (i < 0) i += size(); return std::move(*this).fast(i); } template template typename std::enable_if::value, numpy_gexpr, normalize_t>>::type numpy_iexpr:: operator[](Sp const &s0) const { return make_gexpr(*this, s0); } template template typename std::enable_if< is_slice::value, numpy_gexpr, normalize_t, normalize_t...>>::type numpy_iexpr:: operator()(Sp const &s0, S const &... 
s) const { return make_gexpr(*this, s0, s...); } template template typename std::enable_if< is_numexpr_arg::value && std::is_same::value, numpy_vexpr, ndarray>>>::type numpy_iexpr:: operator[](F const &filter) const { return fast(filter); } template size_t compute_offset(size_t offset, long mult, T0 const &indices, T1 const &shape, std::integral_constant) { return offset; } template size_t compute_offset(size_t offset, long mult, T0 const &indices, T1 const &shape, std::integral_constant) { return compute_offset( offset + (std::get(indices) < 0 ? std::get(indices) + shape.template shape() : std::get(indices)) * mult, mult * shape.template shape(), indices, shape, std::integral_constant()); } template typename numpy_iexpr::dtype const &numpy_iexpr:: operator[](array const &indices) const { return buffer[compute_offset(indices[value - 1] < 0 ? indices[value - 1] + arg.template shape() : indices[value - 1], arg.template shape(), indices, arg, std::integral_constant())]; } template typename numpy_iexpr::dtype &numpy_iexpr:: operator[](array const &indices) { return const_cast(const_cast(*this)[indices]); } template numpy_iexpr::operator bool() const { if (sutils::any_of(*this, [](long n) { return n != 1; })) throw ValueError("The truth value of an array with more than one element " "is ambiguous. 
Use a.any() or a.all()"); return *buffer; } template long prod_helper(S const &shape, utils::index_sequence) { long res = 1; (void)std::initializer_list{ (res *= (long)(shape.template shape()))...}; return res; } template long numpy_iexpr::flat_size() const { return prod_helper(*this, utils::make_index_sequence()); } template long numpy_iexpr::buffer_offset(Arg const &arg, long index, utils::int_<0>) { return index; } template template long numpy_iexpr::buffer_offset(ndarray const &arg, long index, utils::int_) { return index * arg.template strides<0>(); } template template long numpy_iexpr::buffer_offset(E const &arg, long index, utils::int_) { return index * arg.template strides<0>(); } template template numpy_iexpr numpy_iexpr_helper::get(T &&e, long i) { return {std::forward(e), i}; } template typename T::dtype &numpy_iexpr_helper<1>::get(T const &e, long i) { return e.buffer[i * e.template strides()]; } template typename T::dtype &numpy_iexpr_helper<1>::get(T &&e, long i) { return e.buffer[i * e.template strides()]; } template typename T::dtype &numpy_iexpr_helper<1>::get(T &e, long i) { return e.buffer[i * e.template strides()]; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_nary_expr.hpp000066400000000000000000000022021416264035500244010ustar00rootroot00000000000000#ifndef NUMPY_NARY_FUNC_NAME #error NUMPY_NARY_FUNC_NAME undefined #endif #ifndef NUMPY_NARY_FUNC_SYM #error NUMPY_NARY_FUNC_SYM undefined #endif #ifndef NUMPY_NARY_RESHAPE_MODE #define NUMPY_NARY_RESHAPE_MODE adapt_type #endif #ifndef NUMPY_NARY_EXTRA_METHOD #define NUMPY_NARY_EXTRA_METHOD #endif namespace functor { template auto NUMPY_NARY_FUNC_NAME:: operator()(T &&... 
args) const -> typename std::enable_if< !types::valid_numexpr_parameters::type...>::value, decltype(NUMPY_NARY_FUNC_SYM(std::forward(args)...))>::type { return NUMPY_NARY_FUNC_SYM(std::forward(args)...); } template typename std::enable_if< types::valid_numexpr_parameters::type...>::value, types::numpy_expr< NUMPY_NARY_FUNC_NAME, typename types::NUMPY_NARY_RESHAPE_MODE::type...>>::type NUMPY_NARY_FUNC_NAME:: operator()(E &&... args) const { return {std::forward(args)...}; } } #undef NUMPY_NARY_FUNC_NAME #undef NUMPY_NARY_FUNC_SYM #undef NUMPY_NARY_RESHAPE_MODE #undef NUMPY_NARY_EXTRA_METHOD pythran-0.10.0+ds2/pythran/pythonic/types/numpy_op_helper.hpp000066400000000000000000000003031416264035500243470ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_OP_HELPER_HPP #define PYTHONIC_TYPES_NUMPY_OP_HELPER_HPP #include "pythonic/include/types/numpy_op_helper.hpp" #include "pythonic/types/numpy_broadcast.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_operators.hpp000066400000000000000000000075771416264035500244340ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_OPERATORS_HPP #define PYTHONIC_TYPES_NUMPY_OPERATORS_HPP #include "pythonic/include/types/numpy_operators.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/operator_/add.hpp" #include "pythonic/operator_/and_.hpp" #include "pythonic/operator_/or_.hpp" #include "pythonic/operator_/xor_.hpp" #include "pythonic/operator_/div.hpp" #include "pythonic/operator_/eq.hpp" #include "pythonic/operator_/gt.hpp" #include "pythonic/operator_/ge.hpp" #include "pythonic/operator_/lshift.hpp" #include "pythonic/operator_/lt.hpp" #include "pythonic/operator_/le.hpp" #include "pythonic/operator_/mul.hpp" #include "pythonic/operator_/neg.hpp" #include "pythonic/operator_/not_.hpp" #include "pythonic/operator_/ne.hpp" #include "pythonic/operator_/pos.hpp" #include "pythonic/operator_/rshift.hpp" #include "pythonic/operator_/sub.hpp" #include "pythonic/numpy/mod.hpp" #include 
"pythonic/numpy/bitwise_not.hpp" #include "pythonic/types/numpy_op_helper.hpp" PYTHONIC_NS_BEGIN /* operators must live in the same namespace as the associated type */ namespace types { #define NUMPY_BINARY_FUNC_NAME operator+ #define NUMPY_BINARY_FUNC_SYM operator_::functor::add #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator& #define NUMPY_BINARY_FUNC_SYM operator_::functor::and_ #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator~ #define NUMPY_UNARY_FUNC_SYM numpy::functor::bitwise_not #include "pythonic/types/numpy_unary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator| #define NUMPY_BINARY_FUNC_SYM operator_::functor::or_ #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator^ #define NUMPY_BINARY_FUNC_SYM operator_::functor::xor_ #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator/ #define NUMPY_BINARY_FUNC_SYM operator_::functor::div #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator== #define NUMPY_BINARY_FUNC_SYM operator_::functor::eq #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator% #define NUMPY_BINARY_FUNC_SYM numpy::functor::mod #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator> #define NUMPY_BINARY_FUNC_SYM operator_::functor::gt #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator>= #define NUMPY_BINARY_FUNC_SYM operator_::functor::ge #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator<< #define NUMPY_BINARY_FUNC_SYM operator_::functor::lshift #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator< #define NUMPY_BINARY_FUNC_SYM operator_::functor::lt #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator<= #define NUMPY_BINARY_FUNC_SYM operator_::functor::le #include 
"pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator* #define NUMPY_BINARY_FUNC_SYM operator_::functor::mul #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator- #define NUMPY_UNARY_FUNC_SYM operator_::functor::neg #include "pythonic/types/numpy_unary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator!= #define NUMPY_BINARY_FUNC_SYM operator_::functor::ne #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator+ #define NUMPY_UNARY_FUNC_SYM operator_::functor::pos #include "pythonic/types/numpy_unary_op.hpp" #define NUMPY_UNARY_FUNC_NAME operator! #define NUMPY_UNARY_FUNC_SYM operator_::functor::not_ #include "pythonic/types/numpy_unary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator>> #define NUMPY_BINARY_FUNC_SYM operator_::functor::rshift #include "pythonic/types/numpy_binary_op.hpp" #define NUMPY_BINARY_FUNC_NAME operator- #define NUMPY_BINARY_FUNC_SYM operator_::functor::sub #include "pythonic/types/numpy_binary_op.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_texpr.hpp000066400000000000000000000256531416264035500235530ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_TEXPR_HPP #define PYTHONIC_TYPES_NUMPY_TEXPR_HPP #include "pythonic/include/types/numpy_texpr.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/array.hpp" #include "pythonic/numpy/transpose.hpp" #include "pythonic/operator_/iadd.hpp" #include "pythonic/operator_/iand.hpp" #include "pythonic/operator_/idiv.hpp" #include "pythonic/operator_/imul.hpp" #include "pythonic/operator_/ior.hpp" #include "pythonic/operator_/ixor.hpp" #include "pythonic/operator_/isub.hpp" PYTHONIC_NS_BEGIN namespace types { template numpy_texpr_2::numpy_texpr_2() { } template numpy_texpr_2::numpy_texpr_2(Arg const &arg) : arg(arg) { } template typename numpy_texpr_2::const_iterator numpy_texpr_2::begin() const { return {*this, 0}; } template typename numpy_texpr_2::const_iterator 
numpy_texpr_2::end() const { return {*this, size()}; } template typename numpy_texpr_2::iterator numpy_texpr_2::begin() { return {*this, 0}; } template typename numpy_texpr_2::iterator numpy_texpr_2::end() { return {*this, size()}; } template auto numpy_texpr_2::fast(long i) const -> decltype(this->arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), i)) { return arg( contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), i); } template auto numpy_texpr_2::fast(long i) -> decltype(this->arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), i)) { return arg( contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), i); } #ifdef USE_XSIMD template template typename numpy_texpr_2::simd_iterator numpy_texpr_2::vbegin(vectorizer) const { return {*this}; } template template typename numpy_texpr_2::simd_iterator numpy_texpr_2::vend(vectorizer) const { return {*this}; // ! vectorizable anyway } #endif template auto numpy_texpr_2::operator[](long i) const -> decltype(this->fast(i)) { if (i < 0) i += size(); return fast(i); } template auto numpy_texpr_2::operator[](long i) -> decltype(this->fast(i)) { if (i < 0) i += size(); return fast(i); } template template auto numpy_texpr_2:: operator[](S const &s0) const -> numpy_texprarg( fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), (s0.step, s0)))> { return {arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), s0)}; } template template auto numpy_texpr_2:: operator[](S const &s0) -> numpy_texprarg( fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), (s0.step, s0)))> { return {arg(fast_contiguous_slice(pythonic::builtins::None, pythonic::builtins::None), s0)}; } /* element filtering */ template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr, 
ndarray>>>::type numpy_texpr_2::fast(F const &filter) const { long sz = filter.template shape<0>(); long *raw = (long *)malloc(sz * sizeof(long)); long n = 0; for (long i = 0; i < sz; ++i) if (filter.fast(i)) raw[n++] = i; // realloc(raw, n * sizeof(long)); return this->fast(ndarray>(raw, pshape(n), types::ownership::owned)); } template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr::dtype, pshape>, ndarray>>>::type numpy_texpr_2::fast(F const &filter) const { return numpy::functor::array{}(*this) .flat()[ndarray(filter).flat()]; } template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value == 1 && !is_pod_array::value, numpy_vexpr, ndarray>>>::type numpy_texpr_2:: operator[](F const &filter) const { return fast(filter); } template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && F::value != 1 && !is_pod_array::value, numpy_vexpr::dtype, pshape>, ndarray>>>::type numpy_texpr_2:: operator[](F const &filter) const { return fast(filter); } template template // indexing through an array of indices -- a view typename std::enable_if< is_numexpr_arg::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, ndarray>>>::type numpy_texpr_2:: operator[](F const &filter) const { static_assert(F::value == 1, "advanced indexing only supporint with 1D index"); return {*this, filter}; } template template // indexing through an array of indices -- a view typename std::enable_if< is_numexpr_arg::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, ndarray>>>::type numpy_texpr_2::fast(F const &filter) const { static_assert(F::value == 1, "advanced indexing only supported with 1D index"); return {*this, filter}; } template template auto 
numpy_texpr_2::operator()(S0 const &s0, S const &... s) const -> typename std::enable_if< !is_numexpr_arg::value, decltype(this->_reverse_index( std::tuple{s0, s...}, utils::make_reversed_index_sequence<1 + sizeof...(S)>()))>::type { return _reverse_index( std::tuple{s0, s...}, utils::make_reversed_index_sequence<1 + sizeof...(S)>()); } template template auto numpy_texpr_2::operator()(S0 const &s0, S const &... s) const -> typename std::enable_if::value, decltype(this->copy()(s0, s...))>::type { return copy()(s0, s...); } template numpy_texpr_2::operator bool() const { return (bool)arg; } template long numpy_texpr_2::flat_size() const { return arg.flat_size(); } template intptr_t numpy_texpr_2::id() const { return arg.id(); } template template numpy_texpr_2 &numpy_texpr_2::operator=(Expr const &expr) { return utils::broadcast_copy < numpy_texpr_2 &, Expr, value, value - utils::dim_of::value, is_vectorizable && std::is_same::type>::value && types::is_vectorizable::value > (*this, expr); } template template numpy_texpr_2 &numpy_texpr_2:: operator=(numpy_texpr const &expr) { arg = expr.arg; return *this; } template template numpy_texpr_2 &numpy_texpr_2::update_(Expr const &expr) { using BExpr = typename std::conditional::value, broadcast, Expr const &>::type; BExpr bexpr = expr; utils::broadcast_update< Op, numpy_texpr_2 &, BExpr, value, value - (std::is_scalar::value + utils::dim_of::value), is_vectorizable && types::is_vectorizable::type>::type>::value && std::is_same::type>::type>::value>(*this, bexpr); return *this; } template template numpy_texpr_2 &numpy_texpr_2::operator+=(Expr const &expr) { return update_(expr); } template template numpy_texpr_2 &numpy_texpr_2::operator-=(E const &expr) { return update_(expr); } template template numpy_texpr_2 &numpy_texpr_2::operator*=(E const &expr) { return update_(expr); } template template numpy_texpr_2 &numpy_texpr_2::operator/=(E const &expr) { return update_(expr); } template template numpy_texpr_2 
&numpy_texpr_2::operator&=(E const &expr) { return update_(expr); } template template numpy_texpr_2 &numpy_texpr_2::operator|=(E const &expr) { return update_(expr); } template template numpy_texpr_2 &numpy_texpr_2::operator^=(E const &expr) { return update_(expr); } // only implemented for N = 2 template numpy_texpr>>::numpy_texpr( ndarray> const &arg) : numpy_texpr_2>>{arg} { } template numpy_texpr>>::numpy_texpr( ndarray> const &arg) : numpy_texpr_2>>{arg} { } template numpy_texpr>::numpy_texpr( numpy_gexpr const &arg) : numpy_texpr_2>{arg} { } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/numpy_unary_op.hpp000066400000000000000000000007011416264035500242300ustar00rootroot00000000000000#ifndef NUMPY_UNARY_FUNC_NAME #error NUMPY_UNARY_FUNC_NAME undefined #endif #ifndef NUMPY_UNARY_FUNC_SYM #error NUMPY_UNARY_FUNC_SYM undefined #endif template typename std::enable_if< types::valid_numop_parameters::type>::value, types::numpy_expr>::type NUMPY_UNARY_FUNC_NAME(E &&self) { return {std::forward(self)}; } #undef NUMPY_UNARY_FUNC_NAME #undef NUMPY_UNARY_FUNC_SYM pythran-0.10.0+ds2/pythran/pythonic/types/numpy_vexpr.hpp000066400000000000000000000151641416264035500235510ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_NUMPY_VEXPR_HPP #define PYTHONIC_TYPES_NUMPY_VEXPR_HPP PYTHONIC_NS_BEGIN namespace types { template template typename std::enable_if::value, numpy_vexpr &>::type numpy_vexpr:: operator=(E const &expr) { // TODO: avoid the tmp copy when no aliasing typename assignable::type tmp{expr}; for (long i = 0, n = tmp.template shape<0>(); i < n; ++i) (*this).fast(i) = tmp.fast(i); return *this; } template template typename std::enable_if::value, numpy_vexpr &>::type numpy_vexpr:: operator=(E const &expr) { for (long i = 0, n = shape<0>(); i < n; ++i) (*this).fast(i) = expr; return *this; } template numpy_vexpr &numpy_vexpr::operator=(numpy_vexpr const &expr) { // TODO: avoid the tmp copy when no aliasing typename assignable>::type tmp{expr}; 
for (long i = 0, n = tmp.template shape<0>(); i < n; ++i) (*this).fast(i) = tmp.fast(i); return *this; } template typename numpy_vexpr::iterator numpy_vexpr::begin() { return {*this, 0}; } template typename numpy_vexpr::iterator numpy_vexpr::end() { return {*this, shape<0>()}; } template typename numpy_vexpr::const_iterator numpy_vexpr::begin() const { return {*this, 0}; } template typename numpy_vexpr::const_iterator numpy_vexpr::end() const { return {*this, shape<0>()}; } template template auto numpy_vexpr::operator()(S const &... slices) const -> decltype(ndarray>{*this}(slices...)) { return ndarray>{*this}(slices...); } #ifdef USE_XSIMD template template typename numpy_vexpr::simd_iterator numpy_vexpr::vbegin(vectorizer) const { return {*this, 0}; } template template typename numpy_vexpr::simd_iterator numpy_vexpr::vend(vectorizer) const { return {*this, 0}; } #endif /* element filtering */ template template // indexing through an array of boolean -- a mask typename std::enable_if< is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr, ndarray>>>::type numpy_vexpr::fast(E const &filter) const { long sz = filter.template shape<0>(); long *raw = (long *)malloc(sz * sizeof(long)); long n = 0; for (long i = 0; i < sz; ++i) if (filter.fast(i)) raw[n++] = i; // realloc(raw, n * sizeof(long)); long shp[1] = {n}; return this->fast( ndarray>(raw, shp, types::ownership::owned)); } template template // indexing through an array of boolean -- a mask typename std::enable_if< !is_slice::value && is_numexpr_arg::value && std::is_same::value && !is_pod_array::value, numpy_vexpr, ndarray>>>::type numpy_vexpr:: operator[](E const &filter) const { return fast(filter); } template template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, E>>::type numpy_vexpr:: operator[](E const &filter) const { return {*this, filter}; } template 
template // indexing through an array of indices -- a view typename std::enable_if::value && !is_array_index::value && !std::is_same::value && !is_pod_array::value, numpy_vexpr, E>>::type numpy_vexpr::fast(E const &filter) const { return (*this)[filter]; } template template numpy_vexpr &numpy_vexpr::update_(Expr const &expr) { using BExpr = typename std::conditional::value, broadcast, Expr const &>::type; BExpr bexpr = expr; utils::broadcast_update< Op, numpy_vexpr &, BExpr, value, value - (std::is_scalar::value + utils::dim_of::value), is_vectorizable && types::is_vectorizable::type>::type>::value && std::is_same::type>::type>::value>(*this, bexpr); return *this; } template template numpy_vexpr &numpy_vexpr::operator+=(Expr const &expr) { return update_(expr); } template template numpy_vexpr &numpy_vexpr::operator-=(Expr const &expr) { return update_(expr); } template template numpy_vexpr &numpy_vexpr::operator*=(Expr const &expr) { return update_(expr); } template template numpy_vexpr &numpy_vexpr::operator/=(Expr const &expr) { return update_(expr); } template template numpy_vexpr &numpy_vexpr::operator&=(Expr const &expr) { return update_(expr); } template template numpy_vexpr &numpy_vexpr::operator|=(Expr const &expr) { return update_(expr); } template template numpy_vexpr &numpy_vexpr::operator^=(Expr const &expr) { return update_(expr); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/pointer.hpp000066400000000000000000000027551416264035500226370ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_POINTER_HPP #define PYTHONIC_TYPES_POINTER_HPP #include "pythonic/include/types/pointer.hpp" PYTHONIC_NS_BEGIN namespace types { template typename pointer::reference pointer::operator[](long i) { return data[i]; } template typename pointer::value_type pointer::operator[](long i) const { return data[i]; } template typename pointer::reference pointer::fast(long i) { return data[i]; } template typename pointer::value_type pointer::fast(long i) const 
{ return data[i]; } } PYTHONIC_NS_END namespace std { template typename pythonic::types::pointer::reference get(pythonic::types::pointer &t) { return t[I]; } template typename pythonic::types::pointer::value_type get(pythonic::types::pointer const &t) { return t[I]; } template typename pythonic::types::pointer::value_type get(pythonic::types::pointer &&t) { return t[I]; } } #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN template PyObject *to_python>::convert(types::pointer const &v) { return nullptr; } template bool from_python>::is_convertible(PyObject *obj) { return false; } template types::pointer from_python>::convert(PyObject *obj) { return {nullptr}; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/raw_array.hpp000066400000000000000000000022721416264035500231400ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_RAW_ARRAY_HPP #define PYTHONIC_TYPES_RAW_ARRAY_HPP #include "pythonic/include/types/raw_array.hpp" #include "pythonic/builtins/MemoryError.hpp" #include #include PYTHONIC_NS_BEGIN namespace types { /* Wrapper class to store an array pointer * * for internal use only, meant to be stored in a shared_ptr */ template raw_array::raw_array() : data(nullptr), external(false) { } template raw_array::raw_array(size_t n) : data((T *)malloc(n * sizeof(T))), external(false) { if (!data) { std::ostringstream oss; oss << "unable to allocate " << n << " bytes"; throw types::MemoryError(oss.str()); } } template raw_array::raw_array(T *d, ownership o) : data(d), external(o == ownership::external) { } template raw_array::raw_array(raw_array &&d) : data(d.data), external(d.external) { d.data = nullptr; } template raw_array::~raw_array() { if (data && !external) free(data); } template void raw_array::forget() { external = true; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/set.hpp000066400000000000000000000305751416264035500217530ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_SET_HPP #define PYTHONIC_TYPES_SET_HPP 
#include "pythonic/include/types/set.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/types/empty_iterator.hpp" #include "pythonic/types/list.hpp" #include "pythonic/utils/iterator.hpp" #include "pythonic/utils/reserve.hpp" #include "pythonic/utils/shared_ref.hpp" #include "pythonic/builtins/in.hpp" #include #include #include #include #include #include PYTHONIC_NS_BEGIN namespace types { /// set implementation // constructors template set::set() : data(utils::no_memory()) { } template template set::set(InputIterator start, InputIterator stop) : data() { std::copy(start, stop, std::back_inserter(*this)); } template set::set(empty_set const &) : data() { } template set::set(T const &value, single_value) : data() { data->insert(value); } template set::set(std::initializer_list l) : data(std::move(l)) { } template set::set(set const &other) : data(other.data) { } template template set::set(set const &other) : data() { std::copy(other.begin(), other.end(), std::inserter(*data, data->begin())); } // iterators template typename set::iterator set::begin() { return data->begin(); } template typename set::const_iterator set::begin() const { return data->begin(); } template typename set::iterator set::end() { return data->end(); } template typename set::const_iterator set::end() const { return data->end(); } template typename set::reverse_iterator set::rbegin() { return data->rbegin(); } template typename set::const_reverse_iterator set::rbegin() const { return data->rbegin(); } template typename set::reverse_iterator set::rend() { return data->rend(); } template typename set::const_reverse_iterator set::rend() const { return data->rend(); } // modifiers template T set::pop() { if (size() <= 0) throw std::out_of_range("Trying to pop() an empty set."); T tmp = *begin(); data->erase(begin()); return tmp; } template void set::add(const T &x) { data->insert(x); } template void set::push_back(const T &x) { data->insert(x); } template void set::clear() { 
data->clear(); } template template void set::discard(U const &elem) { // Remove element elem from the set if it is present. data->erase(elem); } template template void set::remove(U const &elem) { // Remove element elem from the set. Raises KeyError if elem is ! // contained in the set. if (!data->erase(elem)) throw std::runtime_error( "set.delete() : couldn't delete element ! in the set."); } // set interface template set::operator bool() const { return !data->empty(); } template long set::size() const { return data->size(); } // Misc template set set::copy() const { return set(begin(), end()); } template template bool set::isdisjoint(U const &other) const { // Return true if the this has no elements in common with other. for (iterator it = begin(); it != end(); ++it) { if (in(other, *it)) return false; } return true; } template template bool set::issubset(U const &other) const { // Test whether every element in the set is in other. for (iterator it = begin(); it != end(); ++it) { if (!in(other, *it)) return false; } return true; } template template bool set::issuperset(U const &other) const { // Test whether every element in other is in the set. return other.issubset(*this); } template set set::union_() const { return set(begin(), end()); } template template typename __combined, U, Types...>::type set::union_(U &&other, Types &&... others) const { typename __combined, U, Types...>::type tmp = union_(std::forward(others)...); tmp.data->insert(other.begin(), other.end()); return tmp; } template template none_type set::update(Types &&... others) { *this = union_(std::forward(others)...); return {}; } template set set::intersection() const { return set(begin(), end()); } template template typename __combined, U, Types...>::type set::intersection(U const &other, Types const &... others) const { // Return a new set with elements common to the set && all others. 
typename __combined, U, Types...>::type tmp = intersection(others...); for (auto it = tmp.begin(); it != tmp.end(); ++it) { if (!in(other, *it)) tmp.discard( *it); // faster than remove() but ! direct interaction with data } return tmp; } template template void set::intersection_update(Types const &... others) { *this = intersection(others...); } template set set::difference() const { return set(begin(), end()); } template template set set::difference(U const &other, Types const &... others) const { // Return a new set with elements in the set that are ! in the others. set tmp = difference(others...); /* for(iterator it=tmp.begin(); it!=tmp.end();++it){ if(other.get_data().find(*it)!=other.end()) tmp.discard(*it); } */ // This algo will do several times the same find(), because // std::set::erase() calls find. Lame! for (typename U::const_iterator it = other.begin(); it != other.end(); ++it) { tmp.discard(*it); } return tmp; } template template bool set::contains(V const &v) const { return data->find(v) != data->end(); } template template void set::difference_update(Types const &... others) { *this = difference(others...); } template template set::type> set::symmetric_difference(set const &other) const { // Return a new set with elements in either the set || other but ! both. // return ((*this-other) | (other-*this)); // We must use fcts && ! operators because fcts have to handle any // iterable objects && operators only sets (cf python ref) return (this->difference(other)).union_(other.difference(*this)); } template template typename __combined>::type set::symmetric_difference(U const &other) const { // Return a new set with elements in either the set || other but ! both. set::value_type> tmp( other.begin(), other.end()); // We must use fcts && ! 
operators because fcts have to handle any // iterable objects && operators only sets (cf python ref) return (this->difference(other)).union_(tmp.difference(*this)); } template template void set::symmetric_difference_update(U const &other) { *this = symmetric_difference(other); } // Operators template template bool set::operator==(set const &other) const { return *data == *other.data; } template template bool set::operator<=(set const &other) const { // Every element in *this is in other return issubset(other); } template template bool set::operator<(set const &other) const { // Every element in this is in other && this != other return (*this <= other) && (this->size() != other.size()); } template template bool set::operator>=(set const &other) const { // Every element in other is in set return other <= *this; } template template bool set::operator>(set const &other) const { // Every element in other is in set && this != other return other < *this; } template template set::type> set:: operator|(set const &other) const { return union_(other); } template template void set::operator|=(set const &other) { update(other); } template template set::type> set:: operator&(set const &other) const { return intersection(other); } template template void set::operator&=(set const &other) { return intersection_update(other); } template template set set::operator-(set const &other) const { return difference(other); } template template void set::operator-=(set const &other) { return difference_update(other); } template template set::type> set:: operator^(set const &other) const { return symmetric_difference(other); } template template void set::operator^=(set const &other) { return symmetric_difference_update(other); } template intptr_t set::id() const { return reinterpret_cast(&(*data)); } template std::ostream &operator<<(std::ostream &os, set const &v) { if (v.size() == 0) { return os << "set()"; } os << "{"; const char *commaSeparator = ""; for (const auto &e : v) { os << 
commaSeparator << e; commaSeparator = ", "; } return os << "}"; } /// empty_set implementation empty_set empty_set::operator|(empty_set const &) { return empty_set(); } template set empty_set::operator|(set const &s) { return s; } template U empty_set::operator&(U const &s) { return {}; } template U empty_set::operator-(U const &s) { return {}; } empty_set empty_set::operator^(empty_set const &) { return empty_set(); } template set empty_set::operator^(set const &s) { return s; } template none_type empty_set::update(Types &&...) { return {}; } empty_set::operator bool() { return false; } empty_set::iterator empty_set::begin() const { return empty_iterator(); } empty_set::iterator empty_set::end() const { return empty_iterator(); } template bool empty_set::contains(V const &) const { return false; } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN template PyObject *to_python>::convert(types::set const &v) { PyObject *obj = PySet_New(nullptr); for (auto const &e : v) PySet_Add(obj, ::to_python(e)); return obj; } PyObject *to_python::convert(types::empty_set) { return PySet_New(nullptr); } template bool from_python>::is_convertible(PyObject *obj) { if (PySet_Check(obj)) { PyObject *iterator = PyObject_GetIter(obj); if (PyObject *item = PyIter_Next(iterator)) { bool res = ::is_convertible(item); Py_DECREF(item); Py_DECREF(iterator); return res; } else { Py_DECREF(iterator); return true; } } return false; } template types::set from_python>::convert(PyObject *obj) { types::set v = types::empty_set(); // may be useful to reserve more space ? 
PyObject *iterator = PyObject_GetIter(obj); while (PyObject *item = PyIter_Next(iterator)) { v.add(::from_python(item)); Py_DECREF(item); } Py_DECREF(iterator); return v; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/slice.hpp000066400000000000000000000430721416264035500222530ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_SLICE_HPP #define PYTHONIC_TYPES_SLICE_HPP #include "pythonic/include/types/slice.hpp" #include "pythonic/types/NoneType.hpp" #include "pythonic/builtins/None.hpp" #include #include #include #include PYTHONIC_NS_BEGIN namespace types { struct contiguous_slice; namespace details { long roundup_divide(long a, long b) { if (b > 0) return (a + b - 1) / b; else return (a + b + 1) / b; } } normalized_slice::normalized_slice() { } normalized_slice::normalized_slice(long lower, long upper, long step) : lower(lower), upper(upper), step(step) { } normalized_slice normalized_slice:: operator*(normalized_slice const &other) const { return {lower + step * other.lower, lower + step * other.upper, step * other.step}; } normalized_slice normalized_slice:: operator*(contiguous_normalized_slice const &other) const { return {lower + step * other.lower, lower + step * other.upper, step * other.step}; } normalized_slice normalized_slice::operator*(slice const &other) const { return (*this) * other.normalize(size()); } normalized_slice normalized_slice:: operator*(contiguous_slice const &other) const { return (*this) * other.normalize(size()); } long normalized_slice::size() const { return std::max(0L, details::roundup_divide(upper - lower, step)); } long normalized_slice::get(long i) const { return lower + i * step; } slice::slice(none lower, none upper, none step) : lower(lower), upper(upper), step(step) { } slice::slice() : lower(builtins::None), upper(builtins::None), step(builtins::None) { } slice slice::operator*(slice const &other) const { // We do not implement these because it requires to know the "end" // value of the slice 
which is ! possible if it is ! "step == 1" slice // TODO: We can skip these constraints if we know begin, end && step. long sstep = (step.is_none()) ? 1 : (long)step; long ostep = (other.step.is_none()) ? 1 : (long)other.step; assert(!((ostep < 0 || static_cast(other.upper) < 0 || static_cast(other.lower) < 0) && sstep != 1 && sstep != -1) && "not implemented"); bound new_lower; if (other.lower.is_none() || (long)other.lower == 0) { if (ostep > 0) new_lower = lower; else if (sstep > 0) { if (upper.is_none() || (long)upper == 0) // 0 means the first value && ! the last value new_lower = none_type{}; else new_lower = (long)upper - 1; } else { if (upper.is_none() || (long)upper == -1) new_lower = none_type{}; else new_lower = (long)upper + 1; } } else { none ref = ((long)other.lower > 0) ? lower : upper; if (ref.is_none) { if (sstep > 0) new_lower = (long)other.lower * sstep; else new_lower = (long)other.lower * sstep - 1; } else new_lower = (long)ref + (long)other.lower * sstep; } long new_step = sstep * ostep; bound new_upper; if (other.upper.is_none()) { if (ostep > 0) new_upper = upper; else if (sstep > 0) { if (lower.is_none() || (long)lower == 0) new_upper = none_type{}; else new_upper = (long)lower - 1; } else { if (lower.is_none() || (long)lower == -1) // 0 means the first value && ! the last value new_upper = none_type{}; else new_upper = (long)lower + 1; } } else { none ref = ((long)other.upper > 0) ? lower : upper; if (ref.is_none) { if (sstep > 0) new_upper = (long)other.upper * sstep; else new_upper = (long)other.upper * sstep - 1; } else new_upper = (long)ref + (long)other.upper * sstep; } return {new_lower, new_upper, new_step}; } /* Normalize change a[:-1] to a[:len(a)-1] to have positif index. It also check for value bigger than len(a) to fit the size of the container */ normalized_slice slice::normalize(long max_size) const { long sstep = step.is_none() ? 
1 : (long)step; long normalized_upper; if (upper.is_none()) { if (sstep > 0L) normalized_upper = max_size; else normalized_upper = -1L; } else { if (upper < 0L) normalized_upper = std::max(-1L, max_size + upper); else if (upper > max_size) normalized_upper = max_size; else normalized_upper = (long)upper; } long normalized_lower; if (lower.is_none() && sstep > 0L) normalized_lower = 0L; else if (lower.is_none() && sstep < 0L) normalized_lower = max_size - 1L; else if (lower < 0L) normalized_lower = std::max(0L, max_size + lower); else if (lower > max_size) normalized_lower = max_size; else normalized_lower = (long)lower; return {normalized_lower, normalized_upper, sstep}; } /* * An assert is raised when we can't compute the size without more * informations. */ long slice::size() const { long sstep = step.is_none() ? 1 : (long)step; assert(!(upper.is_none() && lower.is_none())); long len; #define SIGN(x) (((long)x >= 0l) ? 0 : 1) if (upper.is_none()) { assert(SIGN(sstep) != SIGN(lower)); len = -(long)lower; } else if (lower.is_none()) { assert(SIGN(sstep) == SIGN((long)upper)); len = upper; } else len = upper - lower; #undef SIGN return std::max(0L, details::roundup_divide(len, sstep)); } long slice::get(long i) const { long sstep = step.is_none() ? 
1 : (long)step; assert(!upper.is_none() && !lower.is_none()); return (long)lower + i * sstep; } contiguous_normalized_slice::contiguous_normalized_slice() { } contiguous_normalized_slice::contiguous_normalized_slice(long lower, long upper) : lower(lower), upper(upper) { } contiguous_normalized_slice contiguous_normalized_slice:: operator*(contiguous_normalized_slice const &other) const { return contiguous_normalized_slice(lower + other.lower, lower + other.upper); } contiguous_normalized_slice contiguous_normalized_slice:: operator*(contiguous_slice const &other) const { return (*this) * other.normalize(size()); } contiguous_normalized_slice contiguous_normalized_slice:: operator*(fast_contiguous_slice const &other) const { return (*this) * other.normalize(size()); } normalized_slice contiguous_normalized_slice:: operator*(normalized_slice const &other) const { return normalized_slice(lower + step * other.lower, lower + step * other.upper, step * other.step); } normalized_slice contiguous_normalized_slice:: operator*(slice const &other) const { return (*this) * other.normalize(size()); } long contiguous_normalized_slice::size() const { return std::max(0L, upper - lower); } inline long contiguous_normalized_slice::get(long i) const { return lower + i; } contiguous_slice::contiguous_slice(none lower, none upper) : lower(lower.is_none ? 
0 : (long)lower), upper(upper) { } contiguous_slice contiguous_slice:: operator*(contiguous_slice const &other) const { long new_lower; if (other.lower < 0) new_lower = upper + other.lower * step; else new_lower = lower + other.lower * step; bound new_upper; if (other.upper.is_none()) new_upper = upper; else if ((long)other.upper < 0) { if (upper.is_none()) new_upper = (long)other.upper * step; else new_upper = upper + (long)other.upper * step; } else new_upper = lower + (long)other.upper * step; return {new_lower, new_upper}; } slice contiguous_slice::operator*(slice const &other) const { none new_lower; if (other.lower.is_none() || (long)other.lower == 0) { if (other.step > 0) new_lower = lower; else if (upper.is_none() || (long)upper == 0) // 0 means the first value && ! the last value new_lower = none_type{}; else new_lower = (long)upper - 1; } else { if ((long)other.lower > 0) new_lower = lower + (long)other.lower * step; else if (upper.is_none()) new_lower = (long)other.lower * step; else new_lower = (long)upper + (long)other.lower * step; } long new_step = other.step; bound new_upper; if (other.upper.is_none()) { if (other.step > 0) new_upper = upper; else if ((long)lower == 0) new_upper = none_type{}; else new_upper = (long)lower - 1; } else { if ((long)other.upper > 0) new_upper = lower + (long)other.upper * step; else if (upper.is_none()) new_upper = (long)other.upper * step; else new_upper = (long)upper + (long)other.upper * step; } return {new_lower, new_upper, new_step}; } /* Normalize change a[:-1] to a[:len(a)-1] to have positif index. 
It also check for value bigger than len(a) to fit the size of the container */ contiguous_normalized_slice contiguous_slice::normalize(long max_size) const { long normalized_upper; if (upper.is_none()) normalized_upper = max_size; else if (upper < 0L) normalized_upper = std::max(-1L, max_size + upper); else if (upper > max_size) normalized_upper = max_size; else normalized_upper = (long)upper; long normalized_lower; if (lower < 0L) normalized_lower = std::max(0L, max_size + lower); else if (lower > max_size) normalized_lower = max_size; else normalized_lower = (long)lower; return contiguous_normalized_slice(normalized_lower, normalized_upper); } long contiguous_slice::size() const { long len; if (upper.is_none()) { assert(lower < 0); len = -lower; } else len = upper - lower; return std::max(0L, len); } long contiguous_slice::get(long i) const { return int(lower) + i; } fast_contiguous_slice::fast_contiguous_slice(none lower, none upper) : lower(lower.is_none ? 0 : (long)lower), upper(upper) { } fast_contiguous_slice fast_contiguous_slice:: operator*(fast_contiguous_slice const &other) const { long new_lower = lower + other.lower * step; bound new_upper; if (other.upper.is_none()) new_upper = upper; else new_upper = lower + (long)other.upper * step; return {new_lower, new_upper}; } contiguous_slice fast_contiguous_slice:: operator*(contiguous_slice const &other) const { long new_lower; if (other.lower < 0) new_lower = upper + other.lower * step; else new_lower = lower + other.lower * step; bound new_upper; if (other.upper.is_none()) new_upper = upper; else if ((long)other.upper < 0) { if (upper.is_none()) new_upper = (long)other.upper * step; else new_upper = upper + (long)other.upper * step; } else new_upper = lower + (long)other.upper * step; return {new_lower, new_upper}; } slice fast_contiguous_slice::operator*(slice const &other) const { none new_lower; if (other.lower.is_none() || (long)other.lower == 0) { if (other.step > 0) new_lower = lower; else if 
(upper.is_none() || (long)upper == 0) // 0 means the first value && ! the last value new_lower = none_type{}; else new_lower = (long)upper - 1; } else { if ((long)other.lower > 0) new_lower = lower + (long)other.lower * step; else if (upper.is_none()) new_lower = (long)other.lower * step; else new_lower = (long)upper + (long)other.lower * step; } long new_step = other.step; bound new_upper; if (other.upper.is_none()) { if (other.step > 0) new_upper = upper; else if ((long)lower == 0) new_upper = none_type{}; else new_upper = (long)lower - 1; } else { if ((long)other.upper > 0) new_upper = lower + (long)other.upper * step; else if (upper.is_none()) new_upper = (long)other.upper * step; else new_upper = (long)upper + (long)other.upper * step; } return {new_lower, new_upper, new_step}; } /* Normalize change a[:-1] to a[:len(a)-1] to have positif index. It also check for value bigger than len(a) to fit the size of the container */ contiguous_normalized_slice fast_contiguous_slice::normalize(long max_size) const { long normalized_upper; if (upper.is_none()) normalized_upper = max_size; else if (upper > max_size) normalized_upper = max_size; else normalized_upper = (long)upper; long normalized_lower; if (lower > max_size) normalized_lower = max_size; else normalized_lower = (long)lower; return {normalized_lower, normalized_upper}; } long fast_contiguous_slice::size() const { assert(!upper.is_none()); return std::max(0L, upper - lower); } slice slice::operator*(contiguous_slice const &other) const { // We do ! implement these because it requires to know the "end" // value of the slice which is ! possible if it is ! "step == 1" slice // TODO: We can skip these constraints if we know begin, end && step. assert(!((static_cast(other.upper) < 0 || static_cast(other.lower) < 0) && step != 1 && step != -1) && "not implemented"); bound new_lower; if (other.lower == 0) new_lower = lower; else { bound ref = ((long)other.lower > 0) ? 
lower : upper; if (ref.is_none()) { if (step > 0) new_lower = (long)other.lower * step; else new_lower = (long)other.lower * step - 1; } else new_lower = (long)ref + (long)other.lower * step; } long new_step = step; bound new_upper; if (other.upper.is_none()) new_upper = upper; else { bound ref = ((long)other.upper > 0) ? lower : upper; if (ref.is_none()) { if (step > 0) new_upper = (long)other.upper * step; else new_upper = (long)other.upper * step - 1; } else new_upper = (long)ref + (long)other.upper * step; } return {new_lower, new_upper, new_step}; } template std::ostream &operator<<(std::ostream &os, bound const &b) { return (b.is_none() ? (os << "None") : (os << (T)b)); } template typename std::enable_if::value, std::ostream &>::type operator<<(std::ostream &os, S const &s) { return os << "slice(" << s.lower << ", " << s.upper << ", " << s.step << ")"; } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE PYTHONIC_NS_BEGIN template PyObject *to_python>::convert(types::bound const &b) { if (b.is_none()) return ::to_python(types::none_type{}); else return ::to_python((T)b); } PyObject *to_python::convert( types::contiguous_normalized_slice const &v) { return PySlice_New(::to_python(v.lower), ::to_python(v.upper), ::to_python(types::none_type{})); } PyObject * to_python::convert(types::contiguous_slice const &v) { return PySlice_New(::to_python(v.lower), ::to_python(v.upper), ::to_python(types::none_type{})); } PyObject * to_python::convert(types::normalized_slice const &v) { if (v.step > 0) { return PySlice_New(::to_python(v.lower), ::to_python(v.upper), ::to_python(v.step)); } else { return PySlice_New(::to_python(v.lower), v.upper < 0 ? ::to_python(types::none_type{}) : ::to_python(v.upper), ::to_python(v.step)); } } PyObject *to_python::convert(types::slice const &v) { if (v.step > 0) { return PySlice_New(::to_python(v.lower), ::to_python(v.upper), ::to_python(v.step)); } else { return PySlice_New(::to_python(v.lower), v.upper < 0 ? 
::to_python(types::none_type{}) : ::to_python(v.upper), ::to_python(v.step)); } } bool from_python::is_convertible(PyObject *obj) { return PySlice_Check(obj); } types::slice from_python::convert(PyObject *obj) { Py_ssize_t start, stop, step; #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION > 6) || \ (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 6 && \ PY_MICRO_VERSION >= 1 && !defined(PYPY_VERSION)) PySlice_Unpack(obj, &start, &stop, &step); #elif PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 1 PySlice_GetIndices((PyObject *)obj, PY_SSIZE_T_MAX, &start, &stop, &step); #else PySlice_GetIndices((PySliceObject *)obj, PY_SSIZE_T_MAX, &start, &stop, &step); #endif types::none nstart, nstop, nstep; if (start != PY_SSIZE_T_MAX) nstart = start; else nstart = types::none_type{}; if (stop != PY_SSIZE_T_MAX) nstop = stop; else nstop = types::none_type{}; if (step != PY_SSIZE_T_MAX) nstep = step; else nstep = types::none_type{}; return {nstart, nstop, nstep}; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/static_if.hpp000066400000000000000000000002021416264035500231050ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_STATIC_IF_HPP #define PYTHONIC_TYPES_STATIC_IF_HPP #include "pythonic/include/types/static_if.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/str.hpp000066400000000000000000000404541416264035500217650ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_STR_HPP #define PYTHONIC_TYPES_STR_HPP #include "pythonic/include/types/str.hpp" #include "pythonic/types/tuple.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/utils/shared_ref.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/utils/int_.hpp" #include #include #include #include #include PYTHONIC_NS_BEGIN namespace types { chr::operator str() const { return str(c); } /// const_sliced_str_iterator implementation const_sliced_str_iterator::const_sliced_str_iterator(char const *data, long step) : data(data), step(step) { } 
const_sliced_str_iterator const_sliced_str_iterator::operator++() { data += step; return *this; } bool const_sliced_str_iterator:: operator<(const_sliced_str_iterator const &other) const { return (step > 0) ? (data < other.data) : (data > other.data); } bool const_sliced_str_iterator:: operator==(const_sliced_str_iterator const &other) const { return data == other.data; } bool const_sliced_str_iterator:: operator!=(const_sliced_str_iterator const &other) const { return data != other.data; } chr const_sliced_str_iterator::operator*() const { return (*data); } const_sliced_str_iterator const_sliced_str_iterator::operator-(long n) const { const_sliced_str_iterator other(*this); other.data -= step * n; return other; } long const_sliced_str_iterator:: operator-(const_sliced_str_iterator const &other) const { return (data - other.data) / step; } /// sliced_str implementation // constructor template sliced_str::sliced_str() : data(utils::no_memory()) { } template sliced_str::sliced_str(sliced_str const &s) : data(s.data), slicing(s.slicing) { } template sliced_str::sliced_str(sliced_str const &s, typename S::normalized_type const &sl) : data(s.data), slicing(s.slicing * sl) { } template sliced_str::sliced_str(str const &other, typename S::normalized_type const &s) : data(other.data), slicing(s) { } // const getter template typename sliced_str::container_type const &sliced_str::get_data() const { return *data; } template typename S::normalized_type const &sliced_str::get_slice() const { return slicing; } // iterators template typename sliced_str::const_iterator sliced_str::begin() const { return typename sliced_str::const_iterator(data->c_str() + slicing.lower, slicing.step); } template typename sliced_str::const_iterator sliced_str::end() const { return typename sliced_str::const_iterator(data->c_str() + slicing.upper, slicing.step); } // size template long sliced_str::size() const { return slicing.size(); } // accessor template chr sliced_str::fast(long i) const { return 
(*data)[slicing.get(i)]; } template chr sliced_str::operator[](long i) const { if (i < 0) { i += size(); } return fast(i); } template template typename std::enable_if::value, sliced_str>::type sliced_str:: operator[](Sp const &s) const { return {*this, s.normalize(size())}; } // conversion template sliced_str::operator bool() const { return size() > 0; } template sliced_str::operator long() const { char const *iter = data->c_str() + slicing.lower; char const *end = data->c_str() + slicing.upper; while (iter < end && isblank(*iter)) iter += slicing.step; if (iter >= end) return 0; long neg = 1; if (*iter == '-') { iter += slicing.step; neg = -1; } long out = 0; for (; iter < end; iter += slicing.step) out = out * 10 + (*iter - '0'); return neg * out; } template bool sliced_str::operator!() const { return !bool(); } template bool sliced_str::contains(str const &v) const { return find(v) != std::string::npos; } template bool sliced_str::operator==(str const &v) const { if (size() != v.size()) return false; for (char const *iter = data->c_str() + slicing.lower, *end = data->c_str() + slicing.upper, *oter = v.data->c_str(); iter < end; iter += slicing.step, ++oter) if (*iter != *oter) return false; return true; } // io std::ostream &operator<<(std::ostream &os, types::chr const &v) { return os << v.c; } template std::ostream &operator<<(std::ostream &os, types::sliced_str const &v) { for (auto b = v.begin(); b != v.end(); ++b) os << *b; return os; } template str sliced_str::operator+(sliced_str const &s) { str out(*data); std::copy(s.begin(), s.end(), std::copy(begin(), end(), out.begin())); return out; } template size_t sliced_str::find(str const &s, size_t pos) const { return str(*this).find(s); // quite inefficient } template sliced_str &sliced_str::operator=(sliced_str const &s) { slicing = s.slicing; data = s.data; return *this; } template sliced_str &sliced_str::operator=(str const &s) { if (slicing.step == 1) { data->erase(slicing.lower, slicing.upper); 
data->insert(slicing.lower, s.chars()); } else assert("! implemented yet"); return *this; } /// str implementation str::str() : data() { } str::str(std::string const &s) : data(s) { } str::str(std::string &&s) : data(std::move(s)) { } str::str(const char *s) : data(s) { } template str::str(const char(&s)[N]) : data(s) { } str::str(const char *s, size_t n) : data(s, n) { } str::str(char c) : data(1, c) { } template str::str(sliced_str const &other) : data(other.size(), 0) { auto iter = chars().begin(); for (auto &&s : other) *iter++ = s.chars()[0]; } template str::str(T const &begin, T const &end) : data(begin, end) { } template str::str(T const &s) { std::ostringstream oss; oss << s; *data = oss.str(); } str::operator char() const { assert(size() == 1); return (*data)[0]; } str::operator long int() const { // Allows implicit conversion without loosing bool conversion char *endptr; auto dat = data->data(); long res = strtol(dat, &endptr, 10); if (endptr == dat) { std::ostringstream err; err << "invalid literal for long() with base 10:'" << c_str() << '\''; throw std::runtime_error(err.str()); } return res; } str::operator float() const { char *endptr; auto dat = data->data(); float res = strtof(dat, &endptr); if (endptr == dat) { std::ostringstream err; err << "invalid literal for float():'" << c_str() << "'"; throw std::runtime_error(err.str()); } return res; } str::operator double() const { char *endptr; auto dat = data->data(); double res = strtod(dat, &endptr); if (endptr == dat) { std::ostringstream err; err << "invalid literal for double():'" << c_str() << "'"; throw std::runtime_error(err.str()); } return res; } template str &str::operator=(sliced_str const &other) { auto &other_data = other.get_data(); auto &other_slice = other.get_slice(); auto other_size = other.size(); data = decltype(data)(); // Don't use the original buffer auto &my_data = *data; long j = 0L; if (other_size > size()) resize(other_size); for (long i = other_slice.lower; i < 
other_slice.upper; i = i + other_slice.step, j++) my_data[j] = other_data[i]; if (j < size()) resize(j); return *this; } str &str::operator+=(str const &s) { *data += *s.data; return *this; } str &str::operator+=(chr const &s) { *data += s.c; return *this; } long str::size() const { return data->size(); } typename str::iterator str::begin() const { return {data->begin()}; } typename str::reverse_iterator str::rbegin() const { return {data->rbegin()}; } typename str::iterator str::end() const { return {data->end()}; } typename str::reverse_iterator str::rend() const { return {data->rend()}; } auto str::c_str() const -> decltype(data->c_str()) { return data->c_str(); } auto str::resize(long n) -> decltype(data->resize(n)) { return data->resize(n); } long str::find(str const &s, size_t pos) const { const char *res = strstr(c_str() + pos, s.c_str()); return res ? res - c_str() : -1; } bool str::contains(str const &v) const { return find(v) != -1; } long str::find_first_of(str const &s, size_t pos) const { return data->find_first_of(*s.data, pos); } long str::find_first_of(const char *s, size_t pos) const { return data->find_first_of(s, pos); } long str::find_first_not_of(str const &s, size_t pos) const { return data->find_first_not_of(*s.data, pos); } long str::find_last_not_of(str const &s, size_t pos) const { return data->find_last_not_of(*s.data, pos); } str str::substr(size_t pos, size_t len) const { return data->substr(pos, len); } bool str::empty() const { return data->empty(); } int str::compare(size_t pos, size_t len, str const &str) const { return data->compare(pos, len, *str.data); } void str::reserve(size_t n) { data->reserve(n); } str &str::replace(size_t pos, size_t len, str const &str) { data->replace(pos, len, *str.data); return *this; } template str &str::operator+=(sliced_str const &other) { resize(size() + other.get_data().size()); std::copy(other.begin(), other.end(), begin()); return *this; } bool str::operator==(str const &other) const { return 
*data == *other.data; } bool str::operator!=(str const &other) const { return *data != *other.data; } bool str::operator<=(str const &other) const { return *data <= *other.data; } bool str::operator<(str const &other) const { return *data < *other.data; } bool str::operator>=(str const &other) const { return *data >= *other.data; } bool str::operator>(str const &other) const { return *data > *other.data; } template bool str::operator==(sliced_str const &other) const { if (size() != other.size()) return false; for (long i = other.get_slice().lower, j = 0L; j < size(); i = i + other.get_slice().step, j++) if (other.get_data()[i] != chars()[j]) return false; return true; } bool str::operator==(chr other) const { return size() == 1 && (*data)[0] == other.c; } template typename std::enable_if::value, sliced_str>::type str:: operator()(S const &s) const { return operator[](s); } chr str::operator[](long i) const { if (i < 0) i += size(); return fast(i); } chr str::fast(long i) const { return (*data)[i]; } template typename std::enable_if::value, sliced_str>::type str:: operator[](S const &s) const { return {*this, s.normalize(size())}; } str::operator bool() const { return !data->empty(); } long str::count(types::str const &sub) const { long counter = 0; for (long z = find(sub); // begin by looking for sub z != -1; // as long as we don't reach the end z = find(sub, z + sub.size())) // look for another one { ++counter; } return counter; } str operator+(str const &self, str const &other) { return str(self.chars() + other.chars()); } str operator+(chr const &self, chr const &other) { char tmp[2] = {self.c, other.c}; return str(&tmp[0], 2); } str operator+(chr const &self, str const &other) { return str(self.c + other.chars()); } str operator+(str const &self, chr const &other) { return str(self.chars() + other.c); } template str operator+(str const &self, char const(&other)[N]) { std::string s; s.reserve(self.size() + N); s += self.chars(); s += other; return 
{std::move(s)}; } template str operator+(chr const &self, char const(&other)[N]) { std::string s; s.reserve(1 + N); s += self.c; s += other; return {std::move(s)}; } template str operator+(char const(&self)[N], str const &other) { std::string s; s.reserve(other.size() + N); s += self; s += other.chars(); return {std::move(s)}; } template str operator+(char const(&self)[N], chr const &other) { std::string s; s.reserve(1 + N); s += self; s += other.c; return {std::move(s)}; } template bool operator==(char const(&self)[N], str const &other) { return other == self; } bool operator==(chr self, str const &other) { return other == self; } std::ostream &operator<<(std::ostream &os, str const &s) { return os << s.c_str(); } } namespace operator_ { template auto mod(const char(&fmt)[N], Arg &&arg) -> decltype(types::str(fmt) % std::forward(arg)) { return types::str(fmt) % std::forward(arg); } pythonic::types::str add(char const *self, char const *other) { pythonic::types::str res{self}; res += other; return res; } pythonic::types::str mul(long self, char const *other) { return pythonic::types::str{other} * self; } pythonic::types::str mul(char const *self, long other) { return pythonic::types::str{self} * other; } } PYTHONIC_NS_END pythonic::types::str operator*(pythonic::types::str const &s, long n) { if (n <= 0) return pythonic::types::str(); pythonic::types::str other; other.resize(s.size() * n); auto where = other.chars().begin(); for (long i = 0; i < n; i++, where += s.size()) std::copy(s.chars().begin(), s.chars().end(), where); return other; } pythonic::types::str operator*(long t, pythonic::types::str const &s) { return s * t; } pythonic::types::str operator*(pythonic::types::chr const &s, long n) { if (n <= 0) return pythonic::types::str(); pythonic::types::str other; other.resize(n); std::fill(other.chars().begin(), other.chars().end(), s.c); return other; } pythonic::types::str operator*(long t, pythonic::types::chr const &c) { return c * t; } namespace std { 
size_t hash:: operator()(const pythonic::types::str &x) const { return hash()(x.chars()); } size_t hash:: operator()(const pythonic::types::chr &x) const { return x.c; } template pythonic::types::str get(pythonic::types::str const &t) { return pythonic::types::str(t[I]); } } #ifdef ENABLE_PYTHON_MODULE #ifndef PyString_FromStringAndSize #define PyString_FromStringAndSize PyUnicode_FromStringAndSize #ifndef PyString_Check #define PyString_Check(x) PyUnicode_Check(x) && PyUnicode_IS_COMPACT_ASCII(x) #endif #ifndef PyString_AS_STRING #define PyString_AS_STRING (char *) _PyUnicode_COMPACT_DATA #endif #ifndef PyString_GET_SIZE #define PyString_GET_SIZE PyUnicode_GET_LENGTH #endif #endif PYTHONIC_NS_BEGIN PyObject *to_python::convert(types::str const &v) { return PyString_FromStringAndSize(v.c_str(), v.size()); } PyObject *to_python::convert(types::chr const &v) { return PyString_FromStringAndSize(&v.c, 1); } template PyObject * to_python>::convert(types::sliced_str const &v) { return ::to_python(types::str(v)); } bool from_python::is_convertible(PyObject *obj) { return PyString_Check(obj); } types::str from_python::convert(PyObject *obj) { return {PyString_AS_STRING(obj), (size_t)PyString_GET_SIZE(obj)}; } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/traits.hpp000066400000000000000000000001721416264035500224540ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_TRAITS_HPP #define PYTHONIC_TYPES_TRAITS_HPP #include "pythonic/include/types/traits.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/tuple.hpp000066400000000000000000000451161416264035500223060ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_TUPLE_HPP #define PYTHONIC_TYPES_TUPLE_HPP #include "pythonic/include/types/tuple.hpp" #include "pythonic/types/assignable.hpp" #include "pythonic/types/traits.hpp" #include "pythonic/types/nditerator.hpp" #include "pythonic/types/dynamic_tuple.hpp" #include "pythonic/utils/int_.hpp" #include "pythonic/utils/seq.hpp" #include 
"pythonic/utils/nested_container.hpp" #include "pythonic/types/ndarray.hpp" #include #include namespace std { template bool operator==(pair const &self, tuple const &other) { return self.first == get<0>(other) && self.second == get<1>(other); } template bool operator==(pair const &self, tuple const &other) { return self.first == get<0>(other) && self.second == get<1>(other); } } template std::tuple operator+(std::tuple const &t0, std::tuple const &t1) { return std::tuple_cat(t0, t1); } template std::tuple operator+(std::tuple &&t0, std::tuple const &t1) { return std::tuple_cat(std::forward(t0), t1); } template std::tuple operator+(std::tuple const &t0, std::tuple &&t1) { return std::tuple_cat(t0, std::forward(t1)); } template std::tuple operator+(std::tuple &&t0, std::tuple &&t1) { return std::tuple_cat(std::forward(t0), std::forward(t1)); } PYTHONIC_NS_BEGIN namespace types { /* helper to extract the tail of a tuple, && pop the head */ template std::tuple tuple_tail(std::tuple const &t) { return make_tuple_tail<0>(t, utils::make_index_sequence{}); } template array_base array_to_array(A const &a, utils::index_sequence) { return {(T)std::get(a)...}; } /* inspired by std::array implementation */ template template long array_base::_flat_size(E const &e, utils::int_<1>) const { return N; } template template long array_base::_flat_size(E const &e, utils::int_) const { return N * _flat_size(e[0], utils::int_{}); } template long array_base::flat_size() const { return _flat_size(*this, utils::int_{}); } template intptr_t array_base::id() const { return reinterpret_cast(&(buffer[0])); } template void array_base::fill(const value_type &__u) { std::fill_n(begin(), size(), __u); } // Iterators. 
template typename array_base::iterator array_base::begin() noexcept { return {data()}; } template typename array_base::const_iterator array_base::begin() const noexcept { return {data()}; } template typename array_base::iterator array_base::end() noexcept { return {data() + N}; } template typename array_base::const_iterator array_base::end() const noexcept { return {data() + N}; } template typename array_base::reverse_iterator array_base::rbegin() noexcept { return reverse_iterator(end()); } template typename array_base::const_reverse_iterator array_base::rbegin() const noexcept { return const_reverse_iterator(end()); } template typename array_base::reverse_iterator array_base::rend() noexcept { return reverse_iterator(begin()); } template typename array_base::const_reverse_iterator array_base::rend() const noexcept { return const_reverse_iterator(begin()); } template typename array_base::const_iterator array_base::cbegin() const noexcept { return {&(buffer[0])}; } template typename array_base::const_iterator array_base::cend() const noexcept { return {&(buffer[N])}; } template typename array_base::const_reverse_iterator array_base::crbegin() const noexcept { return const_reverse_iterator(end()); } template typename array_base::const_reverse_iterator array_base::crend() const noexcept { return const_reverse_iterator(begin()); } // Capacity. template constexpr typename array_base::size_type array_base::size() const noexcept { return N; } template constexpr typename array_base::size_type array_base::max_size() const noexcept { return N; } template constexpr bool array_base::empty() const noexcept { return size() == 0; } // Element access. 
template typename array_base::reference array_base::fast(long n) { assert(n < (long)size()); return buffer[n]; } template typename array_base::const_reference array_base::fast(long n) const noexcept { assert(n < (long)size()); return buffer[n]; } #ifdef USE_XSIMD template template typename array_base::simd_iterator array_base::vbegin(vectorizer) const { return {&buffer[0]}; } template template typename array_base::simd_iterator array_base::vend(vectorizer) const { using vector_type = typename xsimd::simd_type; static const std::size_t vector_size = vector_type::size; return {&buffer[long(size() / vector_size * vector_size)]}; } #endif template typename array_base::reference array_base:: operator[](long __n) { auto const index = __n < 0 ? (__n + size()) : __n; assert(0 <= index && index < size()); return buffer[index]; } template typename array_base::const_reference array_base:: operator[](long __n) const noexcept { auto const index = __n < 0 ? (__n + size()) : __n; assert(0 <= index && index < size()); return buffer[index]; } template typename array_base::reference array_base::front() { return *begin(); } template typename array_base::const_reference array_base::front() const { return *begin(); } template typename array_base::reference array_base::back() { return N ? *(end() - 1) : *end(); } template typename array_base::const_reference array_base::back() const { return N ? 
*(end() - 1) : *end(); } template typename array_base::pointer array_base::data() noexcept { return &(buffer[0]); } template typename array_base::const_pointer array_base::data() const noexcept { return &(buffer[0]); } template template bool array_base::operator==(array_base const &other) const { return N == M && std::equal(begin(), end(), other.begin()); } template template bool array_base::operator!=(array_base const &other) const { return !(*this == other); } template template bool array_base::operator<(array_base const &other) const { return std::lexicographical_compare(begin(), end(), other.begin(), other.end()); } template template array_base::type, N + M, V> array_base:: operator+(array_base const &other) const { array_base::type, N + M, V> result; auto next = std::copy(begin(), end(), result.begin()); std::copy(other.begin(), other.end(), next); return result; } template template array_base::operator std::tuple() const { return array_to_tuple(*this, utils::make_index_sequence{}, typename utils::type_sequence{}); } template template array_base::operator array_base() const { return array_to_array(*this, utils::make_index_sequence{}); } template auto array_base::to_tuple() const -> decltype(array_to_tuple(*this, utils::make_index_sequence{}, utils::make_repeated_type())) { return array_to_tuple(*this, utils::make_index_sequence{}, utils::make_repeated_type()); } template template array_base array_base::to_array() const { return reinterpret_cast const &>(*this); } /* array */ template std::ostream &operator<<(std::ostream &os, types::array_base const &v) { os << "(["[std::is_same::value]; auto iter = v.begin(); if (iter != v.end()) { while (iter + 1 != v.end()) os << *iter++ << ", "; os << *iter; } return os << ")]"[std::is_same::value]; } template auto operator+(std::tuple const &t, types::array_base const <) -> decltype(std::tuple_cat(t, lt.to_tuple())) { return std::tuple_cat(t, lt.to_tuple()); } template auto operator+(types::array_base const <, std::tuple 
const &t) -> decltype(std::tuple_cat(lt.to_tuple(), t)) { return std::tuple_cat(lt.to_tuple(), t); } template dynamic_tuple array_base_slicer::operator()(array const &b, slice const &s) { normalized_slice ns = s.normalize(b.size()); array tmp; for (long j = 0; j < ns.size(); ++j) tmp[j] = b[ns.lower + j * ns.step]; return {&tmp[0], &tmp[ns.size()]}; } template dynamic_tuple array_base_slicer::operator()(array const &b, contiguous_slice const &s) { contiguous_normalized_slice cns = s.normalize(b.size()); return {&b[cns.lower], &b[cns.upper]}; } template dynamic_tuple array_base_slicer::operator()(array const &b, fast_contiguous_slice const &s) { contiguous_normalized_slice cns = s.normalize(b.size()); return {&b[cns.lower], &b[cns.upper]}; } } PYTHONIC_NS_END /* hashable tuples, as proposed in * http://stackoverflow.com/questions/7110301/generic-hash-for-tuples-in-unordered-map-unordered-set */ namespace { inline size_t hash_combiner(size_t left, size_t right) // replacable { return left ^ right; } template size_t hash_impl:: operator()(size_t a, const std::tuple &t) const { using nexttype = typename std::tuple_element>::type; hash_impl next; size_t b = std::hash()(std::get(t)); return next(hash_combiner(a, b), t); } template size_t hash_impl<0, types...>::operator()(size_t a, const std::tuple &t) const { using nexttype = typename std::tuple_element<0, std::tuple>::type; size_t b = std::hash()(std::get<0>(t)); return hash_combiner(a, b); } } /* specialize std::hash */ namespace std { template size_t hash>:: operator()(std::tuple const &t) const { const size_t begin = std::tuple_size>::value - 1; return hash_impl()(1, t); // 1 should be some largervalue } template size_t hash>:: operator()(pythonic::types::array_base const &l) const { size_t seed = 0; hash h; for (auto const &iter : l) seed ^= h(iter) + 0x9e3779b9 + (seed << 6) + (seed >> 2); return seed; } } PYTHONIC_NS_BEGIN namespace types { template void print_tuple(std::ostream &os, Tuple const &t, utils::int_) 
{ print_tuple(os, t, utils::int_()); os << ", " << std::get(t); } template void print_tuple(std::ostream &os, Tuple const &t, utils::int_<0>) { os << std::get<0>(t); } } PYTHONIC_NS_END namespace std { template ostream &operator<<(ostream &os, tuple const &t) { os << '('; pythonic::types::print_tuple(os, t, pythonic::utils::int_()); return os << ')'; } } #ifdef ENABLE_PYTHON_MODULE #include "pythonic/include/utils/seq.hpp" #include "pythonic/include/utils/fwd.hpp" #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template PyObject *to_python>::convert(std::pair const &t) { PyObject *out = PyTuple_New(2); PyTuple_SET_ITEM(out, 0, ::to_python(std::get<0>(t))); PyTuple_SET_ITEM(out, 1, ::to_python(std::get<1>(t))); return out; } template PyObject * to_python>::convert(types::pshape const &t) { return ::to_python(t.array()); } template template PyObject *to_python>:: do_convert(std::tuple const &t, utils::index_sequence) { PyObject *out = PyTuple_New(sizeof...(Types)); (void)std::initializer_list{ (PyTuple_SET_ITEM(out, S, ::to_python(std::get(t))), true)...}; return out; } template PyObject * to_python>::convert(std::tuple const &t) { return do_convert(t, utils::make_index_sequence()); } template template PyObject *to_python>::do_convert(types::array const &t, utils::index_sequence) { PyObject *out = PyTuple_New(N); (void)std::initializer_list{ (PyTuple_SET_ITEM(out, S, ::to_python(std::get(t))), true)...}; return out; } template template PyObject *to_python>::do_convert( types::static_list const &t, utils::index_sequence) { PyObject *out = PyList_New(N); (void)std::initializer_list{ (PyList_SET_ITEM(out, S, ::to_python(std::get(t))), true)...}; return out; } template PyObject *to_python>::convert(types::array const &t) { return do_convert(t, utils::make_index_sequence()); } template PyObject * to_python>::convert(types::static_list const &t) { return do_convert(t, utils::make_index_sequence()); } template template bool from_python> ::do_is_convertible(PyObject 
*obj, typename utils::index_sequence) { bool checks[] = {::is_convertible< typename std::tuple_element>::type>( PyTuple_GET_ITEM(obj, S))...}; return std::find(std::begin(checks), std::end(checks), false) == std::end(checks); } template bool from_python>::is_convertible(PyObject *obj) { if (PyTuple_Check(obj)) { auto n = PyTuple_GET_SIZE(obj); if (n == sizeof...(Types)) { return do_is_convertible(obj, utils::make_index_sequence()); } } return false; } template template std::tuple from_python>::do_convert( PyObject *obj, typename utils::index_sequence) { return std::tuple{ ::from_python>::type>( PyTuple_GET_ITEM(obj, S))...}; } template std::tuple from_python>::convert(PyObject *obj) { return do_convert(obj, utils::make_index_sequence()); } template bool from_python>:: is_convertible(PyObject *obj) { if (PyTuple_Check(obj)) { auto n = PyTuple_GET_SIZE(obj); if (n == N) { return ::is_convertible(PyTuple_GET_ITEM(obj, 0)); } } return false; } template template types::array from_python>::do_convert( PyObject *obj, typename utils::index_sequence) { return {::from_python(PyTuple_GET_ITEM(obj, S))...}; } template types::array from_python>:: convert(PyObject *obj) { return do_convert(obj, utils::make_index_sequence()); } PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/types/uint16.hpp000066400000000000000000000001721416264035500222740ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_UINT16_HPP #define PYTHONIC_TYPES_UINT16_HPP #include "pythonic/include/types/uint16.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/uint32.hpp000066400000000000000000000001721416264035500222720ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_UINT32_HPP #define PYTHONIC_TYPES_UINT32_HPP #include "pythonic/include/types/uint32.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/uint64.hpp000066400000000000000000000001721416264035500222770ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_UINT64_HPP #define PYTHONIC_TYPES_UINT64_HPP #include 
"pythonic/include/types/uint64.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/uint8.hpp000066400000000000000000000001671416264035500222210ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_UINT8_HPP #define PYTHONIC_TYPES_UINT8_HPP #include "pythonic/include/types/uint8.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/uintc.hpp000066400000000000000000000001671416264035500222740ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_UINTC_HPP #define PYTHONIC_TYPES_UINTC_HPP #include "pythonic/include/types/uintc.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/uintp.hpp000066400000000000000000000001671416264035500223110ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_UINTP_HPP #define PYTHONIC_TYPES_UINTP_HPP #include "pythonic/include/types/uintp.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/types/variant_functor.hpp000066400000000000000000000165711416264035500243640ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_VARIANT_FUNCTOR_HPP #define PYTHONIC_TYPES_VARIANT_FUNCTOR_HPP #include "pythonic/include/types/variant_functor.hpp" #include "pythonic/utils/meta.hpp" #include #include PYTHONIC_NS_BEGIN namespace types { namespace details { template variant_functor_impl::variant_functor_impl(char mem[], Type const &t) : fun(new (mem) Type(t)) { } template template variant_functor_impl::variant_functor_impl(char mem[], OtherType const &t) : fun(nullptr) { } template variant_functor_impl::variant_functor_impl( char mem[], variant_functor_impl const &t) : fun(t.fun ? 
new (mem) Type(*t.fun) : nullptr) { } template template variant_functor_impl::variant_functor_impl( char mem[], variant_functor_impl const &t) : variant_functor_impl(mem, t.head) { } template template variant_functor_impl::variant_functor_impl( char mem[], variant_functor_impl const &t) : variant_functor_impl(mem, t.tail) { } template void variant_functor_impl::assign(char mem[], variant_functor_impl const &other) { if (fun != nullptr) fun->~Type(); if (other.fun) fun = new (mem) Type(*other.fun); } template void variant_functor_impl::assign(char mem[], variant_functor const &other) { assign(mem, static_cast const &>(other)); } template template void variant_functor_impl::assign( char mem[], variant_functor_impl const &other) { assign(mem, other.head); assign(mem, other.tail); } template template void variant_functor_impl::assign( char mem[], variant_functor const &other) { assign(mem, static_cast const &>( other)); } template template void variant_functor_impl::assign( char mem[], variant_functor_impl const &other) { } template template void variant_functor_impl::assign(char mem[], variant_functor const &other) { } template void variant_functor_impl::assign(char mem[], Type const &other) { if (fun != nullptr) fun->~Type(); fun = new (mem) Type(other); } template variant_functor_impl::~variant_functor_impl() { if (fun != nullptr) fun->~Type(); } template template void variant_functor_impl::assign(char mem[], OtherType const &other) { } template template auto variant_functor_impl::operator()(Args &&... args) -> decltype(std::declval()(std::forward(args)...)) { assert(fun && "handler defined"); return (*fun)(std::forward(args)...); } template template auto variant_functor_impl::operator()(Args &&... args) const -> decltype(std::declval()(std::forward(args)...)) { assert(fun && "handler defined"); return (*fun)(std::forward(args)...); } template template variant_functor_impl::variant_functor_impl( char mem[], OtherTypes const &... t) : head(mem, t...), tail(mem, t...) 
{ } template template variant_functor_impl::variant_functor_impl( char mem[], variant_functor_impl const &t) : head(mem, t), tail(mem, t) { } template void variant_functor_impl::assign( char mem[], variant_functor_impl const &other) { head.assign(mem, other); tail.assign(mem, other); } template template void variant_functor_impl::assign(char mem[], OtherType const &other) { head.assign(mem, other); tail.assign(mem, other); } template template auto variant_functor_impl::operator()(Args &&... args) -> typename __combined()(args...)), decltype(std::declval()(args...))...>::type { if (head.fun) return head(args...); else return tail(args...); } template template auto variant_functor_impl::operator()(Args &&... args) const -> typename __combined()(args...)), decltype(std::declval()(args...))...>::type { if (head.fun) return head(args...); else return tail(args...); } } template variant_functor::variant_functor(variant_functor const &other) : details::variant_functor_impl( mem, static_cast const &>(other)) { } template variant_functor &variant_functor:: operator=(variant_functor const &other) { details::variant_functor_impl::assign(mem, other); return *this; } template template variant_functor &variant_functor:: operator=(variant_functor const &other) { details::variant_functor_impl::assign(mem, other); return *this; } template template variant_functor &variant_functor:: operator=(OtherType const &other) { static_assert( utils::any_of::value...>::value, "consistent assign"); details::variant_functor_impl::assign(mem, other); return *this; } template template variant_functor::variant_functor(OtherTypes const &... t) : details::variant_functor_impl(mem, t...) 
{ } template template variant_functor::variant_functor( variant_functor const &t) : details::variant_functor_impl( mem, static_cast const &>( t)) { } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/types/vectorizable_type.hpp000066400000000000000000000142251416264035500247040ustar00rootroot00000000000000#ifndef PYTHONIC_TYPES_VECTORIZABLE_TYPE_HPP #define PYTHONIC_TYPES_VECTORIZABLE_TYPE_HPP #include "pythonic/include/types/vectorizable_type.hpp" #include "pythonic/include/numpy/bool_.hpp" #include "pythonic/include/numpy/uint8.hpp" #include "pythonic/include/numpy/int8.hpp" #include "pythonic/include/numpy/uint16.hpp" #include "pythonic/include/numpy/int16.hpp" #include "pythonic/include/numpy/uint32.hpp" #include "pythonic/include/numpy/int32.hpp" #include "pythonic/include/numpy/uint64.hpp" #include "pythonic/include/numpy/int64.hpp" #include "pythonic/include/numpy/float32.hpp" #include "pythonic/include/numpy/float64.hpp" PYTHONIC_NS_BEGIN namespace operator_ { namespace functor { struct mod; struct div; } } namespace builtins { namespace pythran { namespace functor { struct abssqr; } } } namespace numpy { namespace functor { struct arctan2; struct angle_in_rad; struct asarray_chkfinite; struct clip; struct copysign; struct divide; struct fix; struct floor_divide; struct fmod; struct heaviside; struct hypot; struct isfinite; struct isinf; struct isnan; struct isposinf; struct ldexp; struct logaddexp; struct logaddexp2; struct maximum; struct minimum; struct nan_to_num; struct nextafter; struct power; struct remainder; struct rint; struct signbit; struct spacing; struct true_divide; struct where; } } namespace scipy { namespace special { namespace functor { struct hankel1; struct hankel2; struct jv; struct iv; struct kv; struct yv; struct jvp; struct ivp; struct kvp; struct yvp; struct spherical_jn; struct spherical_yn; } } } namespace types { template struct is_vector_op { // vectorize everything but these ops. 
They require special handling for // vectorization, && SG did not invest enough time in those static const bool value = !std::is_same::value && (!std::is_same::value || utils::all_of()( std::declval()...))>::value...>::value) && !std::is_same::value && // Return type for generic function should be generic !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && // conditional processing doesn't permit SIMD !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && // not supported by xsimd !std::is_same::value && !std::is_same::value && // not supported for complex numbers !(utils::any_of< is_complex::type>::value...>::value && (std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value)) && // transtyping !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && // not supported for integral numbers !(utils::any_of::type>::value...>::value && (std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value)) && // special functions not in the scope of xsimd !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && !std::is_same::value && // true; }; } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/utils/000077500000000000000000000000001416264035500204315ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/utils/array_helper.hpp000066400000000000000000000026611416264035500236240ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_ARRAY_HELPER_HPP #define PYTHONIC_UTILS_ARRAY_HELPER_HPP #include "pythonic/include/utils/array_helper.hpp" #include "pythonic/types/tuple.hpp" PYTHONIC_NS_BEGIN /* recursively return the value at the position given by `indices' in the * `self' "array like". It may be a sub array instead of real value. * indices[0] is the coordinate for the first dimension && indices[M-1] is * for the last one. */ template template auto nget::operator()(A &&self, types::array const &indices) -> decltype(nget()(std::forward(self)[0], indices)) { return nget()(std::forward(self)[indices[M - L - 1]], indices); } template template auto nget::fast(A &&self, types::array const &indices) -> decltype(nget().fast(std::forward(self).fast(0), indices)) { return nget().fast(std::forward(self).fast(indices[M - L - 1]), indices); } template auto nget<0>::operator()(A &&self, types::array const &indices) -> decltype(std::forward(self)[indices[M - 1]]) { return std::forward(self)[indices[M - 1]]; } template auto nget<0>::fast(A &&self, types::array const &indices) -> decltype(std::forward(self).fast(indices[M - 1])) { return std::forward(self).fast(indices[M - 1]); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/utils/broadcast_copy.hpp000066400000000000000000000431321416264035500241410ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_BROADCAST_COPY_HPP #define PYTHONIC_UTILS_BROADCAST_COPY_HPP #include "pythonic/include/utils/broadcast_copy.hpp" #include "pythonic/types/tuple.hpp" #ifdef _OPENMP #include // as a macro so that an enlightened user can modify this variable :-) #ifndef PYTHRAN_OPENMP_MIN_ITERATION_COUNT #define PYTHRAN_OPENMP_MIN_ITERATION_COUNT 1000 #endif #endif PYTHONIC_NS_BEGIN namespace 
utils { /* helper for specialization of the broadcasting, vectorizing copy operator * due to expression templates, this may also triggers a lot of *computations! * * ``vector_form'' is set to true if the operation can be done using *Boost.SIMD * * the call operator has four template parameters: * * template * void operator()(E &&self, F const &other, utils::int_, utils::int_) * * ``E'' is the type of the object to which the data are copied * * ``F'' is the type of the object from which the data are copied * * ``N'' is the depth of the loop nest. When it reaches ``1'', we have a raw *loop * that may be vectorizable * * ``D'' is the delta between the number of dimensions of E && F. When set *to a * value greater than ``0'', some broadcasting is needed */ template struct _broadcast_copy; struct fast_novectorize { }; template <> struct _broadcast_copy { template void helper(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices, utils::index_sequence) { std::forward(self) .store((typename std::decay::type::dtype)other.load( (long)std::get(other_indices)...), (long)std::get(self_indices)...); } template void operator()(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices) { helper(std::forward(self), other, self_indices, other_indices, utils::make_index_sequence::type>::value>()); } }; template struct _broadcast_copy { template void operator()(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices) { long const other_size = other.template shape::type::value - N>(); long const self_size = self.template shape::type::value - N>(); if (self_size == other_size) for (long i = 0; i < self_size; ++i) _broadcast_copy{}( std::forward(self), other, std::tuple_cat(self_indices, std::make_tuple(i)), std::tuple_cat(other_indices, std::make_tuple(i))); else for (long i = 0; i < self_size; ++i) _broadcast_copy{}( std::forward(self), other, std::tuple_cat(self_indices, std::make_tuple(i)), 
std::tuple_cat(other_indices, std::make_tuple(0))); } }; template struct _broadcast_copy { template void operator()(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices) { using broadcaster = typename std::conditional< types::is_dtype::value, types::broadcast::type::dtype>, types::broadcasted>::type; _broadcast_copy{}( std::forward(self), broadcaster(other), std::forward(self_indices), std::forward(other_indices)); } }; template struct _broadcast_copy { template void operator()(E &&self, F const &other, Indices... indices) { long self_size = std::distance(self.begin(), self.end()), other_size = std::distance(other.begin(), other.end()); #ifdef _OPENMP if (other_size >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) { auto siter = self.begin(); auto oiter = other.begin(); #pragma omp parallel for for (long i = 0; i < other_size; ++i) *(siter + i) = *(oiter + i); } else #endif std::copy(other.begin(), other.end(), self.begin()); // eventually repeat the pattern #ifdef _OPENMP if (self_size >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT * other_size) #pragma omp parallel for for (long i = other_size; i < self_size; i += other_size) std::copy_n(self.begin(), other_size, self.begin() + i); else #endif for (long i = other_size; i < self_size; i += other_size) std::copy_n(self.begin(), other_size, self.begin() + i); } }; // ``D'' is not ``0'' so we should broadcast template struct _broadcast_copy { template void operator()(E &&self, F const &other) { if (types::is_dtype::value) { std::fill(self.begin(), self.end(), other); } else { auto sfirst = self.begin(); *sfirst = other; #ifdef _OPENMP auto siter = sfirst; long n = self.template shape<0>(); if (n >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) #pragma omp parallel for for (long i = 1; i < n; ++i) *(siter + i) = *sfirst; else #endif std::fill(self.begin() + 1, self.end(), *sfirst); } } template void operator()(E &&self, F const &other, ES, FS) { if (types::is_dtype::value) { std::fill(self.begin(), self.end(), 
other); } else { auto sfirst = self.begin(); *sfirst = other; #ifdef _OPENMP auto siter = sfirst; long n = self.template shape<0>(); if (n >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) #pragma omp parallel for for (long i = 1; i < n; ++i) *(siter + i) = *sfirst; else #endif std::fill(self.begin() + 1, self.end(), *sfirst); } } }; #ifdef USE_XSIMD // specialize for SIMD only if available // otherwise use the std::copy fallback template void vbroadcast_copy(E &&self, F const &other) { using T = typename F::dtype; using vT = xsimd::simd_type; static const std::size_t vN = vT::size; long self_size = std::distance(self.begin(), self.end()), other_size = std::distance(other.begin(), other.end()); auto oiter = vectorizer::vbegin(other); const long bound = std::distance(vectorizer::vbegin(other), vectorizer::vend(other)); #ifdef _OPENMP if (bound >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) { auto iter = vectorizer::vbegin(self); #pragma omp parallel for for (long i = 0; i < bound; ++i) { (iter + i).store(*(oiter + i)); } } else #endif for (auto iter = vectorizer::vbegin(self), end = vectorizer::vend(self); iter != end; ++iter, ++oiter) { iter.store(*oiter); } // tail { auto siter = self.begin(); auto oiter = other.begin(); for (long i = bound * vN; i < other_size; ++i) *(siter + i) = *(oiter + i); } #ifdef _OPENMP if (self_size >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT * other_size) #pragma omp parallel for for (long i = other_size; i < self_size; i += other_size) std::copy_n(self.begin(), other_size, self.begin() + i); else #endif for (long i = other_size; i < self_size; i += other_size) std::copy_n(self.begin(), other_size, self.begin() + i); } template <> struct _broadcast_copy { template void operator()(E &&self, F const &other) { return vbroadcast_copy(std::forward(self), other); } }; template <> struct _broadcast_copy { template void operator()(E &&self, F const &other) { return vbroadcast_copy( std::forward(self), other); } }; #endif template struct broadcast_copy_dispatcher; 
template struct broadcast_copy_dispatcher { void operator()(E &self, F const &other) { if (utils::no_broadcast_ex(other)) _broadcast_copy{}( self, other, std::make_tuple(), std::make_tuple()); else _broadcast_copy{}(self, other); } }; template struct broadcast_copy_dispatcher { void operator()(E &self, F const &other) { if (utils::no_broadcast_ex(other)) _broadcast_copy{}( self, other, std::make_tuple(), std::make_tuple()); else _broadcast_copy{}(self, other); } }; template E &broadcast_copy(E &self, F const &other) { if (self.size()) broadcast_copy_dispatcher{}(self, other); return self; } /* update */ // ``D'' is not ``0'' so we should broadcast template struct _broadcast_update { template void operator()(E &&self, F const &other) { long n = self.template shape<0>(); auto siter = self.begin(); #ifdef _OPENMP if (n >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) #pragma omp parallel for for (long i = 0; i < n; ++i) Op{}(*(siter + i), other); else #endif for (long i = 0; i < n; ++i) Op{}(*(siter + i), other); } }; template struct _broadcast_update { template void operator()(E &&self, F const &other) { long other_size = std::distance(other.begin(), other.end()); auto siter = self.begin(); auto oiter = other.begin(); #ifdef _OPENMP if (other_size >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) #pragma omp parallel for for (long i = 0; i < other_size; ++i) Op{}(*(siter + i), *(oiter + i)); else #endif if (other_size == 1) { auto value = *oiter; for (auto send = self.end(); siter != send; ++siter) Op{}(*siter, value); } else for (auto send = self.end(); siter != send;) { auto ooiter = oiter; for (long i = 0; i < other_size; ++i, ++siter, ++ooiter) Op{}(*siter, *ooiter); } } template void operator()(E &&self, types::broadcast const &other) { auto value = *other.begin(); for (auto siter = self.begin(), send = self.end(); siter != send; ++siter) Op{}(*siter, value); } template void operator()(E &&self, types::broadcasted const &other) { auto value = *other.end(); for (auto siter = 
self.begin(), send = self.end(); siter != send; ++siter) Op{}(*siter, value); } }; template struct _broadcast_update { template void helper(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices, utils::index_sequence) { self.template update(other.load((long)std::get(other_indices)...), (long)std::get(self_indices)...); } template void operator()(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices) { helper(std::forward(self), other, self_indices, other_indices, utils::make_index_sequence::type>::value>()); } }; template struct _broadcast_update { template void operator()(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices) { auto const other_size = other.template shape::type::value - N>(); auto const self_size = self.template shape::type::value - N>(); if (self_size == other_size) for (long i = 0; i < self_size; ++i) _broadcast_update{}( std::forward(self), other, std::tuple_cat(self_indices, std::make_tuple(i)), std::tuple_cat(other_indices, std::make_tuple(i))); else for (long i = 0; i < self_size; ++i) _broadcast_update{}( std::forward(self), other, std::tuple_cat(self_indices, std::make_tuple(i)), std::tuple_cat(other_indices, std::make_tuple(0))); } }; template struct _broadcast_update { template void operator()(E &&self, F const &other, SelfIndices &&self_indices, OtherIndices &&other_indices) { using broadcaster = typename std::conditional< types::is_dtype::value, types::broadcast::type::dtype>, types::broadcasted>::type; _broadcast_update{}( std::forward(self), broadcaster(other), std::forward(self_indices), std::forward(other_indices)); } }; #ifdef USE_XSIMD // specialize for SIMD only if available // otherwise use the std::copy fallback template void vbroadcast_update(E &&self, F const &other) { using T = typename F::dtype; using vT = typename xsimd::simd_type; long other_size = std::distance(other.begin(), other.end()); static const std::size_t vN = vT::size; auto 
oiter = vectorizer::vbegin(other); auto iter = vectorizer::vbegin(self); const long bound = std::distance(vectorizer::vbegin(other), vectorizer::vend(other)); #ifdef _OPENMP if (bound >= PYTHRAN_OPENMP_MIN_ITERATION_COUNT) #pragma omp parallel for for (long i = 0; i < bound; i++) { (iter + i).store(Op{}(*(iter + i), *(oiter + i))); } else #endif for (auto end = vectorizer::vend(self); iter != end; ++iter, ++oiter) { iter.store(Op{}(*iter, *oiter)); } // tail { auto siter = self.begin(); auto oiter = other.begin(); for (long i = bound * vN; i < other_size; ++i) Op{}(*(siter + i), *(oiter + i)); } } template void vbroadcast_update(E &&self, types::broadcast const &other) { auto value = *other.begin(); for (auto siter = self.begin(), send = self.end(); siter != send; ++siter) Op{}(*siter, value); } template void vbroadcast_update(E &&self, types::broadcasted const &other) { auto value = *other.end(); for (auto siter = self.begin(), send = self.end(); siter != send; ++siter) Op{}(*siter, value); } template struct _broadcast_update { template void operator()(Args &&... args) { vbroadcast_update(std::forward(args)...); } }; template struct _broadcast_update { template void operator()(Args &&... 
args) { vbroadcast_update( std::forward(args)...); } }; #endif template struct broadcast_update_dispatcher; template struct broadcast_update_dispatcher { void operator()(E &self, F const &other) { if (utils::no_broadcast_ex(other)) _broadcast_update{}( self, other, std::make_tuple(), std::make_tuple()); else _broadcast_update{}(self, other); } }; template struct broadcast_update_dispatcher { void operator()(E &self, F const &other) { if (utils::no_broadcast_ex(other)) _broadcast_update{}( self, other, std::make_tuple(), std::make_tuple()); else _broadcast_update{}(self, other); } }; template E &broadcast_update(E &self, F const &other) { if (self.size()) broadcast_update_dispatcher{}(self, other); return self; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/utils/functor.hpp000066400000000000000000000001751416264035500226250ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_FUNCTOR_HPP #define PYTHONIC_UTILS_FUNCTOR_HPP #include "pythonic/include/utils/functor.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/utils/fwd.hpp000066400000000000000000000003641416264035500217250ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_FWD_HPP #define PYTHONIC_UTILS_FWD_HPP #include "pythonic/include/utils/fwd.hpp" PYTHONIC_NS_BEGIN namespace utils { template void fwd(Types const &... 
types) { } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/utils/int_.hpp000066400000000000000000000001621416264035500220720ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_INT_HPP #define PYTHONIC_UTILS_INT_HPP #include "pythonic/include/utils/int_.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/utils/iterator.hpp000066400000000000000000000016061416264035500227760ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_ITERATOR_HPP #define PYTHONIC_UTILS_ITERATOR_HPP #include "pythonic/include/utils/iterator.hpp" PYTHONIC_NS_BEGIN namespace utils { template comparable_iterator::comparable_iterator() : T() { } template comparable_iterator::comparable_iterator(T const &t) : T(t) { } template bool comparable_iterator::operator<(comparable_iterator other) { return (*this) != other; } template iterator_reminder::iterator_reminder(T const &v) : values(v) { } template iterator_reminder::iterator_reminder(T const &v) : values(v) { } template iterator_reminder::iterator_reminder( T const &v, Others const &... others) : values(v, others...) { } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/utils/meta.hpp000066400000000000000000000006721416264035500220750ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_META_HPP #define PYTHONIC_UTILS_META_HPP #include "pythonic/include/utils/meta.hpp" template struct static_assert_check { static_assert(C, "Assertion failed "); static constexpr bool value = C; }; #define pythran_static_assert(value, str, ...) 
\ static_assert(static_assert_check::value, str) #endif pythran-0.10.0+ds2/pythran/pythonic/utils/nested_container.hpp000066400000000000000000000015731416264035500244740ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_NESTED_CONTAINER_HPP #define PYTHONIC_UTILS_NESTED_CONTAINER_HPP #include "pythonic/include/utils/nested_container.hpp" #include #include "pythonic/types/traits.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace utils { template long nested_container_size::flat_size(T const &t) { return t.size() * nested_container_size::value, bool, typename Type::value_type>::type>::flat_size(*t.begin()); } /* Recursion stops on bool */ template constexpr long nested_container_size::flat_size(F) { return 1; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/utils/neutral.hpp000066400000000000000000000006151416264035500226160ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_NEUTRAL_HPP #define PYTHONIC_UTILS_NEUTRAL_HPP #include "pythonic/include/utils/neutral.hpp" #include "pythonic/operator_/iadd.hpp" #include "pythonic/operator_/iand.hpp" #include "pythonic/operator_/ior.hpp" #include "pythonic/operator_/imul.hpp" #include "pythonic/operator_/imax.hpp" #include "pythonic/operator_/imin.hpp" #include "pythonic/operator_/ixor.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/utils/numpy_conversion.hpp000066400000000000000000000035071416264035500245640ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_NUMPY_CONVERSION_HPP #define PYTHONIC_UTILS_NUMPY_CONVERSION_HPP #include "pythonic/include/utils/numpy_conversion.hpp" #include "pythonic/utils/numpy_traits.hpp" #if _MSC_VER && !__clang__ #define NUMPY_EXPR_TO_NDARRAY0_IMPL(fname) \ template ::value && \ types::is_array::value, \ E>::type * = nullptr> \ auto fname(E const &expr, Types &&... others) \ { \ return fname(types::ndarray{expr}, \ std::forward(others)...); \ } #else #define NUMPY_EXPR_TO_NDARRAY0_IMPL(fname) \ template \ auto fname(E const &expr, Types &&... 
others) \ ->typename std::enable_if< \ !types::is_ndarray::value && types::is_array::value, \ decltype(fname( \ types::ndarray{expr}, \ std::forward(others)...))>::type \ { \ return fname(types::ndarray{expr}, \ std::forward(others)...); \ } #endif #endif pythran-0.10.0+ds2/pythran/pythonic/utils/numpy_traits.hpp000066400000000000000000000002141416264035500236750ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_NUMPY_TRAITS_HPP #define PYTHONIC_UTILS_NUMPY_TRAITS_HPP #include "pythonic/include/utils/numpy_traits.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/utils/pdqsort.hpp000066400000000000000000000521671416264035500226510ustar00rootroot00000000000000/* pdqsort.hpp - Pattern-defeating quicksort. Copyright (c) 2015 Orson Peters This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef PDQSORT_HPP #define PDQSORT_HPP #include #include #include #include #include #if __cplusplus >= 201103L #include #include #define PDQSORT_PREFER_MOVE(x) std::move(x) #else #define PDQSORT_PREFER_MOVE(x) (x) #endif namespace pdqsort_detail { enum { // Partitions below this size are sorted using insertion sort. insertion_sort_threshold = 24, // Partitions above this size use Tukey's ninther to select the pivot. 
ninther_threshold = 128, // When we detect an already sorted partition, attempt an insertion sort // that allows this // amount of element moves before giving up. partial_insertion_sort_limit = 8, // Must be multiple of 8 due to loop unrolling, and < 256 to fit in unsigned // char. block_size = 64, // Cacheline size, assumes power of two. cacheline_size = 64 }; #if __cplusplus >= 201103L template struct is_default_compare : std::false_type { }; template struct is_default_compare> : std::true_type { }; template struct is_default_compare> : std::true_type { }; #endif // Returns floor(log2(n)), assumes n > 0. template inline int log2(T n) { int log = 0; while (n >>= 1) ++log; return log; } // Sorts [begin, end) using insertion sort with the given comparison function. template inline void insertion_sort(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; if (begin == end) return; for (Iter cur = begin + 1; cur != end; ++cur) { Iter sift = cur; Iter sift_1 = cur - 1; // Compare first so we can avoid 2 moves for an element already positioned // correctly. if (comp(*sift, *sift_1)) { T tmp = PDQSORT_PREFER_MOVE(*sift); do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); } while (sift != begin && comp(tmp, *--sift_1)); *sift = PDQSORT_PREFER_MOVE(tmp); } } } // Sorts [begin, end) using insertion sort with the given comparison function. // Assumes // *(begin - 1) is an element smaller than or equal to any element in [begin, // end). template inline void unguarded_insertion_sort(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; if (begin == end) return; for (Iter cur = begin + 1; cur != end; ++cur) { Iter sift = cur; Iter sift_1 = cur - 1; // Compare first so we can avoid 2 moves for an element already positioned // correctly. 
if (comp(*sift, *sift_1)) { T tmp = PDQSORT_PREFER_MOVE(*sift); do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); } while (comp(tmp, *--sift_1)); *sift = PDQSORT_PREFER_MOVE(tmp); } } } // Attempts to use insertion sort on [begin, end). Will return false if more // than // partial_insertion_sort_limit elements were moved, and abort sorting. // Otherwise it will // successfully sort and return true. template inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; if (begin == end) return true; std::size_t limit = 0; for (Iter cur = begin + 1; cur != end; ++cur) { Iter sift = cur; Iter sift_1 = cur - 1; // Compare first so we can avoid 2 moves for an element already positioned // correctly. if (comp(*sift, *sift_1)) { T tmp = PDQSORT_PREFER_MOVE(*sift); do { *sift-- = PDQSORT_PREFER_MOVE(*sift_1); } while (sift != begin && comp(tmp, *--sift_1)); *sift = PDQSORT_PREFER_MOVE(tmp); limit += cur - sift; } if (limit > partial_insertion_sort_limit) return false; } return true; } template inline void sort2(Iter a, Iter b, Compare comp) { if (comp(*b, *a)) std::iter_swap(a, b); } // Sorts the elements *a, *b and *c using comparison function comp. template inline void sort3(Iter a, Iter b, Iter c, Compare comp) { sort2(a, b, comp); sort2(b, c, comp); sort2(a, b, comp); } template inline T *align_cacheline(T *p) { #if defined(UINTPTR_MAX) && __cplusplus >= 201103L std::uintptr_t ip = reinterpret_cast(p); #else std::size_t ip = reinterpret_cast(p); #endif ip = (ip + cacheline_size - 1) & -cacheline_size; return reinterpret_cast(ip); } template inline void swap_offsets(Iter first, Iter last, unsigned char *offsets_l, unsigned char *offsets_r, int num, bool use_swaps) { typedef typename std::iterator_traits::value_type T; if (use_swaps) { // This case is needed for the descending distribution, where we need // to have proper swapping for pdqsort to remain O(n). 
for (int i = 0; i < num; ++i) { std::iter_swap(first + offsets_l[i], last - offsets_r[i]); } } else if (num > 0) { Iter l = first + offsets_l[0]; Iter r = last - offsets_r[0]; T tmp(PDQSORT_PREFER_MOVE(*l)); *l = PDQSORT_PREFER_MOVE(*r); for (int i = 1; i < num; ++i) { l = first + offsets_l[i]; *r = PDQSORT_PREFER_MOVE(*l); r = last - offsets_r[i]; *l = PDQSORT_PREFER_MOVE(*r); } *r = PDQSORT_PREFER_MOVE(tmp); } } // Partitions [begin, end) around pivot *begin using comparison function comp. // Elements equal // to the pivot are put in the right-hand partition. Returns the position of // the pivot after // partitioning and whether the passed sequence already was correctly // partitioned. Assumes the // pivot is a median of at least 3 elements and that [begin, end) is at least // insertion_sort_threshold long. Uses branchless partitioning. template inline std::pair partition_right_branchless(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; // Move pivot into local for speed. T pivot(PDQSORT_PREFER_MOVE(*begin)); Iter first = begin; Iter last = end; // Find the first element greater than or equal than the pivot (the median // of 3 guarantees // this exists). while (comp(*++first, pivot)) ; // Find the first element strictly smaller than the pivot. We have to guard // this search if // there was no element before *first. if (first - 1 == begin) while (first < last && !comp(*--last, pivot)) ; else while (!comp(*--last, pivot)) ; // If the first pair of elements that should be swapped to partition are the // same element, // the passed in sequence already was correctly partitioned. bool already_partitioned = first >= last; if (!already_partitioned) { std::iter_swap(first, last); ++first; } // The following branchless partitioning is derived from "BlockQuicksort: // How Branch // Mispredictions don’t affect Quicksort" by Stefan Edelkamp and Armin // Weiss. 
unsigned char offsets_l_storage[block_size + cacheline_size]; unsigned char offsets_r_storage[block_size + cacheline_size]; unsigned char *offsets_l = align_cacheline(offsets_l_storage); unsigned char *offsets_r = align_cacheline(offsets_r_storage); int num_l, num_r, start_l, start_r; num_l = num_r = start_l = start_r = 0; while (last - first > 2 * block_size) { // Fill up offset blocks with elements that are on the wrong side. if (num_l == 0) { start_l = 0; Iter it = first; for (unsigned char i = 0; i < block_size;) { offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; } } if (num_r == 0) { start_r = 0; Iter it = last; for (unsigned char i = 0; i < block_size;) { offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); } } // Swap elements and update block sizes and first/last boundaries. int num = std::min(num_l, num_r); swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r, num, num_l == num_r); num_l -= num; num_r -= num; start_l += num; start_r += num; if (num_l == 0) first += block_size; if (num_r == 0) last -= block_size; } int l_size = 0, r_size = 0; int unknown_left = (int)(last - first) - ((num_r || num_l) ? 
block_size : 0); if (num_r) { // Handle leftover block by assigning the unknown elements to the other // block. l_size = unknown_left; r_size = block_size; } else if (num_l) { l_size = block_size; r_size = unknown_left; } else { // No leftover block, split the unknown elements in two blocks. l_size = unknown_left / 2; r_size = unknown_left - l_size; } // Fill offset buffers if needed. if (unknown_left && !num_l) { start_l = 0; Iter it = first; for (unsigned char i = 0; i < l_size;) { offsets_l[num_l] = i++; num_l += !comp(*it, pivot); ++it; } } if (unknown_left && !num_r) { start_r = 0; Iter it = last; for (unsigned char i = 0; i < r_size;) { offsets_r[num_r] = ++i; num_r += comp(*--it, pivot); } } int num = std::min(num_l, num_r); swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r, num, num_l == num_r); num_l -= num; num_r -= num; start_l += num; start_r += num; if (num_l == 0) first += l_size; if (num_r == 0) last -= r_size; // We have now fully identified [first, last)'s proper position. Swap the // last elements. if (num_l) { offsets_l += start_l; while (num_l--) std::iter_swap(first + offsets_l[num_l], --last); first = last; } if (num_r) { offsets_r += start_r; while (num_r--) std::iter_swap(last - offsets_r[num_r], first), ++first; last = first; } // Put the pivot in the right place. Iter pivot_pos = first - 1; *begin = PDQSORT_PREFER_MOVE(*pivot_pos); *pivot_pos = PDQSORT_PREFER_MOVE(pivot); return std::make_pair(pivot_pos, already_partitioned); } // Partitions [begin, end) around pivot *begin using comparison function comp. // Elements equal // to the pivot are put in the right-hand partition. Returns the position of // the pivot after // partitioning and whether the passed sequence already was correctly // partitioned. Assumes the // pivot is a median of at least 3 elements and that [begin, end) is at least // insertion_sort_threshold long. 
template inline std::pair partition_right(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; // Move pivot into local for speed. T pivot(PDQSORT_PREFER_MOVE(*begin)); Iter first = begin; Iter last = end; // Find the first element greater than or equal than the pivot (the median // of 3 guarantees // this exists). while (comp(*++first, pivot)) ; // Find the first element strictly smaller than the pivot. We have to guard // this search if // there was no element before *first. if (first - 1 == begin) while (first < last && !comp(*--last, pivot)) ; else while (!comp(*--last, pivot)) ; // If the first pair of elements that should be swapped to partition are the // same element, // the passed in sequence already was correctly partitioned. bool already_partitioned = first >= last; // Keep swapping pairs of elements that are on the wrong side of the pivot. // Previously // swapped pairs guard the searches, which is why the first iteration is // special-cased // above. while (first < last) { std::iter_swap(first, last); while (comp(*++first, pivot)) ; while (!comp(*--last, pivot)) ; } // Put the pivot in the right place. Iter pivot_pos = first - 1; *begin = PDQSORT_PREFER_MOVE(*pivot_pos); *pivot_pos = PDQSORT_PREFER_MOVE(pivot); return std::make_pair(pivot_pos, already_partitioned); } // Similar function to the one above, except elements equal to the pivot are // put to the left of // the pivot and it doesn't check or return if the passed sequence already was // partitioned. // Since this is rarely used (the many equal case), and in that case pdqsort // already has O(n) // performance, no block quicksort is applied here for simplicity. 
template inline Iter partition_left(Iter begin, Iter end, Compare comp) { typedef typename std::iterator_traits::value_type T; T pivot(PDQSORT_PREFER_MOVE(*begin)); Iter first = begin; Iter last = end; while (comp(pivot, *--last)) ; if (last + 1 == end) while (first < last && !comp(pivot, *++first)) ; else while (!comp(pivot, *++first)) ; while (first < last) { std::iter_swap(first, last); while (comp(pivot, *--last)) ; while (!comp(pivot, *++first)) ; } Iter pivot_pos = last; *begin = PDQSORT_PREFER_MOVE(*pivot_pos); *pivot_pos = PDQSORT_PREFER_MOVE(pivot); return pivot_pos; } template inline void pdqsort_loop(Iter begin, Iter end, Compare comp, int bad_allowed, bool leftmost = true) { typedef typename std::iterator_traits::difference_type diff_t; // Use a while loop for tail recursion elimination. while (true) { diff_t size = end - begin; // Insertion sort is faster for small arrays. if (size < insertion_sort_threshold) { if (leftmost) insertion_sort(begin, end, comp); else unguarded_insertion_sort(begin, end, comp); return; } // Choose pivot as median of 3 or pseudomedian of 9. diff_t s2 = size / 2; if (size > ninther_threshold) { sort3(begin, begin + s2, end - 1, comp); sort3(begin + 1, begin + (s2 - 1), end - 2, comp); sort3(begin + 2, begin + (s2 + 1), end - 3, comp); sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp); std::iter_swap(begin, begin + s2); } else sort3(begin + s2, begin, end - 1, comp); // If *(begin - 1) is the end of the right partition of a previous // partition operation // there is no element in [begin, end) that is smaller than *(begin - 1). // Then if our // pivot compares equal to *(begin - 1) we change strategy, putting equal // elements in // the left partition, greater elements in the right partition. We do not // have to // recurse on the left partition, since it's sorted (all equal). if (!leftmost && !comp(*(begin - 1), *begin)) { begin = partition_left(begin, end, comp) + 1; continue; } // Partition and get results. 
std::pair part_result = Branchless ? partition_right_branchless(begin, end, comp) : partition_right(begin, end, comp); Iter pivot_pos = part_result.first; bool already_partitioned = part_result.second; // Check for a highly unbalanced partition. diff_t l_size = pivot_pos - begin; diff_t r_size = end - (pivot_pos + 1); bool highly_unbalanced = l_size < size / 8 || r_size < size / 8; // If we got a highly unbalanced partition we shuffle elements to break // many patterns. if (highly_unbalanced) { // If we had too many bad partitions, switch to heapsort to guarantee // O(n log n). if (--bad_allowed == 0) { std::make_heap(begin, end, comp); std::sort_heap(begin, end, comp); return; } if (l_size >= insertion_sort_threshold) { std::iter_swap(begin, begin + l_size / 4); std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4); if (l_size > ninther_threshold) { std::iter_swap(begin + 1, begin + (l_size / 4 + 1)); std::iter_swap(begin + 2, begin + (l_size / 4 + 2)); std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1)); std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2)); } } if (r_size >= insertion_sort_threshold) { std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4)); std::iter_swap(end - 1, end - r_size / 4); if (r_size > ninther_threshold) { std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4)); std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4)); std::iter_swap(end - 2, end - (1 + r_size / 4)); std::iter_swap(end - 3, end - (2 + r_size / 4)); } } } else { // If we were decently balanced and we tried to sort an already // partitioned // sequence try to use insertion sort. if (already_partitioned && partial_insertion_sort(begin, pivot_pos, comp) && partial_insertion_sort(pivot_pos + 1, end, comp)) return; } // Sort the left partition first using recursion and do tail recursion // elimination for // the right-hand partition. 
pdqsort_loop(begin, pivot_pos, comp, bad_allowed, leftmost); begin = pivot_pos + 1; leftmost = false; } } } template inline void pdqsort(Iter begin, Iter end, Compare comp) { if (begin == end) return; #if __cplusplus >= 201103L pdqsort_detail::pdqsort_loop< Iter, Compare, pdqsort_detail::is_default_compare< typename std::decay::type>::value && std::is_arithmetic::value_type>::value>( begin, end, comp, pdqsort_detail::log2(end - begin)); #else pdqsort_detail::pdqsort_loop( begin, end, comp, pdqsort_detail::log2(end - begin)); #endif } template inline void pdqsort(Iter begin, Iter end) { typedef typename std::iterator_traits::value_type T; pdqsort(begin, end, std::less()); } template inline void pdqsort_branchless(Iter begin, Iter end, Compare comp) { if (begin == end) return; pdqsort_detail::pdqsort_loop( begin, end, comp, pdqsort_detail::log2(end - begin)); } template inline void pdqsort_branchless(Iter begin, Iter end) { typedef typename std::iterator_traits::value_type T; pdqsort_branchless(begin, end, std::less()); } #undef PDQSORT_PREFER_MOVE #endif pythran-0.10.0+ds2/pythran/pythonic/utils/reserve.hpp000066400000000000000000000004551416264035500226210ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_RESERVE_HPP #define PYTHONIC_UTILS_RESERVE_HPP #include "pythonic/include/utils/reserve.hpp" PYTHONIC_NS_BEGIN namespace utils { template void reserve(Container &, From &&) // do nothing unless specialized { } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/utils/seq.hpp000066400000000000000000000001611416264035500217300ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_SEQ_HPP #define PYTHONIC_UTILS_SEQ_HPP #include "pythonic/include/utils/seq.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/utils/shared_ref.hpp000066400000000000000000000053411416264035500232470ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_SHARED_REF_HPP #define PYTHONIC_UTILS_SHARED_REF_HPP #include "pythonic/include/utils/shared_ref.hpp" #include #include #include #ifdef 
_OPENMP #include #endif PYTHONIC_NS_BEGIN namespace utils { /** Light-weight shared_ptr like-class * * Unlike std::shared_ptr, it allocates the memory itself using new. */ template template shared_ref::memory::memory(Types &&... args) : ptr(std::forward(args)...), count(1), foreign(nullptr) { } template shared_ref::shared_ref(no_memory const &) noexcept : mem(nullptr) { } template shared_ref::shared_ref(no_memory &&) noexcept : mem(nullptr) { } template template shared_ref::shared_ref(Types &&... args) : mem(new (std::nothrow) memory(std::forward(args)...)) { } template shared_ref::shared_ref(shared_ref &&p) noexcept : mem(p.mem) { p.mem = nullptr; } template shared_ref::shared_ref(shared_ref const &p) noexcept : mem(p.mem) { if (mem) acquire(); } template shared_ref::shared_ref(shared_ref &p) noexcept : mem(p.mem) { if (mem) acquire(); } template shared_ref::~shared_ref() noexcept { dispose(); } template void shared_ref::swap(shared_ref &rhs) noexcept { using std::swap; swap(mem, rhs.mem); } template shared_ref &shared_ref::operator=(shared_ref p) noexcept { swap(p); return *this; } template T &shared_ref::operator*() const noexcept { assert(mem); return mem->ptr; } template T *shared_ref::operator->() const noexcept { assert(mem); return &mem->ptr; } template bool shared_ref::operator!=(shared_ref const &other) const noexcept { return mem != other.mem; } template bool shared_ref::operator==(shared_ref const &other) const noexcept { return mem == other.mem; } template void shared_ref::external(extern_type obj_ptr) { assert(mem); mem->foreign = obj_ptr; } template inline extern_type shared_ref::get_foreign() { assert(mem); return mem->foreign; } template void shared_ref::dispose() { if (mem && --mem->count == 0) { #ifdef ENABLE_PYTHON_MODULE if (mem->foreign) { Py_DECREF(mem->foreign); } #endif delete mem; mem = nullptr; } } template void shared_ref::acquire() { assert(mem); ++mem->count; } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/utils/tags.hpp000066400000000000000000000002311416264035500220740ustar00rootroot00000000000000#ifndef PYTHONIC_UTILS_TAGS_HPP #define PYTHONIC_UTILS_TAGS_HPP #include "pythonic/include/utils/tags.hpp" #include "pythonic/types/traits.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/utils/yield.hpp000066400000000000000000000010021416264035500222410ustar00rootroot00000000000000#ifndef PYTHRAN_UTILS_YIELD_HPP #define PYTHRAN_UTILS_YIELD_HPP #include "pythonic/include/utils/yield.hpp" /* * This contains base class for yielders */ #include "pythonic/types/generator.hpp" PYTHONIC_NS_BEGIN yielder::yielder() : __generator_state(0) { } bool yielder::operator!=(yielder const &other) const { return __generator_state != other.__generator_state; } bool yielder::operator==(yielder const &other) const { return __generator_state == other.__generator_state; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythran-darwin.cfg000066400000000000000000000002611416264035500210630ustar00rootroot00000000000000[compiler] defines= undefs= include_dirs= libs= library_dirs= cflags=-std=c++11 -fno-math-errno -Wno-unused-function ldflags= blas=blas CC= CXX= ignoreflags=-Wstrict-prototypes pythran-0.10.0+ds2/pythran/pythran-default.cfg000066400000000000000000000002141416264035500212210ustar00rootroot00000000000000[compiler] defines= undefs= include_dirs= libs= library_dirs= cflags=-std=c++11 ldflags= blas=blas CC= CXX= ignoreflags=-Wstrict-prototypes pythran-0.10.0+ds2/pythran/pythran-linux.cfg000066400000000000000000000004471416264035500207440ustar00rootroot00000000000000[compiler] defines= undefs= include_dirs= libs= library_dirs= cflags=-std=c++11 -fno-math-errno -fvisibility=hidden -fno-wrapv -Wno-unused-function -Wno-int-in-bool-context -Wno-unknown-warning-option ldflags=-fvisibility=hidden -Wl,-strip-all blas=blas CC= CXX= ignoreflags=-Wstrict-prototypes 
pythran-0.10.0+ds2/pythran/pythran-linux2.cfg000066400000000000000000000004471416264035500210260ustar00rootroot00000000000000[compiler] defines= undefs= include_dirs= libs= library_dirs= cflags=-std=c++11 -fno-math-errno -fvisibility=hidden -fno-wrapv -Wno-unused-function -Wno-int-in-bool-context -Wno-unknown-warning-option ldflags=-fvisibility=hidden -Wl,-strip-all blas=blas CC= CXX= ignoreflags=-Wstrict-prototypes pythran-0.10.0+ds2/pythran/pythran-win32.cfg000066400000000000000000000002401416264035500205360ustar00rootroot00000000000000[compiler] defines= undefs= include_dirs= libs= library_dirs= cflags=/std:c++14 /w ldflags= blas=pythran-openblas CC=clang-cl.exe CXX=clang-cl.exe ignoreflags= pythran-0.10.0+ds2/pythran/pythran.cfg000066400000000000000000000031631416264035500176050ustar00rootroot00000000000000[pythran] # optimization chain used by Pythran # It's a list of space separated optimization to apply in the given order optimizations = pythran.optimizations.InlineBuiltins pythran.optimizations.Inlining pythran.optimizations.RemoveDeadFunctions pythran.optimizations.ForwardSubstitution pythran.optimizations.DeadCodeElimination pythran.optimizations.ConstantFolding pythran.optimizations.PartialConstantFolding pythran.optimizations.SimplifyExcept pythran.optimizations.IterTransformation pythran.optimizations.LoopFullUnrolling pythran.optimizations.ModIndex pythran.transformations.FalsePolymorphism pythran.optimizations.PatternTransform pythran.optimizations.Square pythran.optimizations.RangeLoopUnfolding pythran.optimizations.RangeBasedSimplify pythran.optimizations.ListToTuple pythran.optimizations.TupleToShape complex_hook = False [typing] # maximum number of combiner per user function # increasing this value inreases typing accuracy # but slows down compilation time, to the point of making g++ crash max_combiner = 2 # set this to true to enable a costly yet more accurate type inference algorithm # This algorithms generates code difficult to compile for 
g++, but not clang++ enable_two_steps_typing = False # above this number of overloads, pythran specifications are considered invalid # as it generates ultra-large binaries max_export_overloads = 128 pythran-0.10.0+ds2/pythran/run.py000066400000000000000000000173501416264035500166200ustar00rootroot00000000000000#!/usr/bin/env python """ Script to run Pythran file compilation with specified g++ like flags. """ import argparse import logging import os import sys import pythran from distutils.errors import CompileError logger = logging.getLogger("pythran") def convert_arg_line_to_args(arg_line): """Read argument from file in a prettier way.""" for arg in arg_line.split(): if not arg.strip(): continue yield arg def compile_flags(args): """ Build a dictionnary with an entry for cppflags, ldflags, and cxxflags. These options are filled according to the command line defined options """ compiler_options = { 'define_macros': args.defines, 'undef_macros': args.undefs, 'include_dirs': args.include_dirs, 'extra_compile_args': args.extra_flags, 'library_dirs': args.libraries_dir, 'extra_link_args': args.extra_flags, 'config': args.config, } for param in ('opts', ): val = getattr(args, param, None) if val: compiler_options[param] = val return compiler_options def run(): prefix_chars = "-" if os.name == "nt": prefix_chars += "/" parser = argparse.ArgumentParser(prog='pythran', description='pythran: a python to C++ ' 'compiler', epilog="It's a megablast!", prefix_chars=prefix_chars, fromfile_prefix_chars="@") parser.add_argument('input_file', type=str, help='the pythran module to compile, ' 'either a .py or a .cpp file') parser.add_argument('-o', dest='output_file', type=str, help='path to generated file. 
Honors %%{ext}.') parser.add_argument('-P', dest='optimize_only', action='store_true', help='only run the high-level optimizer, ' 'do not compile') parser.add_argument('-E', dest='translate_only', action='store_true', help='only run the translator, do not compile') parser.add_argument('-e', dest='raw_translate_only', action='store_true', help='similar to -E, ' 'but does not generate python glue') parser.add_argument('-v', dest='verbose', action='store_true', help='be more verbose') parser.add_argument('-w', dest='warn_off', action='store_true', help='be less verbose') parser.add_argument('-V', '--version', action='version', version=pythran.version.__version__) parser.add_argument('-p', dest='opts', metavar='pass', action='append', help='any pythran optimization to apply before code ' 'generation', default=list()) parser.add_argument('-I', dest='include_dirs', metavar='include_dir', action='append', help='any include dir relevant to the underlying C++ ' 'compiler', default=list()) parser.add_argument('-L', dest='libraries_dir', metavar='ldflags', action='append', help='any search dir relevant to the linker', default=list()) parser.add_argument('-D', dest='defines', metavar='macro_definition', action='append', help='any macro definition relevant to ' 'the underlying C++ compiler', default=list()) parser.add_argument('-U', dest='undefs', metavar='macro_definition', action='append', help='any macro undef relevant to ' 'the underlying C++ compiler', default=list()) parser.add_argument('--config', dest='config', metavar='config', action='append', help='config additional params', default=list()) parser.convert_arg_line_to_args = convert_arg_line_to_args args, extra = parser.parse_known_args(sys.argv[1:]) args.extra_flags = extra if args.raw_translate_only: args.translate_only = True args.undefs.append('ENABLE_PYTHON_MODULE') if args.verbose and args.warn_off: logger.critical("Unexpected combination: -w and -v? 
Daoubennek?") sys.exit(1) if args.verbose: logger.setLevel(logging.INFO) if args.warn_off: logger.setLevel(logging.ERROR) if args.config: pythran.config.update_cfg(pythran.config.cfg, args.config) if args.verbose and not args.warn_off: pythran.config.lint_cfg(pythran.config.cfg) try: if not os.path.exists(args.input_file): raise ValueError("input file `{0}' not found".format( args.input_file)) module_name, ext = os.path.splitext(os.path.basename(args.input_file)) # FIXME: do we want to support other ext than .cpp? if ext not in ['.cpp', '.py']: raise SyntaxError("Unsupported file extension: '{0}'".format(ext)) if ext == '.cpp': if args.optimize_only: raise ValueError("Do you really ask for Python-to-Python " "on this C++ input file: '{0}'?".format( args.input_file)) if args.translate_only: raise ValueError("Do you really ask for Python-to-C++ " "on this C++ input file: '{0}'?".format( args.input_file)) pythran.compile_cxxfile(module_name, args.input_file, args.output_file, **compile_flags(args)) else: # assume we have a .py input file here pythran.compile_pythranfile(args.input_file, output_file=args.output_file, cpponly=args.translate_only, pyonly=args.optimize_only, **compile_flags(args)) except IOError as e: logger.critical("I've got a bad feeling about this...\n" "E: " + str(e)) sys.exit(1) except ValueError as e: logger.critical("Chair to keyboard interface error\n" "E: " + str(e)) sys.exit(1) except pythran.types.tog.PythranTypeError as e: logger.critical("You shall not pass!\n" "E: " + str(e)) sys.exit(1) except pythran.syntax.PythranSyntaxError as e: logger.critical("I am in trouble. Your input file does not seem " "to match Pythran's constraints...\n" + str(e)) sys.exit(1) except CompileError as e: logger.critical("Cover me Jack. Jack? 
Jaaaaack!!!!\n" "E: " + str(e)) sys.exit(1) except NotImplementedError: logger.critical("MAYDAY, MAYDAY, MAYDAY; pythran compiler; " "code area out of control\n" "E: not implemented feature needed, " "bash the developers") raise # Why ? we may instead display the stacktrace and exit? except EnvironmentError as e: logger.critical("By Jove! Your environment does not seem " "to provide all what we need\n" "E: " + str(e)) sys.exit(1) if __name__ == '__main__': run() pythran-0.10.0+ds2/pythran/spec.py000066400000000000000000000431001416264035500167360ustar00rootroot00000000000000''' This module provides a dummy parser for pythran annotations. * spec_parser reads the specs from a python module and returns them. ''' from pythran.types.conversion import pytype_to_pretty_type from collections import defaultdict from itertools import product import re import ply.lex as lex import ply.yacc as yacc from pythran.typing import List, Set, Dict, NDArray, Tuple, Pointer, Fun from pythran.syntax import PythranSyntaxError from pythran.config import cfg def ambiguous_types(ty0, ty1): from numpy import complex64, complex128 from numpy import float32, float64 from numpy import int8, int16, int32, int64, intp, intc from numpy import uint8, uint16, uint32, uint64, uintp, uintc try: from numpy import complex256, float128 except ImportError: complex256 = complex128 float128 = float64 if isinstance(ty0, tuple): if len(ty0) != len(ty1): return False return all(ambiguous_types(t0, t1) for t0, t1 in zip(ty0, ty1)) ambiguous_float_types = float, float64 if ty0 in ambiguous_float_types and ty1 in ambiguous_float_types: return True ambiguous_cplx_types = complex, complex128 if ty0 in ambiguous_cplx_types and ty1 in ambiguous_cplx_types: return True ambiguous_int_types = int64, int if ty0 in ambiguous_int_types and ty1 in ambiguous_int_types: return True if type(ty0) is not type(ty1): return False if not hasattr(ty0, '__args__'): return False if type(ty0) is NDArray: # no ambiguity for dtype return 
ambiguous_types(ty0.__args__[1:], ty1.__args__[1:]) else: return ambiguous_types(ty0.__args__, ty1.__args__) def istransposed(t): if not isinstance(t, NDArray): return False if len(t.__args__) - 1 != 2: return False return t.__args__[1] == t.__args__[2] == slice(-1, None, None) def istransposable(t): if not isinstance(t, NDArray): return False if len(t.__args__) - 1 != 2: return False return all(s.step == 1 for s in t.__args__[1:]) class Spec(object): ''' Result of spec parsing. ``functions'' is a mapping from function name to a tuple of signatures ``capsule'' is a mapping from function name to signature ''' def __init__(self, functions, capsules=None): self.functions = dict(functions) self.capsules = capsules or dict() # normalize function signatures for fname, signatures in functions.items(): if not isinstance(signatures, tuple): self.functions[fname] = (signatures,) if not self: import logging logging.warning("No pythran specification, " "nothing will be exported") def keys(self): return list(self.functions.keys()) + list(self.capsules.keys()) def __bool__(self): return bool(self.functions or self.capsules) __nonzero__ = __bool__ def to_docstrings(self, docstrings): for func_name, signatures in self.functions.items(): sigdocs = signatures_to_string(func_name, signatures) docstring_prototypes = 'Supported prototypes:\n{}'.format(sigdocs) docstring_py = docstrings.get(func_name, '') if not docstring_py: docstring = docstring_prototypes else: parts = docstring_py.split('\n\n', 1) docstring = parts[0] + '\n\n ' + docstring_prototypes if len(parts) == 2: docstring += '\n\n' + parts[1] docstrings[func_name] = docstring class SpecParser(object): """ A parser that scans a file lurking for lines such as the one below. It then generates a pythran-compatible signature to inject into compile. 
#pythran export a((float,(int, uint8),str list) list list) #pythran export a(str) #pythran export a( (str,str), int, int16 list list) #pythran export a( {str} ) """ # lex part dtypes = { 'bool': 'BOOL', 'byte': 'BYTE', 'complex': 'COMPLEX', 'int': 'INT', 'float': 'FLOAT', 'uint8': 'UINT8', 'uint16': 'UINT16', 'uint32': 'UINT32', 'uint64': 'UINT64', 'uintc': 'UINTC', 'uintp': 'UINTP', 'int8': 'INT8', 'int16': 'INT16', 'int32': 'INT32', 'int64': 'INT64', 'intc': 'INTC', 'intp': 'INTP', 'float32': 'FLOAT32', 'float64': 'FLOAT64', 'float128': 'FLOAT128', 'complex64': 'COMPLEX64', 'complex128': 'COMPLEX128', 'complex256': 'COMPLEX256', } reserved = { '#pythran': 'PYTHRAN', 'export': 'EXPORT', 'order': 'ORDER', 'capsule': 'CAPSULE', 'or': 'OR', 'list': 'LIST', 'set': 'SET', 'dict': 'DICT', 'slice': 'SLICE', 'str': 'STR', 'None': 'NONE', } reserved.update(dtypes) tokens = ('IDENTIFIER', 'NUM', 'COLUMN', 'LPAREN', 'RPAREN', 'CRAP', 'OPT', 'LARRAY', 'RARRAY', 'STAR', 'COMMA') + tuple(reserved.values()) crap = [tok for tok in tokens if tok != 'PYTHRAN'] some_crap = [tok for tok in crap if tok not in ('LPAREN', 'COMMA')] # token <> regexp binding t_CRAP = r'[^,:\(\)\[\]*?0-9]' t_COMMA = r',' t_COLUMN = r':' t_LPAREN = r'\(' t_RPAREN = r'\)' t_RARRAY = r'\]' t_LARRAY = r'\[' t_STAR = r'\*' t_OPT = r'\?' 
t_NUM = r'[1-9][0-9]*' precedence = ( ('left', 'OR'), ('left', 'LIST', 'DICT', 'SET'), ) def t_IDENTIFER(self, t): t.type = SpecParser.reserved.get(t.value, 'IDENTIFIER') return t t_IDENTIFER.__doc__ = r'\#?[a-zA-Z_][a-zA-Z_0-9]*' # skipped characters t_ignore = ' \t\n\r' # error handling def t_error(self, t): t.lexer.skip(1) # yacc part def p_exports(self, p): if len(p) > 1: isnative = len(p) == 6 target = self.exports if len(p) == 6 else self.native_exports for key, val in p[len(p)-3]: target[key] += val p_exports.__doc__ = '''exports : | PYTHRAN EXPORT export_list opt_craps exports | PYTHRAN EXPORT CAPSULE export_list opt_craps exports''' def p_export_list(self, p): p[0] = (p[1],) if len(p) == 2 else (p[1] + (p[3],)) p_export_list.__doc__ = '''export_list : export | export_list COMMA export''' def p_export(self, p): # unlikely case: the IDENTIFIER is an otherwise reserved name if len(p) > 2: sigs = p[3] or ((),) else: sigs = () p[0] = p[1], sigs self.export_info[p[1]] += p.lexpos(1), p_export.__doc__ = '''export : IDENTIFIER LPAREN opt_param_types RPAREN | IDENTIFIER | EXPORT LPAREN opt_param_types RPAREN | ORDER LPAREN opt_param_types RPAREN''' def p_opt_craps(self, p): pass p_opt_craps.__doc__ = '''opt_craps : | some_crap opt_all_craps''' def p_opt_all_craps(self, p): pass p_opt_all_craps.__doc__ = '''opt_all_craps : | crap opt_all_craps''' def p_crap(self, p): pass p_crap.__doc__ = 'crap : ' + '\n| '.join(crap) def p_some_crap(self, p): pass p_some_crap.__doc__ = 'some_crap : ' + '\n| '.join(some_crap) def p_dtype(self, p): import numpy p[0] = eval(p[1], numpy.__dict__), p_dtype.__doc__ = 'dtype : ' + '\n| '.join(dtypes.values()) def p_opt_param_types(self, p): p[0] = p[1] if len(p) == 2 else tuple() p_opt_param_types.__doc__ = '''opt_param_types : | param_types''' def p_opt_types(self, p): p[0] = p[1] if len(p) == 2 else tuple() p_opt_types.__doc__ = '''opt_types : | types''' def p_param_types(self, p): if len(p) == 2: p[0] = tuple((t,) for t in p[1]) elif 
len(p) == 3: p[0] = tuple((t,) for t in p[1]) + ((),) elif len(p) == 4: p[0] = tuple((t,) + ts for t in p[1] for ts in p[3]) else: p[0] = tuple((t,) + ts for t in p[1] for ts in p[4]) + ((),) p_param_types.__doc__ = '''param_types : type | type OPT | type COMMA param_types | type OPT COMMA default_types''' def p_default_types(self, p): if len(p) == 3: p[0] = tuple((t,) for t in p[1]) + ((),) else: p[0] = tuple((t,) + ts for t in p[1] for ts in p[4]) + ((),) p_default_types.__doc__ = '''default_types : type OPT | type OPT COMMA default_types''' def p_types(self, p): if len(p) == 2: p[0] = tuple((t,) for t in p[1]) else: p[0] = tuple((t,) + ts for t in p[1] for ts in p[3]) p_types.__doc__ = '''types : type | type COMMA types''' def p_array_type(self, p): if len(p) == 2: p[0] = p[1][0], elif len(p) == 5 and p[4] == ']': def args(t): return t.__args__ if isinstance(t, NDArray) else (t,) p[0] = tuple(NDArray[args(t) + p[3]] for t in p[1]) p_array_type.__doc__ = '''array_type : dtype | array_type LARRAY array_indices RARRAY''' def p_type(self, p): if len(p) == 2: p[0] = p[1], elif len(p) == 3 and p[2] == 'list': p[0] = tuple(List[t] for t in p[1]) elif len(p) == 3 and p[2] == 'set': p[0] = tuple(Set[t] for t in p[1]) elif len(p) == 3: if p[2] is None: expanded = [] for nd in p[1]: expanded.append(nd) if istransposable(nd): expanded.append(NDArray[nd.__args__[0], -1::, -1::]) p[0] = tuple(expanded) elif p[2] == "F": for nd in p[1]: if not istransposable(nd): msg = ("Invalid Pythran spec. 
F order is only valid " "for 2D plain arrays") self.p_error(p, msg) p[0] = tuple(NDArray[nd.__args__[0], -1::, -1::] for nd in p[1]) else: p[0] = p[1] elif len(p) == 5 and p[4] == ')': p[0] = tuple(Fun[args, r] for r in p[1] for args in (product(*p[3]) if len(p[3]) > 1 else p[3])) elif len(p) == 5: p[0] = tuple(Dict[k, v] for k in p[1] for v in p[3]) elif len(p) == 4 and p[2] == 'or': p[0] = p[1] + p[3] elif len(p) == 4 and p[3] == ')': p[0] = tuple(Tuple[t] for t in p[2]) elif len(p) == 4 and p[3] == ']': p[0] = p[2] else: msg = "Invalid Pythran spec. Unknown text '{0}'".format(p.value) self.p_error(p, msg) p_type.__doc__ = '''type : term | array_type opt_order | pointer_type | type LIST | type SET | type LPAREN opt_types RPAREN | type COLUMN type DICT | LPAREN types RPAREN | LARRAY type RARRAY | type OR type ''' def p_opt_order(self, p): if len(p) > 1: if p[3] not in 'CF': msg = "Invalid Pythran spec. Unknown order '{}'".format(p[3]) self.p_error(p, msg) p[0] = p[3] else: p[0] = None p_opt_order.__doc__ = '''opt_order : | ORDER LPAREN IDENTIFIER RPAREN''' def p_pointer_type(self, p): p[0] = Pointer[p[1][0]] p_pointer_type.__doc__ = '''pointer_type : dtype STAR''' def p_array_indices(self, p): if len(p) == 2: p[0] = p[1], else: p[0] = (p[1],) + p[3] p_array_indices.__doc__ = '''array_indices : array_index | array_index COMMA array_indices''' def p_array_index(self, p): if len(p) == 3: p[0] = slice(0, -1, -1) elif len(p) == 1 or p[1] == ':': p[0] = slice(0, -1, 1) else: p[0] = slice(0, int(p[1]), 1) p_array_index.__doc__ = '''array_index : | NUM | COLUMN | COLUMN COLUMN''' def p_term(self, p): if p[1] == 'str': p[0] = str elif p[1] == 'slice': p[0] = slice elif p[1] == 'None': p[0] = type(None) else: p[0] = p[1][0] p_term.__doc__ = '''term : STR | NONE | SLICE | dtype''' def PythranSpecError(self, msg, lexpos=None): err = PythranSyntaxError(msg) if lexpos is not None: line_start = self.input_text.rfind('\n', 0, lexpos) + 1 err.offset = lexpos - line_start 
err.lineno = 1 + self.input_text.count('\n', 0, lexpos) if self.input_file: err.filename = self.input_file return err def p_error(self, p): if p.type == 'IDENTIFIER': raise self.PythranSpecError( "Unexpected identifier `{}` at that point".format(p.value), p.lexpos) else: raise self.PythranSpecError( "Unexpected token `{}` at that point".format(p.value), p.lexpos) def __init__(self): self.lexer = lex.lex(module=self, debug=False) # Do not write the table for better compatibility across ply version self.parser = yacc.yacc(module=self, debug=False, write_tables=False) def __call__(self, text, input_file=None): self.exports = defaultdict(tuple) self.native_exports = defaultdict(tuple) self.export_info = defaultdict(tuple) self.input_text = text self.input_file = input_file lines = [] in_pythran_export = False for line in text.split("\n"): if re.match(r'\s*#\s*pythran', line): in_pythran_export = True lines.append(re.sub(r'\s*#\s*pythran', '#pythran', line)) elif in_pythran_export: stripped = line.strip() if stripped.startswith('#'): lines.append(line.replace('#', '')) else: in_pythran_export = not stripped lines.append('') else: in_pythran_export &= not line.strip() lines.append('') pythran_data = '\n'.join(lines) self.parser.parse(pythran_data, lexer=self.lexer, debug=False) for key, overloads in self.native_exports.items(): if len(overloads) > 1: msg = "Overloads not supported for capsule '{}'".format(key) loc = self.export_info[key][-1] raise self.PythranSpecError(msg, loc) self.native_exports[key] = overloads[0] for key, overloads in self.exports.items(): if len(overloads) > cfg.getint("typing", "max_export_overloads"): raise self.PythranSpecError( "Too many overloads for function '{}', probably due to " "automatic generation of C-style and Fortran-style memory " "layout. 
Please force a layout using `order(C)` or " "`order(F)` in the array signature".format(key)) for i, ty_i in enumerate(overloads): sty_i = spec_to_string(key, ty_i) for ty_j in overloads[i+1:]: sty_j = spec_to_string(key, ty_j) if sty_i == sty_j: msg = "Duplicate export entry {}.".format(sty_i) loc = self.export_info[key][-1] raise self.PythranSpecError(msg, loc) if ambiguous_types(ty_i, ty_j): msg = "Ambiguous overloads\n\t{}\n\t{}.".format(sty_i, sty_j) loc = self.export_info[key][i] raise self.PythranSpecError(msg, loc) return Spec(self.exports, self.native_exports) class ExtraSpecParser(SpecParser): ''' Extension of SpecParser that works on extra .pythran files ''' def __call__(self, text, input_file=None): # make the code looks like a regular pythran file text = re.sub(r'^\s*export', '#pythran export', text, flags=re.MULTILINE) return super(ExtraSpecParser, self).__call__(text, input_file) def spec_to_string(function_name, spec): arguments_types = [pytype_to_pretty_type(t) for t in spec] return '{}({})'.format(function_name, ', '.join(arguments_types)) def signatures_to_string(func_name, signatures): # filter out transposed version, they are confusing for some users # and can generate very long docstring that break MSVC sigdocs = [spec_to_string(func_name, sig) for sig in signatures if not any(istransposed(t) for t in sig)] if not sigdocs: sigdocs = [spec_to_string(func_name, sig) for sig in signatures] return ''.join('\n - ' + sigdoc for sigdoc in sigdocs) def spec_parser(text): return SpecParser()(text) def load_specfile(filepath): with open(filepath) as fd: return ExtraSpecParser()(fd.read(), input_file=filepath) pythran-0.10.0+ds2/pythran/syntax.py000066400000000000000000000237601416264035500173440ustar00rootroot00000000000000''' This module performs a few early syntax check on the input AST. It checks the conformance of the input code to Pythran specific constraints. 
''' from pythran.tables import MODULES from pythran.intrinsic import Class from pythran.typing import Tuple, List, Set, Dict from pythran.utils import isstr import gast as ast import logging import numpy as np logger = logging.getLogger('pythran') class PythranSyntaxError(SyntaxError): def __init__(self, msg, node=None): SyntaxError.__init__(self, msg) if node: self.filename = getattr(node, 'filename', None) self.lineno = node.lineno self.offset = node.col_offset def __str__(self): if self.filename and self.lineno and self.offset: with open(self.filename) as f: for i in range(self.lineno - 1): f.readline() # and drop it extra = '{}\n{}'.format(f.readline().rstrip(), " " * (self.offset) + "^~~~ (o_0)") else: extra = None r = "{}:{}:{} error: {}\n".format(self.filename or "", self.lineno, self.offset, self.args[0]) if extra is not None: r += "----\n" r += extra r += "\n----\n" return r class SyntaxChecker(ast.NodeVisitor): """ Visit an AST and raise a PythranSyntaxError upon unsupported construct. Attributes ---------- attributes : {str} Possible attributes from Pythonic modules/submodules. """ def __init__(self): """ Gather attributes from MODULES content. """ self.attributes = set() def save_attribute(module): """ Recursively save Pythonic keywords as possible attributes. 
""" self.attributes.update(module.keys()) for signature in module.values(): if isinstance(signature, dict): save_attribute(signature) elif isinstance(signature, Class): save_attribute(signature.fields) for module in MODULES.values(): save_attribute(module) def visit_Module(self, node): err = ("Top level statements can only be assignments, strings," "functions, comments, or imports") WhiteList = ast.FunctionDef, ast.Import, ast.ImportFrom, ast.Assign for n in node.body: if isinstance(n, ast.Expr) and isstr(n.value): continue if isinstance(n, WhiteList): continue raise PythranSyntaxError(err, n) self.generic_visit(node) def visit_Interactive(self, node): raise PythranSyntaxError("Interactive session not supported", node) def visit_Expression(self, node): raise PythranSyntaxError("Interactive expressions not supported", node) def visit_Suite(self, node): raise PythranSyntaxError( "Suites are specific to Jython and not supported", node) def visit_ClassDef(self, _): raise PythranSyntaxError("Classes not supported") def visit_Print(self, node): self.generic_visit(node) if node.dest: raise PythranSyntaxError( "Printing to a specific stream not supported", node.dest) def visit_With(self, node): raise PythranSyntaxError("With statements not supported", node) def visit_Starred(self, node): raise PythranSyntaxError("Call with star arguments not supported", node) def visit_keyword(self, node): if node.arg is None: raise PythranSyntaxError("Call with kwargs not supported", node) def visit_Call(self, node): self.generic_visit(node) def visit_Constant(self, node): if node.value is Ellipsis: if hasattr(node, 'lineno'): args = [node] else: args = [] raise PythranSyntaxError("Ellipsis are not supported", *args) iinfo = np.iinfo(int) if isinstance(node.value, int) and not (iinfo.min <= node.value <= iinfo.max): raise PythranSyntaxError("large int not supported", node) def visit_FunctionDef(self, node): if node.decorator_list: raise PythranSyntaxError("decorators not supported", node) 
if node.args.vararg: raise PythranSyntaxError("Varargs not supported", node) if node.args.kwarg: raise PythranSyntaxError("Keyword arguments not supported", node) self.generic_visit(node) def visit_Raise(self, node): self.generic_visit(node) if node.cause: raise PythranSyntaxError( "Cause in raise statements not supported", node) def visit_Attribute(self, node): self.generic_visit(node) if node.attr not in self.attributes: raise PythranSyntaxError( "Attribute '{0}' unknown".format(node.attr), node) def visit_NamedExpr(self, node): raise PythranSyntaxError( "named expression are not supported yet, please open an issue :-)", node) def visit_Import(self, node): """ Check if imported module exists in MODULES. """ for alias in node.names: current_module = MODULES # Recursive check for submodules for path in alias.name.split('.'): if path not in current_module: raise PythranSyntaxError( "Module '{0}' unknown.".format(alias.name), node) else: current_module = current_module[path] def visit_ImportFrom(self, node): """ Check validity of imported functions. Check: - no level specific value are provided. 
- a module is provided - module/submodule exists in MODULES - imported function exists in the given module/submodule """ if node.level: raise PythranSyntaxError("Relative import not supported", node) if not node.module: raise PythranSyntaxError("import from without module", node) module = node.module current_module = MODULES # Check if module exists for path in module.split('.'): if path not in current_module: raise PythranSyntaxError( "Module '{0}' unknown.".format(module), node) else: current_module = current_module[path] # Check if imported functions exist for alias in node.names: if alias.name == '*': continue elif alias.name not in current_module: raise PythranSyntaxError( "identifier '{0}' not found in module '{1}'".format( alias.name, module), node) def visit_Exec(self, node): raise PythranSyntaxError("'exec' statements are not supported", node) def visit_Global(self, node): raise PythranSyntaxError("'global' statements are not supported", node) def check_syntax(node): '''Does nothing but raising PythranSyntaxError when needed''' SyntaxChecker().visit(node) def check_specs(specs, types): ''' Does nothing but raising PythranSyntaxError if specs are incompatible with the actual code ''' from pythran.types.tog import unify, clone, tr from pythran.types.tog import Function, TypeVariable, InferenceError for fname, signatures in specs.functions.items(): ftype = types[fname] for signature in signatures: sig_type = Function([tr(p) for p in signature], TypeVariable()) try: unify(clone(sig_type), clone(ftype)) except InferenceError: raise PythranSyntaxError( "Specification for `{}` does not match inferred type:\n" "expected `{}`\n" "got `Callable[[{}], ...]`".format( fname, ftype, ", ".join(map(str, sig_type.types[:-1]))) ) def check_exports(pm, mod, specs): ''' Does nothing but raising PythranSyntaxError if specs references an undefined global ''' from pythran.analyses.argument_effects import ArgumentEffects mod_functions = {node.name: node for node in mod.body if 
isinstance(node, ast.FunctionDef)} argument_effects = pm.gather(ArgumentEffects, mod) for fname, signatures in specs.functions.items(): try: fnode = mod_functions[fname] except KeyError: raise PythranSyntaxError( "Invalid spec: exporting undefined function `{}`" .format(fname)) ae = argument_effects[fnode] for signature in signatures: args_count = len(fnode.args.args) if len(signature) > args_count: raise PythranSyntaxError( "Too many arguments when exporting `{}`" .format(fname)) elif len(signature) < args_count - len(fnode.args.defaults): raise PythranSyntaxError( "Not enough arguments when exporting `{}`" .format(fname)) for i, ty in enumerate(signature): if ae[i] and isinstance(ty, (List, Tuple, Dict, Set)): logger.warning( ("Exporting function '{}' that modifies its {} " "argument. Beware that this argument won't be " "modified at Python call site").format( fname, ty.__class__.__qualname__), ) pythran-0.10.0+ds2/pythran/tables.py000066400000000000000000006237521416264035500172770ustar00rootroot00000000000000""" This modules provides the translation tables from python to c++. 
""" import gast as ast import inspect import logging import numpy import sys from pythran.typing import Dict, Set, List, TypeVar, Union, Optional, NDArray from pythran.typing import Generator, Fun, Tuple, Iterable, Sized, File from pythran.conversion import to_ast, ToNotEval from pythran.intrinsic import Class from pythran.intrinsic import ClassWithConstConstructor, ExceptionClass from pythran.intrinsic import ClassWithReadOnceConstructor from pythran.intrinsic import ConstFunctionIntr, FunctionIntr, UpdateEffect from pythran.intrinsic import ConstMethodIntr, MethodIntr, AttributeIntr from pythran.intrinsic import ReadEffect, ConstantIntr, UFunc from pythran.intrinsic import ReadOnceMethodIntr from pythran.intrinsic import ReadOnceFunctionIntr, ConstExceptionIntr from pythran import interval from functools import reduce logger = logging.getLogger("pythran") pythran_ward = '__pythran_' namespace = "pythonic" cxx_keywords = { 'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor', 'bool', 'break', 'case', 'catch', 'char', 'class', 'compl', 'const', 'const_cast', 'continue', 'default', 'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit', 'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if', 'inline', 'int', 'long', 'mutable', 'namespace', 'new', 'not', 'not_eq', 'operator', 'or', 'or_eq', 'private', 'protected', 'public', 'register', 'reinterpret_cast', 'return', 'short', 'signed', 'sizeof', 'static', 'static_cast', 'struct', 'switch', 'template', 'this', 'throw', 'true', 'try', 'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', # C++11 additions 'constexpr', 'decltype', 'noexcept', 'nullptr', 'static_assert', # reserved namespaces 'std', } def make_lazy(exp): return '[&] () {{ return {0}; }}'.format(exp) def make_and(x, y): lx, ly = make_lazy(x), make_lazy(y) return 'pythonic::builtins::pythran::and_({0}, {1})'.format(lx, ly) def make_or(x, y): lx, ly = 
make_lazy(x), make_lazy(y) return 'pythonic::builtins::pythran::or_({0}, {1})'.format(lx, ly) operator_to_lambda = { # boolop ast.And: make_and, ast.Or: make_or, # operator ast.Add: "pythonic::operator_::add({0}, {1})".format, ast.Sub: "pythonic::operator_::sub({0}, {1})".format, ast.Mult: "pythonic::operator_::mul({0}, {1})".format, ast.Div: "pythonic::operator_::div({0}, {1})".format, ast.Mod: "pythonic::operator_::mod({0}, {1})".format, ast.Pow: "pythonic::builtins::pow({0}, {1})".format, ast.LShift: "pythonic::operator_::lshift({0}, {1})".format, ast.RShift: "pythonic::operator_::rshift({0}, {1})".format, ast.BitOr: "pythonic::operator_::or_({0}, {1})".format, ast.BitXor: "pythonic::operator_::xor_({0}, {1})".format, ast.BitAnd: "pythonic::operator_::and_({0}, {1})".format, ast.MatMult: "pythonic::operator_::functor::matmul()({0}, {1})".format, ast.FloorDiv: "pythonic::operator_::functor::floordiv()({0}, {1})".format, # unaryop ast.Invert: "pythonic::operator_::invert({0})".format, ast.Not: "pythonic::operator_::not_({0})".format, ast.UAdd: "pythonic::operator_::pos({0})".format, ast.USub: "pythonic::operator_::neg({0})".format, # cmpop ast.Eq: "pythonic::operator_::eq({0}, {1})".format, ast.NotEq: "pythonic::operator_::ne({0}, {1})".format, ast.Lt: "pythonic::operator_::lt({0}, {1})".format, ast.LtE: "pythonic::operator_::le({0}, {1})".format, ast.Gt: "pythonic::operator_::gt({0}, {1})".format, ast.GtE: "pythonic::operator_::ge({0}, {1})".format, ast.Is: "pythonic::operator_::is_({0}, {1})".format, ast.IsNot: ("pythonic::operator_::is_not({0}, {1})").format, ast.In: "pythonic::operator_::contains({1}, {0})".format, ast.NotIn: "(!pythonic::operator_::contains({1}, {0}))".format, } update_operator_to_lambda = { # operator ast.Add: "({0} += {1})".format, ast.Sub: "({0} -= {1})".format, ast.Mult: "({0} *= {1})".format, ast.Div: "(pythonic::operator_::idiv({0}, {1}))".format, ast.Mod: "(pythonic::operator_::imod({0}, {1}))".format, ast.Pow: 
"(pythonic::operator_::ipow({0}, {1}))".format, ast.LShift: "({0} <<= {1})".format, ast.RShift: "({0} >>= {1})".format, ast.BitOr: "({0} |= {1})".format, ast.BitXor: "({0} ^= {1})".format, ast.BitAnd: "({0} &= {1})".format, ast.MatMult: "(pythonic::operator_::imatmul({0}, {1}))".format, ast.FloorDiv: "(pythonic::operator_::functor::ifloordiv{{}}({0}, {1}))".format, } T0, T1, T2, T3 = TypeVar('T0'), TypeVar('T1'), TypeVar('T2'), TypeVar('T3') T4, T5, T6, T7 = TypeVar('T4'), TypeVar('T5'), TypeVar('T6'), TypeVar('T7') _bool_signature = Union[ Fun[[], bool], Fun[[T0], bool] ] _int_signature = Union[ Fun[[], int], Fun[[bool], int], Fun[[int], int], Fun[[float], int], Fun[[str], int], ] _float_signature = Union[ Fun[[], float], Fun[[str], float], Fun[[float], float], ] _complex_signature = Union[ Fun[[float], complex], Fun[[float, float], complex], ] # workaround changes in numpy interaction with getfullargspec try: inspect.getfullargspec(numpy.asarray) # if we have a description, honor it extra_numpy_asarray_descr = {} except TypeError: extra_numpy_asarray_descr = {'args':('a', 'dtype'), 'defaults': (None,)} def update_effects(self, node): """ Combiner when we update the first argument of a function. It turn type of first parameter in combination of all others parameters types. 
""" return [self.combine(node.args[0], node_args_k, register=True, aliasing_type=True) for node_args_k in node.args[1:]] BINARY_UFUNC = {"accumulate": FunctionIntr()} REDUCED_BINARY_UFUNC = {"accumulate": FunctionIntr(), "reduce": ConstFunctionIntr()} CLASSES = { "dtype": { "type": MethodIntr(), }, "list": { "append": MethodIntr(signature=Fun[[List[T0], T0], None]), "extend": MethodIntr(update_effects), "pop": MethodIntr( signature=Union[ Fun[[List[T0]], T0], Fun[[List[T0], int], T0], ], ), "reverse": MethodIntr(signature=Fun[[List[T0]], None]), "sort": MethodIntr( args=("self", "key",), ), "count": ConstMethodIntr(signature=Fun[[List[T0], T0], int]), "remove": MethodIntr(signature=Fun[[List[T0], T0], None]), "insert": MethodIntr(signature=Fun[[List[T0], int, T0], None]), }, "slice": { "start": AttributeIntr(signature=Fun[[T0], int]), "stop": AttributeIntr(signature=Fun[[T0], int]), "step": AttributeIntr(signature=Fun[[T0], int]), }, "str": { "__mod__": ConstMethodIntr( signature=Union[ Fun[[str, T0], str], Fun[[str, T0, T1], str], Fun[[str, T0, T1, T2], str], Fun[[str, T0, T1, T2, T3, T4], str], Fun[[str, T0, T1, T2, T3, T4, T5], str], Fun[[str, T0, T1, T2, T3, T4, T5, T6], str], ], ), "capitalize": ConstMethodIntr(signature=Fun[[str], str]), "count": ConstMethodIntr(signature=Union[ Fun[[str, str], int], Fun[[str, str, int], int], Fun[[str, str, int, int], int], ]), "endswith": ConstMethodIntr( signature=Union[ Fun[[str, str], bool], Fun[[str, str, Optional[int]], bool], Fun[[str, str, Optional[int], Optional[int]], bool], ], ), "startswith": ConstMethodIntr( signature=Union[ Fun[[str, str], bool], Fun[[str, str, Optional[int]], bool], Fun[[str, str, Optional[int], Optional[int]], bool], ], ), "find": ConstMethodIntr( signature=Union[ Fun[[str, str], int], Fun[[str, str, Optional[int]], int], Fun[[str, str, Optional[int], Optional[int]], int], ], ), "isalpha": ConstMethodIntr(signature=Fun[[str], bool]), "isdigit": ConstMethodIntr(signature=Fun[[str], bool]), 
"join": ConstMethodIntr(signature=Fun[[str, Iterable[str]], str]), "lower": ConstMethodIntr(signature=Fun[[str], str]), "replace": ConstMethodIntr( signature=Union[ Fun[[str, str, str], str], Fun[[str, str, str, int], str], ] ), "split": ConstMethodIntr( signature=Union[ Fun[[str], List[str]], Fun[[str, str], List[str]], Fun[[str, None], List[str]], Fun[[str, str, int], List[str]], Fun[[str, None, int], List[str]], ] ), "strip": ConstMethodIntr( signature=Union[ Fun[[str], str], Fun[[str, str], str], ] ), "lstrip": ConstMethodIntr( signature=Union[ Fun[[str], str], Fun[[str, str], str], ] ), "rstrip": ConstMethodIntr( signature=Union[ Fun[[str], str], Fun[[str, str], str], ] ), "upper": ConstMethodIntr( signature=Fun[[str], str] ), }, "set": { "add": MethodIntr(signature=Fun[[Set[T0], T0], None]), "clear": MethodIntr(signature=Fun[[Set[T0]], None]), "copy": ConstMethodIntr(signature=Fun[[Set[T0]], Iterable[T0]]), "discard": MethodIntr(signature=Fun[[Set[T0], T0], None]), "remove": MethodIntr(signature=Fun[[Set[T0], T0], None]), "isdisjoint": ConstMethodIntr( signature=Fun[[Set[T0], Set[T0]], bool]), "union": ConstMethodIntr( signature=Union[ Fun[[Set[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], Iterable[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], Iterable[T0], Iterable[T0], Iterable[T0]], Set[T0]], ] ), "update": MethodIntr(update_effects), "intersection": ConstMethodIntr( signature=Union[ Fun[[Set[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], Iterable[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], Iterable[T0], Iterable[T0], Iterable[T0]], Set[T0]], ] ), "intersection_update": MethodIntr(update_effects), "difference": ConstMethodIntr( signature=Union[ Fun[[Set[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], Iterable[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], Iterable[T0], Iterable[T0], Iterable[T0]], Set[T0]], ] ), "difference_update": MethodIntr(update_effects), "symmetric_difference": ConstMethodIntr( signature=Union[ Fun[[Set[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], 
Iterable[T0], Iterable[T0]], Set[T0]], Fun[[Set[T0], Iterable[T0], Iterable[T0], Iterable[T0]], Set[T0]], ] ), "symmetric_difference_update": MethodIntr(update_effects), "issuperset": ConstMethodIntr( signature=Fun[[Set[T0], Set[T0]], bool]), "issubset": ConstMethodIntr( signature=Fun[[Set[T0], Set[T0]], bool]), }, "Exception": { "args": AttributeIntr(signature=Fun[[T0], str]), "errno": AttributeIntr(signature=Fun[[T0], str]), "strerror": AttributeIntr(signature=Fun[[T0], str]), "filename": AttributeIntr(signature=Fun[[T0], str]), }, "float": { "is_integer": ConstMethodIntr(signature=Fun[[float], bool]), }, "complex": { "conjugate": ConstMethodIntr(), "real": AttributeIntr( signature=Union[ Fun[[complex], float], Fun[[NDArray[complex, :]], NDArray[float, :]], Fun[[NDArray[complex, :, :]], NDArray[float, :, :]], Fun[[NDArray[complex, :, :, :]], NDArray[float, :, :, :]], Fun[[NDArray[complex, :, :, :, :]], NDArray[float, :, :, :, :]], ] ), "imag": AttributeIntr( signature=Union[ Fun[[complex], float], Fun[[NDArray[complex, :]], NDArray[float, :]], Fun[[NDArray[complex, :, :]], NDArray[float, :, :]], Fun[[NDArray[complex, :, :, :]], NDArray[float, :, :, :]], Fun[[NDArray[complex, :, :, :, :]], NDArray[float, :, :, :, :]], ] ), }, "dict": { "fromkeys": ConstFunctionIntr( signature=Union[ Fun[[Iterable[T0]], Dict[T0, Optional[T1]]], Fun[[Iterable[T0], T1], Dict[T0, T1]], ], ), "clear": MethodIntr(signature=Fun[[Dict[T0, T1]], None]), "copy": ConstMethodIntr( signature=Fun[[Dict[T0, T1]], Dict[T0, T1]]), "get": ConstMethodIntr( signature=Union[ Fun[[Dict[T0, T1], T0], Optional[T1]], Fun[[Dict[T0, T1], T0, T1], T1], ], ), "items": MethodIntr( signature=Fun[[Dict[T0, T1]], List[Tuple[T0, T1]]]), "keys": MethodIntr(signature=Fun[[Dict[T0, T1]], List[T0]]), "pop": MethodIntr( signature=Union[ Fun[[Dict[T0, T1], T0], T1], Fun[[Dict[T0, T1], T0, T1], T1], ] ), "popitem": MethodIntr( signature=Fun[[Dict[T0, T1]], Tuple[T0, T1]]), "setdefault": MethodIntr( signature=Union[ 
Fun[[Dict[T0, T1], T0, T1], T1], Fun[[Dict[T0, T1], T0], T1] ], return_alias=lambda args: { ast.Subscript(args[0], args[1], ast.Load()) }.union({args[2]} if len(args) == 3 else set()) ), "update": MethodIntr(update_effects), "values": MethodIntr(signature=Fun[[Dict[T0, T1]], List[T1]]), }, "file": { # Member variables "closed": AttributeIntr(signature=Fun[[File], bool]), "mode": AttributeIntr(signature=Fun[[File], str]), "name": AttributeIntr(signature=Fun[[File], str]), "newlines": AttributeIntr(signature=Fun[[File], str]), # Member functions "close": MethodIntr( signature=Fun[[File], None], global_effects=True ), "flush": MethodIntr( signature=Fun[[File], None], global_effects=True ), "fileno": MethodIntr( signature=Fun[[File], int], ), "isatty": MethodIntr(signature=Fun[[File], bool]), "next": MethodIntr(global_effects=True), "read": MethodIntr( signature=Union[ Fun[[File], str], Fun[[File, int], str], ], global_effects=True ), "readline": MethodIntr( signature=Union[ Fun[[File], str], Fun[[File, int], str], ], global_effects=True ), "readlines": MethodIntr( signature=Union[ Fun[[File], List[str]], Fun[[File, int], List[str]], ], global_effects=True ), "seek": MethodIntr( signature=Union[ Fun[[File, int], None], Fun[[File, int, int], None], ], global_effects=True ), "tell": MethodIntr(signature=Fun[[File], int]), "truncate": MethodIntr( signature=Union[ Fun[[File], None], Fun[[File, int], None], ], global_effects=True ), "write": MethodIntr( signature=Fun[[File, str], None], global_effects=True ), "writelines": MethodIntr( signature=Fun[[File, Iterable[str]], None], global_effects=True ), }, "finfo": { "eps": AttributeIntr(signature=float), }, "ndarray": { "astype": MethodIntr( signature=Union[ # dtype = bool Fun[[NDArray[bool, :], _bool_signature], NDArray[bool, :]], Fun[[NDArray[int, :], _bool_signature], NDArray[bool, :]], Fun[[NDArray[float, :], _bool_signature], NDArray[bool, :]], Fun[[NDArray[complex, :], _bool_signature], NDArray[bool, :]], 
Fun[[NDArray[bool, :, :], _bool_signature], NDArray[bool, :, :]], Fun[[NDArray[int, :, :], _bool_signature], NDArray[bool, :, :]], Fun[[NDArray[float, :, :], _bool_signature], NDArray[bool, :, :]], Fun[[NDArray[complex, :, :], _bool_signature], NDArray[bool, :, :]], Fun[[NDArray[bool, :, :, :], _bool_signature], NDArray[bool, :, :, :]], Fun[[NDArray[int, :, :, :], _bool_signature], NDArray[bool, :, :, :]], Fun[[NDArray[float, :, :, :], _bool_signature], NDArray[bool, :, :, :]], Fun[[NDArray[complex, :, :, :], _bool_signature], NDArray[bool, :, :, :]], Fun[[NDArray[bool, :, :, :, :], _bool_signature], NDArray[bool, :, :, :, :]], Fun[[NDArray[int, :, :, :, :], _bool_signature], NDArray[bool, :, :, :, :]], Fun[[NDArray[float, :, :, :, :], _bool_signature], NDArray[bool, :, :, :, :]], Fun[[NDArray[complex, :, :, :, :], _bool_signature], NDArray[bool, :, :, :, :]], # dtype = int Fun[[NDArray[bool, :], _int_signature], NDArray[int, :]], Fun[[NDArray[int, :], _int_signature], NDArray[int, :]], Fun[[NDArray[float, :], _int_signature], NDArray[int, :]], Fun[[NDArray[complex, :], _int_signature], NDArray[int, :]], Fun[[NDArray[bool, :, :], _int_signature], NDArray[int, :, :]], Fun[[NDArray[int, :, :], _int_signature], NDArray[int, :, :]], Fun[[NDArray[float, :, :], _int_signature], NDArray[int, :, :]], Fun[[NDArray[complex, :, :], _int_signature], NDArray[int, :, :]], Fun[[NDArray[bool, :, :, :], _int_signature], NDArray[int, :, :, :]], Fun[[NDArray[int, :, :, :], _int_signature], NDArray[int, :, :, :]], Fun[[NDArray[float, :, :, :], _int_signature], NDArray[int, :, :, :]], Fun[[NDArray[complex, :, :, :], _int_signature], NDArray[int, :, :, :]], Fun[[NDArray[bool, :, :, :, :], _int_signature], NDArray[int, :, :, :, :]], Fun[[NDArray[int, :, :, :, :], _int_signature], NDArray[int, :, :, :, :]], Fun[[NDArray[float, :, :, :, :], _int_signature], NDArray[int, :, :, :, :]], Fun[[NDArray[complex, :, :, :, :], _int_signature], NDArray[int, :, :, :, :]], # dtype = float 
Fun[[NDArray[bool, :], _float_signature], NDArray[float, :]], Fun[[NDArray[int, :], _float_signature], NDArray[float, :]], Fun[[NDArray[float, :], _float_signature], NDArray[float, :]], Fun[[NDArray[complex, :], _float_signature], NDArray[float, :]], Fun[[NDArray[bool, :, :], _float_signature], NDArray[float, :, :]], Fun[[NDArray[int, :, :], _float_signature], NDArray[float, :, :]], Fun[[NDArray[float, :, :], _float_signature], NDArray[float, :, :]], Fun[[NDArray[complex, :, :], _float_signature], NDArray[float, :, :]], Fun[[NDArray[bool, :, :, :], _float_signature], NDArray[float, :, :, :]], Fun[[NDArray[int, :, :, :], _float_signature], NDArray[float, :, :, :]], Fun[[NDArray[float, :, :, :], _float_signature], NDArray[float, :, :, :]], Fun[[NDArray[complex, :, :, :], _float_signature], NDArray[float, :, :, :]], Fun[[NDArray[bool, :, :, :, :], _float_signature], NDArray[float, :, :, :, :]], Fun[[NDArray[int, :, :, :, :], _float_signature], NDArray[float, :, :, :, :]], Fun[[NDArray[float, :, :, :, :], _float_signature], NDArray[float, :, :, :, :]], Fun[[NDArray[complex, :, :, :, :], _float_signature], NDArray[float, :, :, :, :]], # dtype = complex Fun[[NDArray[bool, :], _complex_signature], NDArray[complex, :]], Fun[[NDArray[int, :], _complex_signature], NDArray[complex, :]], Fun[[NDArray[float, :], _complex_signature], NDArray[complex, :]], Fun[[NDArray[complex, :], _complex_signature], NDArray[complex, :]], Fun[[NDArray[bool, :, :], _complex_signature], NDArray[complex, :, :]], Fun[[NDArray[int, :, :], _complex_signature], NDArray[complex, :, :]], Fun[[NDArray[float, :, :], _complex_signature], NDArray[complex, :, :]], Fun[[NDArray[complex, :, :], _complex_signature], NDArray[complex, :, :]], Fun[[NDArray[bool, :, :, :], _complex_signature], NDArray[complex, :, :, :]], Fun[[NDArray[int, :, :, :], _complex_signature], NDArray[complex, :, :, :]], Fun[[NDArray[float, :, :, :], _complex_signature], NDArray[complex, :, :, :]], Fun[[NDArray[complex, :, :, :], 
_complex_signature], NDArray[complex, :, :, :]], Fun[[NDArray[bool, :, :, :, :], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[NDArray[int, :, :, :, :], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[NDArray[float, :, :, :, :], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[NDArray[complex, :, :, :, :], _complex_signature], NDArray[complex, :, :, :, :]], ] ), "dtype": AttributeIntr(), "fill": MethodIntr( signature=Union[ # 1d Fun[[NDArray[bool, :], bool], None], Fun[[NDArray[int, :], int], None], Fun[[NDArray[float, :], float], None], Fun[[NDArray[complex, :], complex], None], # 2d Fun[[NDArray[bool, :, :], bool], None], Fun[[NDArray[int, :, :], int], None], Fun[[NDArray[float, :, :], float], None], Fun[[NDArray[complex, :, :], complex], None], # 3d Fun[[NDArray[bool, :, :, :], bool], None], Fun[[NDArray[int, :, :, :], int], None], Fun[[NDArray[float, :, :, :], float], None], Fun[[NDArray[complex, :, :, :], complex], None], # 4d Fun[[NDArray[bool, :, :, :, :], bool], None], Fun[[NDArray[int, :, :, :, :], int], None], Fun[[NDArray[float, :, :, :, :], float], None], Fun[[NDArray[complex, :, :, :, :], complex], None], ], ), "flat": AttributeIntr( signature=Union[ # 1d Fun[[NDArray[bool, :]], Generator[bool]], Fun[[NDArray[int, :]], Generator[int]], Fun[[NDArray[float, :]], Generator[float]], Fun[[NDArray[complex, :]], Generator[complex]], # 2d Fun[[NDArray[bool, :, :]], Generator[bool]], Fun[[NDArray[int, :, :]], Generator[int]], Fun[[NDArray[float, :, :]], Generator[float]], Fun[[NDArray[complex, :, :]], Generator[complex]], # 3d Fun[[NDArray[bool, :, :, :]], Generator[bool]], Fun[[NDArray[int, :, :, :]], Generator[int]], Fun[[NDArray[float, :, :, :]], Generator[float]], Fun[[NDArray[complex, :, :, :]], Generator[complex]], # 4d Fun[[NDArray[bool, :, :, :, :]], Generator[bool]], Fun[[NDArray[int, :, :, :, :]], Generator[int]], Fun[[NDArray[float, :, :, :, :]], Generator[float]], Fun[[NDArray[complex, :, :, :, :]], Generator[complex]], ] 
), "flatten": MethodIntr( signature=Union[ # 1d Fun[[NDArray[bool, :]], NDArray[bool, :]], Fun[[NDArray[int, :]], NDArray[int, :]], Fun[[NDArray[float, :]], NDArray[float, :]], Fun[[NDArray[complex, :]], NDArray[complex, :]], # 2d Fun[[NDArray[bool, :, :]], NDArray[bool, :]], Fun[[NDArray[int, :, :]], NDArray[int, :]], Fun[[NDArray[float, :, :]], NDArray[float, :]], Fun[[NDArray[complex, :, :]], NDArray[complex, :]], # 3d Fun[[NDArray[bool, :, :, :]], NDArray[bool, :]], Fun[[NDArray[int, :, :, :]], NDArray[int, :]], Fun[[NDArray[float, :, :, :]], NDArray[float, :]], Fun[[NDArray[complex, :, :, :]], NDArray[complex, :]], # 4d Fun[[NDArray[bool, :, :, :, :]], NDArray[bool, :]], Fun[[NDArray[int, :, :, :, :]], NDArray[int, :]], Fun[[NDArray[float, :, :, :, :]], NDArray[float, :]], Fun[[NDArray[complex, :, :, :, :]], NDArray[complex, :]], ] ), "item": MethodIntr( signature=Union[ # item = int # 1d Fun[[NDArray[bool, :], int], bool], Fun[[NDArray[int, :], int], int], Fun[[NDArray[float, :], int], float], Fun[[NDArray[complex, :], int], complex], # 2d Fun[[NDArray[bool, :, :], int], bool], Fun[[NDArray[int, :, :], int], int], Fun[[NDArray[float, :, :], int], float], Fun[[NDArray[complex, :, :], int], complex], # 3d Fun[[NDArray[bool, :, :, :], int], bool], Fun[[NDArray[int, :, :, :], int], int], Fun[[NDArray[float, :, :, :], int], float], Fun[[NDArray[complex, :, :, :], int], complex], # 4d Fun[[NDArray[bool, :, :, :, :], int], bool], Fun[[NDArray[int, :, :, :, :], int], int], Fun[[NDArray[float, :, :, :, :], int], float], Fun[[NDArray[complex, :, :, :, :], int], complex], # item = tuple # 1d Fun[[NDArray[bool, :], Tuple[int]], bool], Fun[[NDArray[int, :], Tuple[int]], int], Fun[[NDArray[float, :], Tuple[int]], float], Fun[[NDArray[complex, :], Tuple[int]], complex], # 2d Fun[[NDArray[bool, :, :], Tuple[int, int]], bool], Fun[[NDArray[int, :, :], Tuple[int, int]], int], Fun[[NDArray[float, :, :], Tuple[int, int]], float], Fun[[NDArray[complex, :, :], Tuple[int, int]], 
complex], # 3d Fun[[NDArray[bool, :, :, :], Tuple[int, int, int]], bool], Fun[[NDArray[int, :, :, :], Tuple[int, int, int]], int], Fun[[NDArray[float, :, :, :], Tuple[int, int, int]], float], Fun[[NDArray[complex, :, :, :], Tuple[int, int, int]], complex], # 4d Fun[[NDArray[bool, :, :, :, :], Tuple[int, int, int, int]], bool], Fun[[NDArray[int, :, :, :, :], Tuple[int, int, int, int]], int], Fun[[NDArray[float, :, :, :, :], Tuple[int, int, int, int]], float], Fun[[NDArray[complex, :, :, :, :], Tuple[int, int, int, int]], complex], ] ), "itemsize": AttributeIntr(signature=Fun[[NDArray[T0, :]], int], return_range=interval.positive_values), "nbytes": AttributeIntr( signature=Fun[[NDArray[T0, :]], int], return_range=interval.positive_values ), "ndim": AttributeIntr(signature=Fun[[NDArray[T0, :]], int], return_range=interval.positive_values), "reshape": ConstMethodIntr( signature=Union[ Fun[[NDArray[T0, :], int], NDArray[T1, :]], Fun[[NDArray[T0, :], Tuple[int]], NDArray[T1, :]], Fun[[NDArray[T0, :], int, int], NDArray[T1, :, :]], Fun[[NDArray[T0, :], Tuple[int, int]], NDArray[T1, :, :]], Fun[[NDArray[T0, :], int, int, int], NDArray[T1, :, :, :]], Fun[[NDArray[T0, :], Tuple[int, int, int]], NDArray[T1, :, :, :]], Fun[[NDArray[T0, :], int, int, int, int], NDArray[T1, :, :, :, :]], Fun[[NDArray[T0, :], Tuple[int, int, int, int]], NDArray[T1, :, :, :, :]], ] ), "shape": AttributeIntr( signature=Union[ # bool Fun[[NDArray[bool, :]], Tuple[int]], Fun[[NDArray[bool, :, :]], Tuple[int, int]], Fun[[NDArray[bool, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[bool, :, :, :, :]], Tuple[int, int, int, int]], # int Fun[[NDArray[int, :]], Tuple[int]], Fun[[NDArray[int, :, :]], Tuple[int, int]], Fun[[NDArray[int, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[int, :, :, :, :]], Tuple[int, int, int, int]], # float Fun[[NDArray[float, :]], Tuple[int]], Fun[[NDArray[float, :, :]], Tuple[int, int]], Fun[[NDArray[float, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[float, :, :, :, :]], 
Tuple[int, int, int, int]], # complex Fun[[NDArray[complex, :]], Tuple[int]], Fun[[NDArray[complex, :, :]], Tuple[int, int]], Fun[[NDArray[complex, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[complex, :, :, :, :]], Tuple[int, int, int, int]], ], return_range_content=interval.positive_values ), "size": AttributeIntr(signature=Fun[[NDArray[T0, :]], int], return_range=interval.positive_values), "sort": MethodIntr( args=("self", "axis", "kind"), defaults=(-1, None) ), "strides": AttributeIntr( signature=Union[ # bool Fun[[NDArray[bool, :]], Tuple[int]], Fun[[NDArray[bool, :, :]], Tuple[int, int]], Fun[[NDArray[bool, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[bool, :, :, :, :]], Tuple[int, int, int, int]], # int Fun[[NDArray[int, :]], Tuple[int]], Fun[[NDArray[int, :, :]], Tuple[int, int]], Fun[[NDArray[int, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[int, :, :, :, :]], Tuple[int, int, int, int]], # float Fun[[NDArray[float, :]], Tuple[int]], Fun[[NDArray[float, :, :]], Tuple[int, int]], Fun[[NDArray[float, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[float, :, :, :, :]], Tuple[int, int, int, int]], # complex Fun[[NDArray[complex, :]], Tuple[int]], Fun[[NDArray[complex, :, :]], Tuple[int, int]], Fun[[NDArray[complex, :, :, :]], Tuple[int, int, int]], Fun[[NDArray[complex, :, :, :, :]], Tuple[int, int, int, int]], ] ), "T": AttributeIntr(signature=Fun[[NDArray[T0, :]], NDArray[T0, :]]), "tolist": ConstMethodIntr( signature=Union[ # 1d Fun[[NDArray[bool, :]], List[bool]], Fun[[NDArray[int, :]], List[int]], Fun[[NDArray[float, :]], List[float]], Fun[[NDArray[complex, :]], List[complex]], # 2d Fun[[NDArray[bool, :, :]], List[bool]], Fun[[NDArray[int, :, :]], List[int]], Fun[[NDArray[float, :, :]], List[float]], Fun[[NDArray[complex, :, :]], List[complex]], # 3d Fun[[NDArray[bool, :, :, :]], List[bool]], Fun[[NDArray[int, :, :, :]], List[int]], Fun[[NDArray[float, :, :, :]], List[float]], Fun[[NDArray[complex, :, :, :]], List[complex]], # 4d Fun[[NDArray[bool, :, 
:, :, :]], List[bool]], Fun[[NDArray[int, :, :, :, :]], List[int]], Fun[[NDArray[float, :, :, :, :]], List[float]], Fun[[NDArray[complex, :, :, :, :]], List[complex]], ] ), "tofile": ConstMethodIntr(signature=Fun[[NDArray[T0, :]], str, str], global_effects=True), "tostring": ConstMethodIntr(signature=Fun[[NDArray[T0, :]], str]), }, } _numpy_ones_signature = Union[ # 1d Fun[[int], NDArray[float, :]], Fun[[int, _bool_signature], NDArray[bool, :]], Fun[[int, _int_signature], NDArray[int, :]], Fun[[int, _float_signature], NDArray[float, :]], Fun[[int, _complex_signature], NDArray[complex, :]], # 1D tuple Fun[[Tuple[int]], NDArray[float, :]], Fun[[Tuple[int], _bool_signature], NDArray[bool, :]], Fun[[Tuple[int], _int_signature], NDArray[int, :]], Fun[[Tuple[int], _float_signature], NDArray[float, :]], Fun[[Tuple[int], _complex_signature], NDArray[complex, :]], # 2D tuple Fun[[Tuple[int, int]], NDArray[float, :, :]], Fun[[Tuple[int, int], _bool_signature], NDArray[bool, :, :]], Fun[[Tuple[int, int], _int_signature], NDArray[int, :, :]], Fun[[Tuple[int, int], _float_signature], NDArray[float, :, :]], Fun[[Tuple[int, int], _complex_signature], NDArray[complex, :, :]], # 3D tuple Fun[[Tuple[int, int, int]], NDArray[float, :, :, :]], Fun[[Tuple[int, int, int], _bool_signature], NDArray[bool, :, :, :]], Fun[[Tuple[int, int, int], _int_signature], NDArray[int, :, :, :]], Fun[[Tuple[int, int, int], _float_signature], NDArray[float, :, :, :]], Fun[[Tuple[int, int, int], _complex_signature], NDArray[complex, :, :, :]], # 4D tuple Fun[[Tuple[int, int, int, int]], NDArray[float, :, :, :, :]], Fun[[Tuple[int, int, int, int], _bool_signature], NDArray[bool, :, :, :, :]], Fun[[Tuple[int, int, int, int], _int_signature], NDArray[int, :, :, :, :]], Fun[[Tuple[int, int, int, int], _float_signature], NDArray[float, :, :, :, :]], Fun[[Tuple[int, int, int, int], _complex_signature], NDArray[complex, :, :, :, :]], ] _numpy_ones_like_signature = Union[ # scalar Fun[[bool], bool], Fun[[int], 
int], Fun[[float], float], Fun[[complex], complex], # scalar + None Fun[[bool, None], bool], Fun[[int, None], int], Fun[[float, None], float], Fun[[complex, None], complex], # scalar + dtype Fun[[bool, _bool_signature], bool], Fun[[bool, _int_signature], int], Fun[[bool, _float_signature], float], Fun[[bool, _complex_signature], complex], Fun[[int, _bool_signature], bool], Fun[[int, _int_signature], int], Fun[[int, _float_signature], float], Fun[[int, _complex_signature], complex], Fun[[complex, _bool_signature], bool], Fun[[complex, _int_signature], int], Fun[[complex, _float_signature], float], Fun[[complex, _complex_signature], complex], # array 1D Fun[[Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int]], NDArray[int, :]], Fun[[Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex]], NDArray[complex, :]], # array 2d Fun[[Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :, :]], # array 3d Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _float_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], # array 4d Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :, :]], # with dtype ] _numpy_unary_op_signature = Union[ # 1d Fun[[bool], bool], Fun[[int], int], Fun[[float], float], Fun[[complex], complex], # 1d Iterable Fun[[Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int]], NDArray[int, 
:]], Fun[[Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex]], NDArray[complex, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :, :]], ] _numpy_float_unary_op_signature = Union[ # 1d Fun[[bool], float], Fun[[int], float], Fun[[float], float], # 1d Iterable Fun[[Iterable[bool]], NDArray[float, :]], Fun[[Iterable[int]], NDArray[float, :]], Fun[[Iterable[float]], NDArray[float, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], ] _numpy_int_unary_op_signature = Union[ # 1d Fun[[bool], bool], Fun[[int], int], # 1d Iterable Fun[[Iterable[bool]], NDArray[bool, :]], 
Fun[[Iterable[int]], NDArray[int, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], ] _numpy_unary_op_angle_signature = Union[ # no extra option # 1d Fun[[bool], float], Fun[[int], float], Fun[[float], float], Fun[[complex], float], # 1d Iterable Fun[[Iterable[bool]], NDArray[float, :]], Fun[[Iterable[int]], NDArray[float, :]], Fun[[Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex]], NDArray[float, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[float, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[float, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[float, :, :, :, :]], # extra option # 1d Fun[[bool, bool], float], Fun[[int, bool], float], Fun[[float, bool], float], Fun[[complex, bool], float], # 1d Iterable Fun[[Iterable[bool], bool], NDArray[float, :]], Fun[[Iterable[int], bool], NDArray[float, :]], Fun[[Iterable[float], bool], NDArray[float, :]], Fun[[Iterable[complex], bool], 
NDArray[float, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]], bool], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], bool], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], bool], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]], bool], NDArray[float, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], bool], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], bool], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], bool], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], bool], NDArray[float, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], bool], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], bool], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], bool], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], bool], NDArray[float, :, :, :, :]], ] _numpy_array_str_signature = Union[ tuple(Fun[[NDArray[(dtype,) + slices]], str] for dtype in (bool, int, float, complex) for slices in [(slice(0),) * i for i in range(1, 5)]) ] _numpy_float_unary_op_float_signature = Union[ # 1d Fun[[bool], float], Fun[[int], float], Fun[[float], float], # 1d Iterable Fun[[Iterable[bool]], NDArray[float, :]], Fun[[Iterable[int]], NDArray[float, :]], Fun[[Iterable[float]], NDArray[float, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[float, :, :, :, :]], 
Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], ] _numpy_unary_op_float_signature = Union[ # 1d Fun[[bool], float], Fun[[int], float], Fun[[float], float], Fun[[complex], complex], # 1d Iterable Fun[[Iterable[bool]], NDArray[float, :]], Fun[[Iterable[int]], NDArray[float, :]], Fun[[Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex]], NDArray[complex, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :, :]], ] _numpy_unary_op_int_signature = Union[ # 1d Fun[[bool], int], Fun[[int], int], Fun[[float], int], Fun[[complex], int], # 1d Iterable Fun[[Iterable[bool]], NDArray[int, :]], Fun[[Iterable[int]], NDArray[int, :]], Fun[[Iterable[float]], NDArray[int, :]], Fun[[Iterable[complex]], NDArray[int, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[int, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], 
NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[int, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[int, :, :, :, :]], ] _numpy_unary_op_axis_signature = Union[ # no axis # 1d Fun[[bool], bool], Fun[[int], int], Fun[[float], float], Fun[[complex], complex], # 1d Iterable Fun[[Iterable[bool]], bool], Fun[[Iterable[int]], int], Fun[[Iterable[float]], float], Fun[[Iterable[complex]], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]]], bool], Fun[[Iterable[Iterable[int]]], int], Fun[[Iterable[Iterable[float]]], float], Fun[[Iterable[Iterable[complex]]], complex], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], bool], Fun[[Iterable[Iterable[Iterable[int]]]], int], Fun[[Iterable[Iterable[Iterable[float]]]], float], Fun[[Iterable[Iterable[Iterable[complex]]]], complex], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], bool], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], float], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], complex], # axis # 1d Iterable Fun[[Iterable[bool], int], bool], Fun[[Iterable[int], int], int], Fun[[Iterable[float], int], float], Fun[[Iterable[complex], int], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]], int], NDArray[bool, :]], Fun[[Iterable[Iterable[int]], int], NDArray[int, :]], Fun[[Iterable[Iterable[float]], int], NDArray[float, :]], Fun[[Iterable[Iterable[complex]], int], NDArray[complex, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], int], NDArray[bool, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], int], NDArray[float, :, :]], 
Fun[[Iterable[Iterable[Iterable[complex]]], int], NDArray[complex, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], int], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], int], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], int], NDArray[complex, :, :, :]], ] _numpy_unary_op_int_axis_signature = Union[ # no axis # 1d Fun[[bool], int], Fun[[int], int], Fun[[float], int], Fun[[complex], int], # 1d Iterable Fun[[Iterable[bool]], int], Fun[[Iterable[int]], int], Fun[[Iterable[float]], int], Fun[[Iterable[complex]], int], # 2d Iterable Fun[[Iterable[Iterable[bool]]], int], Fun[[Iterable[Iterable[int]]], int], Fun[[Iterable[Iterable[float]]], int], Fun[[Iterable[Iterable[complex]]], int], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], int], Fun[[Iterable[Iterable[Iterable[int]]]], int], Fun[[Iterable[Iterable[Iterable[float]]]], int], Fun[[Iterable[Iterable[Iterable[complex]]]], int], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], int], # axis # 1d Iterable Fun[[Iterable[bool], int], int], Fun[[Iterable[int], int], int], Fun[[Iterable[float], int], int], Fun[[Iterable[complex], int], int], # 2d Iterable Fun[[Iterable[Iterable[bool]], int], NDArray[int, :]], Fun[[Iterable[Iterable[int]], int], NDArray[int, :]], Fun[[Iterable[Iterable[float]], int], NDArray[int, :]], Fun[[Iterable[Iterable[complex]], int], NDArray[int, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], int], NDArray[int, :, :]], # 
4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], int], NDArray[int, :, :, :]], ] _numpy_unary_op_sum_axis_signature = Union[ # no axis # 1d Fun[[bool], int], Fun[[int], int], Fun[[float], float], Fun[[complex], complex], # 1d Iterable Fun[[Iterable[bool]], int], Fun[[Iterable[int]], int], Fun[[Iterable[float]], float], Fun[[Iterable[complex]], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]]], int], Fun[[Iterable[Iterable[int]]], int], Fun[[Iterable[Iterable[float]]], float], Fun[[Iterable[Iterable[complex]]], complex], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], int], Fun[[Iterable[Iterable[Iterable[int]]]], int], Fun[[Iterable[Iterable[Iterable[float]]]], float], Fun[[Iterable[Iterable[Iterable[complex]]]], complex], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], float], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], complex], # axis # 1d Fun[[bool, int], int], Fun[[int, int], int], Fun[[float, int], float], Fun[[complex, int], complex], # 1d Iterable Fun[[Iterable[bool], int], int], Fun[[Iterable[int], int], int], Fun[[Iterable[float], int], float], Fun[[Iterable[complex], int], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]], int], NDArray[int, :]], Fun[[Iterable[Iterable[int]], int], NDArray[int, :]], Fun[[Iterable[Iterable[float]], int], NDArray[float, :]], Fun[[Iterable[Iterable[complex]], int], NDArray[complex, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], int], NDArray[float, :, :]], 
Fun[[Iterable[Iterable[Iterable[complex]]], int], NDArray[complex, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], int], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], int], NDArray[complex, :, :, :]], ] _numpy_unary_op_cumsum_axis_signature = Union[ # no axis # 1d Fun[[bool], NDArray[int, :]], Fun[[int], NDArray[int, :]], Fun[[float], NDArray[float, :]], Fun[[complex], NDArray[complex, :]], # 1d Iterable Fun[[Iterable[bool]], NDArray[int, :]], Fun[[Iterable[int]], NDArray[int, :]], Fun[[Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex]], NDArray[complex, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]]], NDArray[int, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :]], # axis # 1d Fun[[bool, int], NDArray[int, :]], Fun[[int, int], NDArray[int, :]], Fun[[float, int], NDArray[float, :]], Fun[[complex, int], NDArray[complex, :]], # 1d Iterable Fun[[Iterable[bool], int], NDArray[int, :]], Fun[[Iterable[int], int], NDArray[int, :]], Fun[[Iterable[float], int], NDArray[float, :]], Fun[[Iterable[complex], int], NDArray[complex, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]], 
int], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]], int], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]], int], NDArray[complex, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], int], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], int], NDArray[complex, :, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], int], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], int], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], int], NDArray[complex, :, :, :, :]], ] _numpy_unary_op_average_axis_signature = Union[ # no axis # 1d Fun[[bool], float], Fun[[int], float], Fun[[float], float], Fun[[complex], complex], # 1d Iterable Fun[[Iterable[bool]], float], Fun[[Iterable[int]], float], Fun[[Iterable[float]], float], Fun[[Iterable[complex]], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]]], float], Fun[[Iterable[Iterable[int]]], float], Fun[[Iterable[Iterable[float]]], float], Fun[[Iterable[Iterable[complex]]], complex], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], float], Fun[[Iterable[Iterable[Iterable[int]]]], float], Fun[[Iterable[Iterable[Iterable[float]]]], float], Fun[[Iterable[Iterable[Iterable[complex]]]], complex], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], float], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], float], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], float], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], complex], # axis None # 1d Fun[[bool, None], float], Fun[[int, None], float], Fun[[float, None], float], Fun[[complex, None], complex], # 1d Iterable Fun[[Iterable[bool], None], float], 
Fun[[Iterable[int], None], float], Fun[[Iterable[float], None], float], Fun[[Iterable[complex], None], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]], None], float], Fun[[Iterable[Iterable[int]], None], float], Fun[[Iterable[Iterable[float]], None], float], Fun[[Iterable[Iterable[complex]], None], complex], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], None], float], Fun[[Iterable[Iterable[Iterable[int]]], None], float], Fun[[Iterable[Iterable[Iterable[float]]], None], float], Fun[[Iterable[Iterable[Iterable[complex]]], None], complex], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], None], float], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], None], float], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], None], float], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], None], complex], # axis # 1d Fun[[bool, int], float], Fun[[int, int], float], Fun[[float, int], float], Fun[[complex, int], complex], # 1d Iterable Fun[[Iterable[bool], int], float], Fun[[Iterable[int], int], float], Fun[[Iterable[float], int], float], Fun[[Iterable[complex], int], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]], int], NDArray[float, :]], Fun[[Iterable[Iterable[int]], int], NDArray[float, :]], Fun[[Iterable[Iterable[float]], int], NDArray[float, :]], Fun[[Iterable[Iterable[complex]], int], NDArray[complex, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], int], NDArray[float, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[float, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], int], NDArray[float, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], int], NDArray[complex, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], int], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], int], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], int], 
NDArray[complex, :, :, :]], # axis None + weight # 1d Fun[[bool, None, float], float], Fun[[int, None, float], float], Fun[[float, None, float], float], Fun[[complex, None, float], complex], # 1d Iterable Fun[[Iterable[bool], None, Iterable[float]], float], Fun[[Iterable[int], None, Iterable[float]], float], Fun[[Iterable[float], None, Iterable[float]], float], Fun[[Iterable[complex], None, Iterable[float]], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]], None, Iterable[float]], float], Fun[[Iterable[Iterable[int]], None, Iterable[float]], float], Fun[[Iterable[Iterable[float]], None, Iterable[float]], float], Fun[[Iterable[Iterable[complex]], None, Iterable[float]], complex], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], None, Iterable[float]], float], Fun[[Iterable[Iterable[Iterable[int]]], None, Iterable[float]], float], Fun[[Iterable[Iterable[Iterable[float]]], None, Iterable[float]], float], Fun[[Iterable[Iterable[Iterable[complex]]], None, Iterable[float]], complex], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], None, Iterable[float]], float], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], None, Iterable[float]], float], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], None, Iterable[float]], float], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], None, Iterable[float]], complex], # axis # 1d Fun[[bool, int, float], float], Fun[[int, int, float], float], Fun[[float, int, float], float], Fun[[complex, int, float], complex], # 1d Iterable Fun[[Iterable[bool], int, Iterable[float]], float], Fun[[Iterable[int], int, Iterable[float]], float], Fun[[Iterable[float], int, Iterable[float]], float], Fun[[Iterable[complex], int, Iterable[float]], complex], # 2d Iterable Fun[[Iterable[Iterable[bool]], int, Iterable[Iterable[bool]]], NDArray[float, :]], Fun[[Iterable[Iterable[int]], int, Iterable[Iterable[int]]], NDArray[float, :]], Fun[[Iterable[Iterable[float]], int, Iterable[ Iterable[float]]], NDArray[float, :]], 
Fun[[Iterable[Iterable[complex]], int, Iterable[ Iterable[complex]]], NDArray[complex, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], int, Iterable[ Iterable[Iterable[bool]]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int, Iterable[ Iterable[Iterable[int]]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], int, Iterable[ Iterable[Iterable[float]]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], int, Iterable[ Iterable[Iterable[complex]]]], NDArray[complex, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], int, Iterable[ Iterable[Iterable[Iterable[bool]]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int, Iterable[ Iterable[Iterable[Iterable[int]]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], int, Iterable[ Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], int, Iterable[ Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :]], ] _numpy_unary_op_bool_axis_signature = Union[ # no axis # 1d Fun[[bool], bool], Fun[[int], bool], Fun[[float], bool], Fun[[complex], bool], # 1d Iterable Fun[[Iterable[bool]], bool], Fun[[Iterable[int]], bool], Fun[[Iterable[float]], bool], Fun[[Iterable[complex]], bool], # 2d Iterable Fun[[Iterable[Iterable[bool]]], bool], Fun[[Iterable[Iterable[int]]], bool], Fun[[Iterable[Iterable[float]]], bool], Fun[[Iterable[Iterable[complex]]], bool], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]]], bool], Fun[[Iterable[Iterable[Iterable[int]]]], bool], Fun[[Iterable[Iterable[Iterable[float]]]], bool], Fun[[Iterable[Iterable[Iterable[complex]]]], bool], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], bool], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], bool], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], bool], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], bool], # 
axis # 1d Fun[[bool, int], bool], Fun[[int, int], bool], Fun[[float, int], bool], Fun[[complex, int], bool], # 1d Iterable Fun[[Iterable[bool], int], bool], Fun[[Iterable[int], int], bool], Fun[[Iterable[float], int], bool], Fun[[Iterable[complex], int], bool], # 2d Iterable Fun[[Iterable[Iterable[bool]], int], NDArray[bool, :]], Fun[[Iterable[Iterable[int]], int], NDArray[bool, :]], Fun[[Iterable[Iterable[float]], int], NDArray[bool, :]], Fun[[Iterable[Iterable[complex]], int], NDArray[bool, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], int], NDArray[bool, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[bool, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], int], NDArray[bool, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], int], NDArray[bool, :, :]], # 4d Iterable Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], int], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], int], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], int], NDArray[bool, :, :, :]], ] _numpy_binary_op_signature = Union[ # 1d Fun[[bool, bool], bool], Fun[[int, int], int], Fun[[float, float], float], Fun[[complex, complex], complex], # 1d Iterable Fun[[Iterable[bool], Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[bool], bool], NDArray[bool, :]], Fun[[bool, Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int], Iterable[int]], NDArray[int, :]], Fun[[Iterable[int], int], NDArray[int, :]], Fun[[int, Iterable[int]], NDArray[int, :]], Fun[[Iterable[float], Iterable[float]], NDArray[float, :]], Fun[[Iterable[float], float], NDArray[float, :]], Fun[[float, Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex], Iterable[complex]], NDArray[complex, :]], Fun[[Iterable[complex], complex], NDArray[complex, :]], Fun[[complex, Iterable[complex]], NDArray[complex, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]], 
Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[bool], Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[bool]], NDArray[bool, :, :]], Fun[[bool, Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[bool]], bool], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[int], Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int]], NDArray[int, :, :]], Fun[[int, Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[float], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[float]], NDArray[float, :, :]], Fun[[float, Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], float], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[complex], Iterable[Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[complex]], NDArray[complex, :, :]], Fun[[complex, Iterable[Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], complex], NDArray[complex, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[ Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[bool], Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[bool, Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[bool]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[bool]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], bool], NDArray[bool, :, :, :]], 
Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[int]], Iterable[ Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[int], Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[int, Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[int]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[int]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[float], Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[float, Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[float]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[float]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], float], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[Iterable[complex], Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[complex, Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[complex]]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[complex]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], complex], NDArray[complex, :, :, :]], ] _numpy_binary_op_bool_signature = Union[ # 1d Fun[[bool, bool], bool], Fun[[int, int], bool], Fun[[float, float], bool], 
Fun[[complex, complex], bool], # 1d Iterable Fun[[Iterable[bool], Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[bool], bool], NDArray[bool, :]], Fun[[bool, Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int], Iterable[int]], NDArray[bool, :]], Fun[[Iterable[int], int], NDArray[bool, :]], Fun[[int, Iterable[int]], NDArray[bool, :]], Fun[[Iterable[float], Iterable[float]], NDArray[bool, :]], Fun[[Iterable[float], float], NDArray[bool, :]], Fun[[float, Iterable[float]], NDArray[bool, :]], Fun[[Iterable[complex], Iterable[complex]], NDArray[bool, :]], Fun[[Iterable[complex], complex], NDArray[bool, :]], Fun[[complex, Iterable[complex]], NDArray[bool, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]], Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[bool], Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[bool]], NDArray[bool, :, :]], Fun[[bool, Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[bool]], bool], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]]], NDArray[bool, :, :]], Fun[[Iterable[int], Iterable[Iterable[int]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int]], NDArray[bool, :, :]], Fun[[int, Iterable[Iterable[int]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]], int], NDArray[bool, :, :]], Fun[[Iterable[Iterable[float]], Iterable[Iterable[float]]], NDArray[bool, :, :]], Fun[[Iterable[float], Iterable[Iterable[float]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[float]], Iterable[float]], NDArray[bool, :, :]], Fun[[float, Iterable[Iterable[float]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[float]], float], NDArray[bool, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[complex]]], NDArray[bool, :, :]], Fun[[Iterable[complex], Iterable[Iterable[complex]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[complex]], NDArray[bool, :, :]], Fun[[complex, Iterable[Iterable[complex]]], NDArray[bool, 
:, :]], Fun[[Iterable[Iterable[complex]], complex], NDArray[bool, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[ Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[bool], Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[bool, Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[bool]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[bool]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], bool], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[Iterable[int]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[int]], Iterable[ Iterable[Iterable[int]]]], NDArray[bool, :, :, :]], Fun[[Iterable[int], Iterable[Iterable[Iterable[int]]]], NDArray[bool, :, :, :]], Fun[[int, Iterable[Iterable[Iterable[int]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[int]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[int]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[Iterable[float]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[Iterable[float]]]], NDArray[bool, :, :, :]], Fun[[Iterable[float], Iterable[Iterable[Iterable[float]]]], NDArray[bool, :, :, :]], Fun[[float, Iterable[Iterable[Iterable[float]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[float]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[float]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], float], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[Iterable[complex]]]], NDArray[bool, 
:, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[Iterable[complex]]]], NDArray[bool, :, :, :]], Fun[[Iterable[complex], Iterable[Iterable[Iterable[complex]]]], NDArray[bool, :, :, :]], Fun[[complex, Iterable[Iterable[Iterable[complex]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[complex]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[complex]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], complex], NDArray[bool, :, :, :]], ] _numpy_binary_op_float_signature = Union[ # 1d Fun[[bool, bool], float], Fun[[int, int], float], Fun[[float, float], float], Fun[[complex, complex], complex], # 1d Iterable Fun[[Iterable[bool], Iterable[bool]], NDArray[float, :]], Fun[[Iterable[bool], bool], NDArray[float, :]], Fun[[bool, Iterable[bool]], NDArray[float, :]], Fun[[Iterable[int], Iterable[int]], NDArray[float, :]], Fun[[Iterable[int], int], NDArray[float, :]], Fun[[int, Iterable[int]], NDArray[float, :]], Fun[[Iterable[float], Iterable[float]], NDArray[float, :]], Fun[[Iterable[float], float], NDArray[float, :]], Fun[[float, Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex], Iterable[complex]], NDArray[complex, :]], Fun[[Iterable[complex], complex], NDArray[complex, :]], Fun[[complex, Iterable[complex]], NDArray[complex, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]], Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[bool], Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[bool]], NDArray[float, :, :]], Fun[[bool, Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[bool]], bool], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[int], Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int]], NDArray[float, :, :]], Fun[[int, Iterable[Iterable[int]]], NDArray[float, :, :]], 
Fun[[Iterable[Iterable[int]], int], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[float], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[float]], NDArray[float, :, :]], Fun[[float, Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], float], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[complex], Iterable[Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[complex]], NDArray[complex, :, :]], Fun[[complex, Iterable[Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], complex], NDArray[complex, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[ Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[bool], Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[bool, Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[bool]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[bool]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], bool], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[int], Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[int, Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[int]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[int]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], 
NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[float], Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[float, Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[float]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[float]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], float], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[Iterable[complex], Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[complex, Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[complex]]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[complex]], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], complex], NDArray[complex, :, :, :]], ] _numpy_ternary_op_signature = Union[ # scalar Fun[[int, int, int], int], Fun[[float, float, float], float], Fun[[complex, complex, complex], complex], # 1D Fun[[Iterable[int], int, int], NDArray[int, :]], Fun[[Iterable[int], Iterable[int], int], NDArray[int, :]], Fun[[Iterable[int], int, Iterable[int]], NDArray[int, :]], Fun[[Iterable[int], Iterable[int], Iterable[int]], NDArray[int, :]], Fun[[Iterable[float], float, float], NDArray[float, :]], Fun[[Iterable[float], Iterable[float], float], NDArray[float, :]], Fun[[Iterable[float], float, Iterable[float]], NDArray[float, :]], Fun[[Iterable[float], Iterable[float], Iterable[float]], NDArray[float, :]], 
Fun[[Iterable[complex], complex, complex], NDArray[complex, :]], Fun[[Iterable[complex], Iterable[complex], complex], NDArray[complex, :]], Fun[[Iterable[complex], complex, Iterable[complex]], NDArray[complex, :]], Fun[[Iterable[complex], Iterable[complex], Iterable[complex]], NDArray[complex, :]], # 2D Fun[[Iterable[Iterable[int]], int, int], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int], Iterable[int]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], int, Iterable[int]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]], int], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], int, Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[int]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int], Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]], float, float], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[float], float], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[float], Iterable[float]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], float, Iterable[float]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[float]], float], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], float, Iterable[ Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[float]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[float], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]], complex, complex], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ complex], complex], 
NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[complex], Iterable[complex]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], complex, Iterable[complex]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[complex]], complex], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], complex, Iterable[ Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[complex]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[complex], Iterable[Iterable[complex]]], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[Iterable[complex]]], NDArray[complex, :, :]], ] _numpy_int_binary_op_signature = Union[ # 1d Fun[[bool, bool], bool], Fun[[int, int], int], # 1d Iterable Fun[[Iterable[bool], Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[bool], bool], NDArray[bool, :]], Fun[[bool, Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int], Iterable[int]], NDArray[int, :]], Fun[[Iterable[int], int], NDArray[int, :]], Fun[[int, Iterable[int]], NDArray[int, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]], Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[bool], Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[bool]], NDArray[bool, :, :]], Fun[[bool, Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[bool]], bool], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[int], Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int]], NDArray[int, :, :]], Fun[[int, Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], int], NDArray[int, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[ 
Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[bool], Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[bool, Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[bool]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[bool]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], bool], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[int]], Iterable[ Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[int], Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[int, Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[int]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[int]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[int, :, :, :]], ] _numpy_binary_op_float_no_complex_signature = Union[ # 1d Fun[[bool, bool], float], Fun[[int, int], float], Fun[[float, float], float], # 1d Iterable Fun[[Iterable[bool], Iterable[bool]], NDArray[float, :]], Fun[[Iterable[bool], bool], NDArray[float, :]], Fun[[bool, Iterable[bool]], NDArray[float, :]], Fun[[Iterable[int], Iterable[int]], NDArray[float, :]], Fun[[Iterable[int], int], NDArray[float, :]], Fun[[int, Iterable[int]], NDArray[float, :]], Fun[[Iterable[float], Iterable[float]], NDArray[float, :]], Fun[[Iterable[float], float], NDArray[float, :]], Fun[[float, Iterable[float]], NDArray[float, :]], # 2d Iterable Fun[[Iterable[Iterable[bool]], Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[bool], Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[bool]], NDArray[float, :, :]], Fun[[bool, Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[bool]], bool], 
NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[int], Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], Iterable[int]], NDArray[float, :, :]], Fun[[int, Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], int], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[float], Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], Iterable[float]], NDArray[float, :, :]], Fun[[float, Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], float], NDArray[float, :, :]], # 3d Iterable Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[bool]], Iterable[ Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[bool], Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[bool, Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[bool]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[bool]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[bool]]], bool], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[int]], Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[int], Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[int, Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[int]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[int]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ 
Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[float], Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[float, Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[float]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[float]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], float], NDArray[float, :, :, :]], ] _numpy_allclose_signature = Union[ _numpy_binary_op_signature.__args__ + tuple([Fun[c.__args__[:-1] + (float,), c.__args__[-1]] for c in _numpy_unary_op_signature.__args__] + [Fun[c.__args__[:-1] + (float, float), c.__args__[-1]] for c in _numpy_unary_op_signature.__args__] + [Fun[c.__args__[:-1] + (float, float, bool), c.__args__[-1]] for c in _numpy_unary_op_signature.__args__] ) ] _numpy_around_signature = Union[ _numpy_unary_op_float_signature.__args__ + tuple([Fun[c.__args__[:-1] + (int,), c.__args__[-1]] for c in _numpy_unary_op_float_signature.__args__]) ] _functools_reduce_signature = Union[ Fun[[Fun[[T0, T0], T0], Iterable[T0]], T0], Fun[[Fun[[T0, T1], T0], Iterable[T1], T0], T0], ] def partialsum(seq): s = tuple() for elt in seq: s += elt, yield s _operator_add_signature = Union[ _numpy_binary_op_signature.__args__ + (Fun[[str, str], str], Fun[[List[T0], List[T0]], List[T0]],) + tuple(Fun[[Tuple[t0], Tuple[t1]], Tuple[t0 + t1]] for t0 in partialsum([T0, T1, T2, T3]) for t1 in partialsum([T4, T5, T6, T7])) ] _operator_eq_signature = Union[ _numpy_binary_op_bool_signature.__args__ + (Fun[[str, str], bool], Fun[[List[T0], List[T0]], bool], Fun[[Set[T0], Set[T0]], bool], Fun[[T0, None], bool], Fun[[None, T0], bool], Fun[[Dict[T0, T1], Dict[T0, T1]], bool],) + tuple(Fun[[Tuple[t0], Tuple[t1]], Tuple[t0 + t1]] for t0 in partialsum([T0, T1, T2, T3]) for t1 in partialsum([T4, T5, T6, T7])) ] 
_operator_sub_signature = Union[ _numpy_binary_op_signature.__args__ + (Fun[[Set[T0], Set[T0]], Set[T0]],) ] _operator_mod_signature = Union[ _numpy_binary_op_signature.__args__ + (Fun[[str, T0], str],) ] _operator_mul_signature = Union[ _numpy_binary_op_signature.__args__ + (Fun[[str, int], str], Fun[[int, str], str], Fun[[List[T0], int], List[T0]], Fun[[int, List[T0]], List[T0]]) ] _operator_contains_signature = Fun[[Iterable[T0], T0], bool] _operator_getitem_signature = Union[ Fun[[List[T0], int], T0], Fun[[List[T0], slice], List[T0]], Fun[[Dict[T0, T1], T0], T1], Fun[[str, int], str], Fun[[str, slice], str], # arrays Fun[[NDArray[T0, :], T1], T2], # large tuple Fun[[Iterable[T0], int], T0], ] _numpy_farray_signature = Union[ # no dtype # scalar Fun[[bool], float], Fun[[int], float], Fun[[float], float], # 1D array Fun[[Iterable[bool]], NDArray[float, :]], Fun[[Iterable[int]], NDArray[float, :]], Fun[[Iterable[float]], NDArray[float, :]], # 2D array Fun[[Iterable[Iterable[bool]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], # bool dtype # scalar Fun[[bool, _bool_signature], float], Fun[[int, _bool_signature], float], Fun[[float, _bool_signature], float], Fun[[complex, _bool_signature], float], # 1D array Fun[[Iterable[bool], _bool_signature], NDArray[float, :]], Fun[[Iterable[int], _bool_signature], NDArray[float, :]], Fun[[Iterable[float], _bool_signature], NDArray[float, :]], # 2D array 
Fun[[Iterable[Iterable[bool]], _bool_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], _bool_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], _bool_signature], NDArray[float, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _bool_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], _bool_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _bool_signature], NDArray[float, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _bool_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], _bool_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _bool_signature], NDArray[float, :, :, :, :]], # int dtype Fun[[bool, _int_signature], float], Fun[[int, _int_signature], float], Fun[[float, _int_signature], float], Fun[[complex, _int_signature], float], # 1D array Fun[[Iterable[bool], _int_signature], NDArray[float, :]], Fun[[Iterable[int], _int_signature], NDArray[float, :]], Fun[[Iterable[float], _int_signature], NDArray[float, :]], # 2D array Fun[[Iterable[Iterable[bool]], _int_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], _int_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], _int_signature], NDArray[float, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _int_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], _int_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _int_signature], NDArray[float, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _int_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], _int_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _int_signature], NDArray[float, :, :, :, :]], # float dtype # scalar Fun[[bool, _float_signature], float], Fun[[int, 
_float_signature], float], Fun[[float, _float_signature], float], Fun[[complex, _float_signature], float], # 1D array Fun[[Iterable[bool], _float_signature], NDArray[float, :]], Fun[[Iterable[int], _float_signature], NDArray[float, :]], Fun[[Iterable[float], _float_signature], NDArray[float, :]], Fun[[Iterable[complex], _float_signature], NDArray[float, :]], # 2D array Fun[[Iterable[Iterable[bool]], _float_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], _float_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], _float_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]], _float_signature], NDArray[float, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _float_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], _float_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _float_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], _float_signature], NDArray[float, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _float_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], _float_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _float_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], _float_signature], NDArray[float, :, :, :, :]], # complex dtype # scalar Fun[[bool, _complex_signature], complex], Fun[[int, _complex_signature], complex], Fun[[float, _complex_signature], complex], Fun[[complex, _complex_signature], complex], # 1D array Fun[[Iterable[bool], _complex_signature], NDArray[complex, :]], Fun[[Iterable[int], _complex_signature], NDArray[complex, :]], Fun[[Iterable[float], _complex_signature], NDArray[complex, :]], Fun[[Iterable[complex], _complex_signature], NDArray[complex, :]], # 2D array Fun[[Iterable[Iterable[bool]], _complex_signature], NDArray[complex, :, :]], 
Fun[[Iterable[Iterable[int]], _complex_signature], NDArray[complex, :, :]], Fun[[Iterable[Iterable[float]], _complex_signature], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], _complex_signature], NDArray[complex, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _complex_signature], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], _complex_signature], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _complex_signature], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], _complex_signature], NDArray[complex, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], _complex_signature], NDArray[complex, :, :, :, :]], ] _numpy_array_signature = Union[ # no dtype # scalar Fun[[bool], bool], Fun[[int], int], Fun[[float], float], Fun[[complex], complex], # 1D array Fun[[Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int]], NDArray[int, :]], Fun[[Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex]], NDArray[complex, :]], # 2D array Fun[[Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :, :, :, :]], 
Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :, :]], # bool dtype # scalar Fun[[bool, _bool_signature], bool], Fun[[int, _bool_signature], bool], Fun[[float, _bool_signature], bool], Fun[[complex, _bool_signature], bool], # 1D array Fun[[Iterable[bool], _bool_signature], NDArray[bool, :]], Fun[[Iterable[int], _bool_signature], NDArray[bool, :]], Fun[[Iterable[float], _bool_signature], NDArray[bool, :]], Fun[[Iterable[complex], _bool_signature], NDArray[bool, :]], # 2D array Fun[[Iterable[Iterable[bool]], _bool_signature], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]], _bool_signature], NDArray[bool, :, :]], Fun[[Iterable[Iterable[float]], _bool_signature], NDArray[bool, :, :]], Fun[[Iterable[Iterable[complex]], _bool_signature], NDArray[bool, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _bool_signature], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], _bool_signature], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _bool_signature], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], _bool_signature], NDArray[bool, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _bool_signature], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], _bool_signature], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _bool_signature], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], _bool_signature], NDArray[bool, :, :, :, :]], # int dtype # scalar Fun[[bool, _int_signature], int], Fun[[int, _int_signature], int], Fun[[float, _int_signature], int], Fun[[complex, _int_signature], int], # 1D array Fun[[Iterable[bool], _int_signature], NDArray[int, :]], Fun[[Iterable[int], _int_signature], NDArray[int, :]], 
Fun[[Iterable[float], _int_signature], NDArray[int, :]], Fun[[Iterable[complex], _int_signature], NDArray[int, :]], # 2D array Fun[[Iterable[Iterable[bool]], _int_signature], NDArray[int, :, :]], Fun[[Iterable[Iterable[int]], _int_signature], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]], _int_signature], NDArray[int, :, :]], Fun[[Iterable[Iterable[complex]], _int_signature], NDArray[int, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _int_signature], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], _int_signature], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _int_signature], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], _int_signature], NDArray[int, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _int_signature], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], _int_signature], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _int_signature], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], _int_signature], NDArray[int, :, :, :, :]], # float dtype # scalar Fun[[bool, _float_signature], float], Fun[[int, _float_signature], float], Fun[[float, _float_signature], float], Fun[[complex, _float_signature], float], # 1D array Fun[[Iterable[bool], _float_signature], NDArray[float, :]], Fun[[Iterable[int], _float_signature], NDArray[float, :]], Fun[[Iterable[float], _float_signature], NDArray[float, :]], Fun[[Iterable[complex], _float_signature], NDArray[float, :]], # 2D array Fun[[Iterable[Iterable[bool]], _float_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[int]], _float_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[float]], _float_signature], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]], _float_signature], NDArray[float, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _float_signature], NDArray[float, :, :, :]], 
Fun[[Iterable[Iterable[Iterable[int]]], _float_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _float_signature], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], _float_signature], NDArray[float, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _float_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], _float_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _float_signature], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], _float_signature], NDArray[float, :, :, :, :]], # complex dtype # scalar Fun[[bool, _complex_signature], complex], Fun[[int, _complex_signature], complex], Fun[[float, _complex_signature], complex], Fun[[complex, _complex_signature], complex], # 1D array Fun[[Iterable[bool], _complex_signature], NDArray[complex, :]], Fun[[Iterable[int], _complex_signature], NDArray[complex, :]], Fun[[Iterable[float], _complex_signature], NDArray[complex, :]], Fun[[Iterable[complex], _complex_signature], NDArray[complex, :]], # 2D array Fun[[Iterable[Iterable[bool]], _complex_signature], NDArray[complex, :, :]], Fun[[Iterable[Iterable[int]], _complex_signature], NDArray[complex, :, :]], Fun[[Iterable[Iterable[float]], _complex_signature], NDArray[complex, :, :]], Fun[[Iterable[Iterable[complex]], _complex_signature], NDArray[complex, :, :]], # 3D array Fun[[Iterable[Iterable[Iterable[bool]]], _complex_signature], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]], _complex_signature], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]], _complex_signature], NDArray[complex, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]], _complex_signature], NDArray[complex, :, :, :]], # 4D array Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], 
_complex_signature], NDArray[complex, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], _complex_signature], NDArray[complex, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], _complex_signature], NDArray[complex, :, :, :, :]], ] # each module consist in a module_name <> set of symbols MODULES = { "builtins": { "pythran": { "abssqr": ConstFunctionIntr(), "static_list": ReadOnceFunctionIntr( signature=Fun[[Iterable[T0]], List[T0]], return_alias=lambda args: {args[0]}), "is_none": ConstFunctionIntr(), "kwonly": ConstFunctionIntr(), "len_set": ConstFunctionIntr(signature=Fun[[Iterable[T0]], int]), "make_shape": ConstFunctionIntr(), "static_if": ConstFunctionIntr(), "StaticIfBreak": ConstFunctionIntr(), "StaticIfCont": ConstFunctionIntr(), "StaticIfNoReturn": ConstFunctionIntr(), "StaticIfReturn": ConstFunctionIntr(), }, "abs": ConstFunctionIntr( signature=Union[ Fun[[int], int], Fun[[float], float], Fun[[complex], float], Fun[[NDArray[int, :]], NDArray[int, :]], Fun[[NDArray[int, :, :]], NDArray[int, :, :]], Fun[[NDArray[int, :, :, :]], NDArray[int, :, :, :]], Fun[[NDArray[int, :, :, :, :]], NDArray[int, :, :, :, :]], Fun[[NDArray[float, :]], NDArray[float, :]], Fun[[NDArray[float, :, :]], NDArray[float, :, :]], Fun[[NDArray[float, :, :, :]], NDArray[float, :, :, :]], Fun[[NDArray[float, :, :, :, :]], NDArray[float, :, :, :, :]], Fun[[NDArray[complex, :]], NDArray[float, :]], Fun[[NDArray[complex, :, :]], NDArray[float, :, :]], Fun[[NDArray[complex, :, :, :]], NDArray[float, :, :, :]], Fun[[NDArray[complex, :, :, :, :]], NDArray[float, :, :, :, :]] ], ), "BaseException": ConstExceptionIntr(), "SystemExit": ConstExceptionIntr(), "KeyboardInterrupt": ConstExceptionIntr(), "GeneratorExit": ConstExceptionIntr(), "Exception": ExceptionClass(CLASSES["Exception"]), "StopIteration": ConstExceptionIntr(), "Warning": ConstExceptionIntr(), "BytesWarning": ConstExceptionIntr(), "UnicodeWarning": ConstExceptionIntr(), "ImportWarning": 
ConstExceptionIntr(), "FutureWarning": ConstExceptionIntr(), "UserWarning": ConstExceptionIntr(), "SyntaxWarning": ConstExceptionIntr(), "RuntimeWarning": ConstExceptionIntr(), "PendingDeprecationWarning": ConstExceptionIntr(), "DeprecationWarning": ConstExceptionIntr(), "BufferError": ConstExceptionIntr(), "ArithmeticError": ConstExceptionIntr(), "AssertionError": ConstExceptionIntr(), "AttributeError": ConstExceptionIntr(), "EnvironmentError": ConstExceptionIntr(), "EOFError": ConstExceptionIntr(), "ImportError": ConstExceptionIntr(), "LookupError": ConstExceptionIntr(), "MemoryError": ConstExceptionIntr(), "NameError": ConstExceptionIntr(), "ReferenceError": ConstExceptionIntr(), "RuntimeError": ConstExceptionIntr(), "SyntaxError": ConstExceptionIntr(), "SystemError": ConstExceptionIntr(), "TypeError": ConstExceptionIntr(), "ValueError": ConstExceptionIntr(), "FloatingPointError": ConstExceptionIntr(), "OverflowError": ConstExceptionIntr(), "ZeroDivisionError": ConstExceptionIntr(), "IOError": ConstExceptionIntr(), "OSError": ConstExceptionIntr(), "IndexError": ConstExceptionIntr(), "KeyError": ConstExceptionIntr(), "UnboundLocalError": ConstExceptionIntr(), "NotImplementedError": ConstExceptionIntr(), "IndentationError": ConstExceptionIntr(), "TabError": ConstExceptionIntr(), "UnicodeError": ConstExceptionIntr(), # "UnicodeDecodeError": ConstExceptionIntr(), # "UnicodeEncodeError": ConstExceptionIntr(), # "UnicodeTranslateError": ConstExceptionIntr(), "all": ReadOnceFunctionIntr(signature=Fun[[Iterable[T0]], bool]), "any": ReadOnceFunctionIntr(signature=Fun[[Iterable[T0]], bool]), "bin": ConstFunctionIntr(signature=Fun[[int], str]), "bool": ConstFunctionIntr(signature=_bool_signature), "chr": ConstFunctionIntr(signature=Fun[[int], str]), "complex": ClassWithConstConstructor( CLASSES['complex'], signature=_complex_signature ), "dict": ClassWithReadOnceConstructor( CLASSES['dict'], signature=Union[ Fun[[], Dict[T0, T1]], Fun[[Iterable[Tuple[T0, T1]]], Dict[T0, 
T1]], ], ), "divmod": ConstFunctionIntr( signature=Union[ Fun[[int, int], Tuple[int, int]], Fun[[float, int], Tuple[float, float]], Fun[[int, float], Tuple[float, float]], Fun[[float, float], Tuple[float, float]], ], ), "enumerate": ReadOnceFunctionIntr( signature=Union[ Fun[[Iterable[T0]], Generator[Tuple[int, T0]]], Fun[[Iterable[T0], int], Generator[Tuple[int, T0]]], ], ), "filter": ReadOnceFunctionIntr( signature=Union[ Fun[[None, Iterable[T0]], List[T0]], Fun[[Fun[[T0], bool], Iterable[T0]], List[T0]], ], ), "float": ClassWithConstConstructor( CLASSES['float'], signature=_float_signature ), "getattr": ConstFunctionIntr(), "hex": ConstFunctionIntr(signature=Fun[[int], str]), "id": ConstFunctionIntr(signature=Fun[[T0], int]), "int": ConstFunctionIntr(signature=_int_signature), "isinstance": ConstFunctionIntr(signature=Fun[[T0, T1], bool]), "iter": FunctionIntr( signature=Fun[[Iterable[T0]], Generator[T0]]), # not const "len": ConstFunctionIntr( signature=Fun[[Sized], int], return_range=interval.positive_values ), "list": ClassWithReadOnceConstructor( CLASSES['list'], signature=Union[ Fun[[], List[T0]], Fun[[Iterable[T0]], List[T0]] ], ), "map": ReadOnceFunctionIntr( signature=Union[ Fun[[None, Iterable[T0]], List[T0]], Fun[[None, Iterable[T0], Iterable[T1]], List[Tuple[T0, T1]]], Fun[[None, Iterable[T0], Iterable[T1], Iterable[T2]], List[Tuple[T0, T1, T2]]], Fun[[None, Iterable[T0], Iterable[T1], Iterable[T2], Iterable[T3]], List[Tuple[T0, T1, T2, T3]]], Fun[[Fun[[T0], T7], Iterable[T0]], List[T7]], Fun[[Fun[[T0, T1], T7], Iterable[T0], Iterable[T1]], List[T7]], Fun[[Fun[[T0, T1, T2], T7], Iterable[T0], Iterable[T1], Iterable[T2]], List[T7]], Fun[[Fun[[T0, T1, T2, T3], T7], Iterable[T0], Iterable[T1], Iterable[T2], Iterable[T3]], List[T7]], ] ), "max": ReadOnceFunctionIntr( kwonlyargs=('key',), signature=Union[ Fun[[T0, T0], T0], Fun[[T0, T0, T0], T0], Fun[[T0, T0, T0, T0], T0], Fun[[Iterable[T0]], T0], ], return_range=interval.max_values ), "min": 
ReadOnceFunctionIntr( kwonlyargs=('key', 'default'), signature=Union[ Fun[[int, int], int], Fun[[float, float], float], Fun[[Iterable[T0]], T0], ], return_range=interval.min_values ), "next": FunctionIntr( # not const signature=Union[ Fun[[Iterable[T0]], T0], Fun[[Iterable[T0], T0], T0], ], ), # not const "oct": ConstFunctionIntr(signature=Fun[[int], str]), "ord": ConstFunctionIntr( signature=Fun[[str], int], return_range=interval.ord_values ), "open": ConstFunctionIntr( signature=Union[ Fun[[str], File], Fun[[str, str], File], ], global_effects=True ), "print": ConstFunctionIntr(global_effects=True), "pow": ConstFunctionIntr( signature=Union[ Fun[[int, int], int], Fun[[int, int, int], int], Fun[[int, float], float], Fun[[int, float, int], float], Fun[[float, float], float], Fun[[float, float, int], float], ] ), "range": ConstFunctionIntr( signature=Union[ Fun[[int], List[int]], Fun[[int, int], List[int]], Fun[[int, int, int], List[int]], ], return_range_content=interval.range_values ), "reduce": ReadOnceFunctionIntr(signature=_functools_reduce_signature), "reversed": ReadOnceFunctionIntr( signature=Fun[[Iterable[T0]], Iterable[T0]] ), "round": ConstFunctionIntr( signature=Union[ Fun[[float], float], Fun[[float, int], float], ], ), "set": ClassWithReadOnceConstructor( CLASSES['set'], signature=Union[ Fun[[], Set[T0]], Fun[[Iterable[T0]], Set[T0]] ], ), "slice": ClassWithConstConstructor(CLASSES['slice']), "sorted": ConstFunctionIntr(signature=Fun[[Iterable[T0]], List[T0]]), "str": ClassWithConstConstructor( CLASSES['str'], signature=Fun[[T0], str], ), "sum": ReadOnceFunctionIntr( signature=Union[ Fun[[Iterable[int]], int], Fun[[Iterable[float]], float], Fun[[Iterable[int], int], int], Fun[[Iterable[float], float], float], ], ), "tuple": ReadOnceFunctionIntr( signature=Union[ Fun[[], Tuple[()]], Fun[[Tuple[T0]], Tuple[T0]], Fun[[Tuple[T0, T1]], Tuple[T0, T1]], Fun[[Tuple[T0, T1, T2]], Tuple[T0, T1, T2]], # FIXME: We accept some type loss here Fun[[List[T0]], 
Iterable[T0]], ], ), "type": ConstFunctionIntr(), "zip": ReadOnceFunctionIntr( signature=Union[ Fun[[], List[T0]], Fun[[Iterable[T0]], List[Tuple[T0]]], Fun[[Iterable[T0], Iterable[T1]], List[Tuple[T0, T1]]], Fun[[Iterable[T0], Iterable[T1], Iterable[T2]], List[Tuple[T0, T1, T2]]], Fun[[Iterable[T0], Iterable[T1], Iterable[T2], Iterable[T3]], List[Tuple[T0, T1, T2, T3]]], ] ), "False": ConstantIntr( signature=bool, return_range=lambda _: interval.Range(0, 0) ), "None": ConstantIntr(signature=None), "True": ConstantIntr( signature=bool, return_range=lambda _: interval.Range(1, 1) ), }, "scipy": { "special": { "binom": ConstFunctionIntr( signature=_numpy_binary_op_float_signature ), "gammaln": ConstFunctionIntr( signature=_numpy_unary_op_float_signature ), "gamma": ConstFunctionIntr( signature=_numpy_unary_op_float_signature ), "hankel1": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "hankel2": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "iv": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "ivp": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "jv": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "jvp": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "kv": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "kvp": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "yv": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "yvp": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "spherical_jn": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), "spherical_yn": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_signature ), } }, "numpy": { "abs": ConstFunctionIntr(signature=_numpy_unary_op_signature), "absolute": ConstFunctionIntr(signature=_numpy_ones_signature), "add": UFunc( REDUCED_BINARY_UFUNC, signature=_numpy_binary_op_signature, ), "alen": ConstFunctionIntr( 
signature=Union[ # scalar Fun[[bool], int], Fun[[int], int], Fun[[float], int], Fun[[complex], int], # Sized Fun[[Sized], int], ], return_range=interval.positive_values ), "all": ConstMethodIntr( signature=_numpy_unary_op_bool_axis_signature, return_range=interval.bool_values ), "allclose": ConstFunctionIntr( signature=_numpy_allclose_signature, return_range=interval.bool_values ), "alltrue": ConstFunctionIntr( signature=_numpy_unary_op_bool_axis_signature, return_range=interval.bool_values ), "amax": ConstFunctionIntr(signature=_numpy_unary_op_axis_signature), "amin": ConstFunctionIntr(signature=_numpy_unary_op_axis_signature), "angle": ConstFunctionIntr(signature=_numpy_unary_op_angle_signature), "any": ConstMethodIntr( signature=_numpy_unary_op_bool_axis_signature, return_range=interval.bool_values ), "append": ConstFunctionIntr( signature=Union[ # no axis -> flattened output # scalar Fun[[bool, bool], NDArray[bool, :]], Fun[[int, int], NDArray[int, :]], Fun[[float, float], NDArray[float, :]], Fun[[complex, float], NDArray[float, :]], # 1D Array # FIXME: second argument could have a shape larger than first Fun[[Iterable[bool], bool], NDArray[bool, :]], Fun[[Iterable[int], int], NDArray[int, :]], Fun[[Iterable[float], float], NDArray[float, :]], Fun[[Iterable[complex], complex], NDArray[complex, :]], Fun[[Iterable[bool], Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int], Iterable[int]], NDArray[int, :]], Fun[[Iterable[float], Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex], Iterable[complex]], NDArray[complex, :]], # 2D Array Fun[[Iterable[Iterable[bool]], bool], NDArray[bool, :]], Fun[[Iterable[Iterable[int]], int], NDArray[int, :]], Fun[[Iterable[Iterable[float]], float], NDArray[float, :]], Fun[[Iterable[Iterable[complex]], complex], NDArray[complex, :]], Fun[[Iterable[Iterable[bool]], Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[Iterable[int]], Iterable[int]], NDArray[int, :]], Fun[[Iterable[Iterable[float]], Iterable[float]], 
NDArray[float, :]], Fun[[Iterable[Iterable[complex]], Iterable[ complex]], NDArray[complex, :]], Fun[[Iterable[Iterable[bool]], Iterable[ Iterable[bool]]], NDArray[bool, :]], Fun[[Iterable[Iterable[int]], Iterable[ Iterable[int]]], NDArray[int, :]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[float]]], NDArray[float, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[complex]]], NDArray[complex, :]], Fun[[bool, Iterable[Iterable[bool]]], NDArray[bool, :]], Fun[[int, Iterable[Iterable[int]]], NDArray[int, :]], Fun[[float, Iterable[Iterable[float]]], NDArray[float, :]], Fun[[complex, Iterable[Iterable[complex]]], NDArray[complex, :]], Fun[[Iterable[bool], Iterable[Iterable[bool]]], NDArray[bool, :]], Fun[[Iterable[int], Iterable[Iterable[int]]], NDArray[int, :]], Fun[[Iterable[float], Iterable[Iterable[float]]], NDArray[float, :]], Fun[[Iterable[complex], Iterable[Iterable[complex]]], NDArray[complex, :]], Fun[[Iterable[Iterable[bool]], Iterable[ Iterable[bool]]], NDArray[bool, :]], Fun[[Iterable[Iterable[int]], Iterable[ Iterable[int]]], NDArray[int, :]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[float]]], NDArray[float, :]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[complex]]], NDArray[complex, :]], # 3D Array FIXME: same as above Fun[[Iterable[Iterable[Iterable[bool]]], bool], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[int]]], int], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[float]]], float], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[complex]]], complex], NDArray[complex, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[int]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[float]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[complex]], NDArray[complex, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[Iterable[bool]]], NDArray[bool, :]], 
Fun[[Iterable[Iterable[Iterable[int]]], Iterable[Iterable[int]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[float]]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[complex]]], NDArray[complex, :]], Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[Iterable[bool]]]], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[Iterable[int]]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[Iterable[float]]]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[Iterable[complex]]]], NDArray[complex, :]], # 4D Array FIXME: same as above Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], bool], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], int], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], float], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], complex], NDArray[complex, :]], Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], Iterable[int]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], Iterable[float]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], Iterable[complex]], NDArray[complex, :]], Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], Iterable[Iterable[bool]]], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], Iterable[Iterable[int]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], Iterable[Iterable[float]]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], Iterable[Iterable[complex]]], NDArray[complex, :]], Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], Iterable[Iterable[Iterable[int]]]], NDArray[int, :]], 
Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], Iterable[ Iterable[Iterable[float]]]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], Iterable[ Iterable[Iterable[complex]]]], NDArray[complex, :]], Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], Iterable[ Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], Iterable[ Iterable[Iterable[Iterable[int]]]]], NDArray[int, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], Iterable[ Iterable[Iterable[Iterable[float]]]]], NDArray[float, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], Iterable[ Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :]], # FIXME: same as above with None axis # axis -> same dims # 1D Fun[[Iterable[bool], Iterable[bool], int], Iterable[bool]], Fun[[Iterable[int], Iterable[int], int], Iterable[int]], Fun[[Iterable[float], Iterable[float], int], Iterable[float]], Fun[[Iterable[complex], Iterable[complex], int], Iterable[complex]], # 2D Fun[[Iterable[Iterable[bool]], Iterable[ Iterable[bool]], int], Iterable[Iterable[bool]]], Fun[[Iterable[Iterable[int]], Iterable[ Iterable[int]], int], Iterable[Iterable[int]]], Fun[[Iterable[Iterable[float]], Iterable[ Iterable[float]], int], Iterable[Iterable[float]]], Fun[[Iterable[Iterable[complex]], Iterable[ Iterable[complex]], int], Iterable[Iterable[complex]]], # 3D Fun[[Iterable[Iterable[Iterable[bool]]], Iterable[ Iterable[Iterable[bool]]], int], Iterable[Iterable[Iterable[bool]]]], Fun[[Iterable[Iterable[Iterable[int]]], Iterable[ Iterable[Iterable[int]]], int], Iterable[Iterable[Iterable[int]]]], Fun[[Iterable[Iterable[Iterable[float]]], Iterable[ Iterable[Iterable[float]]], int], Iterable[Iterable[Iterable[float]]]], Fun[[Iterable[Iterable[Iterable[complex]]], Iterable[ Iterable[Iterable[complex]]], int], Iterable[Iterable[Iterable[complex]]]], # 4D Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]], Iterable[Iterable[Iterable[Iterable[bool]]]], 
int], Iterable[Iterable[Iterable[Iterable[bool]]]]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]], Iterable[Iterable[Iterable[Iterable[int]]]], int], Iterable[Iterable[Iterable[Iterable[int]]]]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]], Iterable[Iterable[Iterable[Iterable[float]]]], int], Iterable[Iterable[Iterable[Iterable[float]]]]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]], Iterable[Iterable[Iterable[Iterable[complex]]]], int], Iterable[Iterable[Iterable[Iterable[complex]]]]], ] ), "arange": ConstFunctionIntr( signature=Union[ Fun[[float], NDArray[float, :]], Fun[[float, float], NDArray[float, :]], Fun[[float, float, float], NDArray[float, :]], Fun[[float, float, float, None], NDArray[float, :]], Fun[[float, float, float, _bool_signature], NDArray[bool, :]], Fun[[float, float, float, _int_signature], NDArray[int, :]], Fun[[float, float, float, _float_signature], NDArray[float, :]], Fun[[float, float, float, _complex_signature], NDArray[complex, :]], ], return_range_content=interval.range_values, args=('start', 'stop', 'step', 'dtype'), defaults=(1, None) ), "arccos": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "arccosh": ConstFunctionIntr( signature=_numpy_unary_op_float_signature), "arcsin": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "arcsinh": ConstFunctionIntr( signature=_numpy_unary_op_float_signature), "arctan": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "arctan2": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_float_no_complex_signature ), "arctanh": ConstFunctionIntr( signature=_numpy_unary_op_float_signature), "argmax": ConstMethodIntr( signature=_numpy_unary_op_int_axis_signature, return_range=interval.positive_values ), "argmin": ConstMethodIntr( signature=_numpy_unary_op_int_axis_signature, return_range=interval.positive_values ), "argsort": ConstMethodIntr( signature=_numpy_unary_op_int_axis_signature, return_range=interval.positive_values ), "argwhere": 
ConstFunctionIntr( signature=_numpy_unary_op_int_signature, return_range=interval.positive_values ), "around": ConstFunctionIntr(signature=_numpy_around_signature), "array": ReadOnceFunctionIntr(signature=_numpy_array_signature, args=('object', 'dtype'), defaults=(None,)), "array2string": ConstFunctionIntr( signature=_numpy_array_str_signature), "array_equal": ConstFunctionIntr(signature=Fun[[T0, T1], bool]), "array_equiv": ConstFunctionIntr(signature=Fun[[T0, T1], bool]), "array_split": ConstFunctionIntr( signature=Union[ # int split Fun[[NDArray[T0, :], int], List[NDArray[T0, :]]], # array split Fun[[NDArray[T0, :], Iterable[int]], List[NDArray[T0, :]]], ] ), "array_str": ConstFunctionIntr(signature=_numpy_array_str_signature), "asarray": ReadOnceFunctionIntr(signature=_numpy_array_signature, **extra_numpy_asarray_descr), "asarray_chkfinite": ConstFunctionIntr( signature=_numpy_array_signature), "ascontiguousarray": ConstFunctionIntr( signature=_numpy_array_signature), "asfarray": ConstFunctionIntr(signature=_numpy_farray_signature), "asscalar": ConstFunctionIntr( signature=Union[ Fun[[NDArray[bool, :]], bool], Fun[[NDArray[int, :]], int], Fun[[NDArray[float, :]], float], Fun[[NDArray[complex, :]], complex], Fun[[NDArray[bool, :, :]], bool], Fun[[NDArray[int, :, :]], int], Fun[[NDArray[float, :, :]], float], Fun[[NDArray[complex, :, :]], complex], Fun[[NDArray[bool, :, :, :]], bool], Fun[[NDArray[int, :, :, :]], int], Fun[[NDArray[float, :, :, :]], float], Fun[[NDArray[complex, :, :, :]], complex], ] ), "atleast_1d": ConstFunctionIntr( signature=Union[ # scalar Fun[[bool], NDArray[bool, :]], Fun[[int], NDArray[int, :]], Fun[[float], NDArray[float, :]], Fun[[complex], NDArray[complex, :]], # 1d Fun[[Iterable[bool]], NDArray[bool, :]], Fun[[Iterable[int]], NDArray[int, :]], Fun[[Iterable[float]], NDArray[float, :]], Fun[[Iterable[complex]], NDArray[complex, :]], # 2d+ Fun[[NDArray[T0, :, :]], NDArray[T0, :, :]], Fun[[Iterable[Iterable[bool]]], NDArray[bool, :, :]], 
Fun[[Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :, :]], # 3d Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], # 4d Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :, :]], ] ), "atleast_2d": ConstFunctionIntr( signature=Union[ # scalar Fun[[bool], NDArray[bool, :, :]], Fun[[int], NDArray[int, :, :]], Fun[[float], NDArray[float, :, :]], Fun[[complex], NDArray[complex, :, :]], # 1d Fun[[Iterable[bool]], NDArray[bool, :, :]], Fun[[Iterable[int]], NDArray[int, :, :]], Fun[[Iterable[float]], NDArray[float, :, :]], Fun[[Iterable[complex]], NDArray[complex, :, :]], # 2d+ Fun[[NDArray[T0, :, :]], NDArray[T0, :, :]], Fun[[Iterable[Iterable[bool]]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :, :]], # 3d Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], # 4d Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], 
Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :, :]], ] ), "atleast_3d": ConstFunctionIntr( signature=Union[ # scalar Fun[[bool], NDArray[bool, :, :, :]], Fun[[int], NDArray[int, :, :, :]], Fun[[float], NDArray[float, :, :, :]], Fun[[complex], NDArray[complex, :, :, :]], # 1d Fun[[Iterable[bool]], NDArray[bool, :, :, :]], Fun[[Iterable[int]], NDArray[int, :, :, :]], Fun[[Iterable[float]], NDArray[float, :, :, :]], Fun[[Iterable[complex]], NDArray[complex, :, :, :]], # 2d+ Fun[[NDArray[T0, :, :]], NDArray[T0, :, :, :]], Fun[[Iterable[Iterable[bool]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :, :, :]], # 3d Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :, :]], # 4d Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], NDArray[bool, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], NDArray[int, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], NDArray[float, :, :, :, :]], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], NDArray[complex, :, :, :, :]], ] ), "average": ConstFunctionIntr( signature=_numpy_unary_op_average_axis_signature), "base_repr": ConstFunctionIntr( signature=Union[ Fun[[bool], str], Fun[[bool, int], str], Fun[[bool, int, int], str], Fun[[int], str], Fun[[int, int], str], Fun[[int, int, int], str], ] ), "binary_repr": ConstFunctionIntr( signature=Union[ Fun[[bool], str], Fun[[bool, int], str], Fun[[bool, None], str], Fun[[int], str], Fun[[int, int], str], Fun[[int, None], str], ] ), "bincount": ConstFunctionIntr( signature=Union[ Fun[[Iterable[bool]], NDArray[int, :]], Fun[[Iterable[int]], NDArray[int, :]], # 
Fun[[Iterable[bool], Iterable[float]], NDArray[int, :]], Fun[[Iterable[int], Iterable[float]], NDArray[int, :]], # Fun[[Iterable[bool], Iterable[float], int], NDArray[int, :]], Fun[[Iterable[int], Iterable[float], int], NDArray[int, :]], ], ), "bitwise_and": UFunc( REDUCED_BINARY_UFUNC, signature=_numpy_int_binary_op_signature ), "bitwise_not": ConstFunctionIntr( signature=_numpy_int_unary_op_signature ), "bitwise_or": UFunc( REDUCED_BINARY_UFUNC, signature=_numpy_int_binary_op_signature ), "bitwise_xor": UFunc( REDUCED_BINARY_UFUNC, signature=_numpy_int_binary_op_signature ), "bool": ConstFunctionIntr(signature=_bool_signature), "broadcast_to": ConstFunctionIntr(), "byte": ConstFunctionIntr(signature=_int_signature), "cbrt": ConstFunctionIntr( signature=_numpy_unary_op_float_signature ), "ceil": ConstFunctionIntr(signature=_numpy_float_unary_op_signature), "clip": ConstMethodIntr(signature=_numpy_ternary_op_signature), "concatenate": ConstFunctionIntr( args=('_', 'axis'), defaults=(0,), signature=Union[ # 1D Fun[[Iterable[Iterable[bool]]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool]]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool], int]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool], Iterable[bool]]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool], Iterable[bool], int]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool], Iterable[bool], Iterable[bool]]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool], Iterable[bool], Iterable[bool], int]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool], Iterable[bool], Iterable[ bool], Iterable[bool]]], NDArray[bool, :]], Fun[[Tuple[Iterable[bool], Iterable[bool], Iterable[ bool], Iterable[bool], int]], NDArray[bool, :]], Fun[[Iterable[Iterable[int]]], NDArray[int, :]], Fun[[Tuple[Iterable[int]]], NDArray[int, :]], Fun[[Tuple[Iterable[int], int]], NDArray[int, :]], Fun[[Tuple[Iterable[int], Iterable[int]]], NDArray[int, :]], Fun[[Tuple[Iterable[int], Iterable[int], int]], NDArray[int, :]], Fun[[Tuple[Iterable[int], Iterable[int], 
Iterable[int]]], NDArray[int, :]], Fun[[Tuple[Iterable[int], Iterable[int], Iterable[int], int]], NDArray[int, :]], Fun[[Tuple[Iterable[int], Iterable[int], Iterable[ int], Iterable[int]]], NDArray[int, :]], Fun[[Tuple[Iterable[int], Iterable[int], Iterable[ int], Iterable[int], int]], NDArray[int, :]], Fun[[Iterable[Iterable[float]]], NDArray[float, :]], Fun[[Tuple[Iterable[float]]], NDArray[float, :]], Fun[[Tuple[Iterable[float], int]], NDArray[float, :]], Fun[[Tuple[Iterable[float], Iterable[float]]], NDArray[float, :]], Fun[[Tuple[Iterable[float], Iterable[float], int]], NDArray[float, :]], Fun[[Tuple[Iterable[float], Iterable[float], Iterable[float]]], NDArray[float, :]], Fun[[Tuple[Iterable[float], Iterable[float], Iterable[float], int]], NDArray[float, :]], Fun[[Tuple[Iterable[float], Iterable[float], Iterable[ float], Iterable[float]]], NDArray[float, :]], Fun[[Tuple[Iterable[float], Iterable[float], Iterable[ float], Iterable[float], int]], NDArray[float, :]], Fun[[Iterable[Iterable[complex]]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex]]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex], int]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex], Iterable[complex]]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex], Iterable[complex], int]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex], Iterable[complex], Iterable[complex]]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex], Iterable[complex], Iterable[complex], int]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex], Iterable[complex], Iterable[ complex], Iterable[complex]]], NDArray[complex, :]], Fun[[Tuple[Iterable[complex], Iterable[complex], Iterable[ complex], Iterable[complex], int]], NDArray[complex, :]], # 2D Fun[[Iterable[Iterable[Iterable[bool]]]], NDArray[bool, :, :]], Fun[[Tuple[Iterable[Iterable[bool]]]], NDArray[bool, :, :]], Fun[[Tuple[Iterable[Iterable[bool]], int]], NDArray[bool, :, :]], Fun[[Tuple[Iterable[Iterable[bool]], Iterable[ Iterable[bool]]]], NDArray[bool, 
:, :]], Fun[[Tuple[Iterable[Iterable[bool]], Iterable[ Iterable[bool]], int]], NDArray[bool, :, :]], Fun[[Tuple[Iterable[Iterable[bool]], Iterable[Iterable[bool]], Iterable[Iterable[bool]]]], NDArray[bool, :, :]], Fun[[Tuple[Iterable[Iterable[bool]], Iterable[Iterable[bool]], Iterable[Iterable[bool]], int]], NDArray[bool, :, :]], Fun[[Tuple[Iterable[Iterable[bool]], Iterable[Iterable[bool]], Iterable[Iterable[bool]], Iterable[Iterable[bool]]]], NDArray[bool, :, :]], Fun[[Tuple[Iterable[Iterable[bool]], Iterable[Iterable[bool]], Iterable[Iterable[bool]], Iterable[Iterable[bool]], int]], NDArray[bool, :, :]], Fun[[Iterable[Iterable[Iterable[int]]]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]]]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]], int]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]], Iterable[ Iterable[int]]]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]], Iterable[ Iterable[int]], int]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[Iterable[int]]]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[Iterable[int]], int]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[Iterable[int]]]], NDArray[int, :, :]], Fun[[Tuple[Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[Iterable[int]], Iterable[Iterable[int]], int]], NDArray[int, :, :]], Fun[[Iterable[Iterable[Iterable[float]]]], NDArray[float, :, :]], Fun[[Tuple[Iterable[Iterable[float]]]], NDArray[float, :, :]], Fun[[Tuple[Iterable[Iterable[float]], int]], NDArray[float, :, :]], Fun[[Tuple[Iterable[Iterable[float]], Iterable[Iterable[float]]]], NDArray[float, :, :]], Fun[[Tuple[Iterable[Iterable[float]], Iterable[Iterable[float]], int]], NDArray[float, :, :]], Fun[[Tuple[Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[Iterable[float]]]], NDArray[float, :, :]], 
Fun[[Tuple[Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[Iterable[float]], int]], NDArray[float, :, :]], Fun[[Tuple[Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[Iterable[float]]]], NDArray[float, :, :]], Fun[[Tuple[Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[Iterable[float]], Iterable[Iterable[float]], int]], NDArray[float, :, :]], Fun[[Iterable[Iterable[Iterable[complex]]]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]]]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]], int]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]], Iterable[Iterable[complex]]]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]], Iterable[Iterable[complex]], int]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[Iterable[complex]]]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[Iterable[complex]], int]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[Iterable[complex]]]], NDArray[complex, :, :]], Fun[[Tuple[Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[Iterable[complex]], Iterable[Iterable[complex]], int]], NDArray[complex, :, :]], ] ), "complex": ConstFunctionIntr(signature=_complex_signature), "complex64": ConstFunctionIntr(signature=_complex_signature), "complex128": ConstFunctionIntr(signature=_complex_signature), "complex256": ConstFunctionIntr(signature=_complex_signature), "conj": ConstMethodIntr(signature=_numpy_unary_op_signature), "conjugate": ConstMethodIntr(signature=_numpy_unary_op_signature), "convolve": ConstMethodIntr(), "correlate": ConstMethodIntr(), "copy": ConstMethodIntr(signature=_numpy_array_signature), "copyto": FunctionIntr( argument_effects=[UpdateEffect(), ReadEffect(), ReadEffect(), ReadEffect()], 
signature=Union[ # 1d Fun[[NDArray[bool, :], bool], None], Fun[[NDArray[bool, :], Iterable[bool]], None], Fun[[NDArray[int, :], int], None], Fun[[NDArray[int, :], Iterable[int]], None], Fun[[NDArray[float, :], float], None], Fun[[NDArray[float, :], Iterable[float]], None], Fun[[NDArray[complex, :], complex], None], Fun[[NDArray[complex, :], Iterable[complex]], None], # 2d Fun[[NDArray[bool, :, :], bool], None], Fun[[NDArray[bool, :, :], Iterable[bool]], None], Fun[[NDArray[bool, :, :], Iterable[Iterable[bool]]], None], Fun[[NDArray[int, :, :], int], None], Fun[[NDArray[int, :, :], Iterable[int]], None], Fun[[NDArray[int, :, :], Iterable[Iterable[int]]], None], Fun[[NDArray[float, :, :], float], None], Fun[[NDArray[float, :, :], Iterable[float]], None], Fun[[NDArray[float, :, :], Iterable[Iterable[float]]], None], Fun[[NDArray[complex, :, :], complex], None], Fun[[NDArray[complex, :, :], Iterable[complex]], None], Fun[[NDArray[complex, :, :], Iterable[Iterable[complex]]], None], # 3d Fun[[NDArray[bool, :, :, :], bool], None], Fun[[NDArray[bool, :, :, :], Iterable[bool]], None], Fun[[NDArray[bool, :, :, :], Iterable[Iterable[bool]]], None], Fun[[NDArray[bool, :, :, :], Iterable[Iterable[Iterable[bool]]]], None], Fun[[NDArray[int, :, :, :], int], None], Fun[[NDArray[int, :, :, :], Iterable[int]], None], Fun[[NDArray[int, :, :, :], Iterable[Iterable[int]]], None], Fun[[NDArray[int, :, :, :], Iterable[Iterable[Iterable[int]]]], None], Fun[[NDArray[float, :, :, :], float], None], Fun[[NDArray[float, :, :, :], Iterable[float]], None], Fun[[NDArray[float, :, :, :], Iterable[Iterable[float]]], None], Fun[[NDArray[float, :, :, :], Iterable[Iterable[Iterable[float]]]], None], Fun[[NDArray[complex, :, :, :], complex], None], Fun[[NDArray[complex, :, :, :], Iterable[complex]], None], Fun[[NDArray[complex, :, :, :], Iterable[Iterable[complex]]], None], Fun[[NDArray[complex, :, :, :], Iterable[Iterable[Iterable[complex]]]], None], ] ), "copysign": UFunc(BINARY_UFUNC), 
"count_nonzero": ConstFunctionIntr( signature=Union[ # scalar Fun[[bool], int], Fun[[int], int], Fun[[float], int], Fun[[complex], int], # 1d Fun[[Iterable[bool]], int], Fun[[Iterable[int]], int], Fun[[Iterable[float]], int], Fun[[Iterable[complex]], int], # 2d Fun[[Iterable[Iterable[bool]]], int], Fun[[Iterable[Iterable[int]]], int], Fun[[Iterable[Iterable[float]]], int], Fun[[Iterable[Iterable[complex]]], int], # 3d Fun[[Iterable[Iterable[Iterable[bool]]]], int], Fun[[Iterable[Iterable[Iterable[int]]]], int], Fun[[Iterable[Iterable[Iterable[float]]]], int], Fun[[Iterable[Iterable[Iterable[complex]]]], int], # 4d Fun[[Iterable[Iterable[Iterable[Iterable[bool]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[int]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[float]]]]], int], Fun[[Iterable[Iterable[Iterable[Iterable[complex]]]]], int], ] ), "cos": ConstFunctionIntr( signature=_numpy_unary_op_float_signature ), "cosh": ConstFunctionIntr( signature=_numpy_unary_op_float_signature ), "cross": ConstFunctionIntr(), "ctypeslib": { "as_array": ConstFunctionIntr() }, "cumprod": ConstMethodIntr( signature=_numpy_unary_op_cumsum_axis_signature ), "cumproduct": ConstFunctionIntr( signature=_numpy_unary_op_cumsum_axis_signature ), "cumsum": ConstMethodIntr( signature=_numpy_unary_op_cumsum_axis_signature ), "deg2rad": ConstFunctionIntr( signature=_numpy_float_unary_op_float_signature ), "degrees": ConstFunctionIntr( signature=_numpy_float_unary_op_float_signature ), "delete": ConstFunctionIntr(), "diag": ConstFunctionIntr(), "diagflat": ConstFunctionIntr(), "diagonal": ConstMethodIntr(), "diff": ConstFunctionIntr(), "digitize": ConstFunctionIntr(), "divide": UFunc(BINARY_UFUNC), "dot": ConstMethodIntr(), "double": ConstFunctionIntr(signature=_float_signature), "dtype": ClassWithConstConstructor(CLASSES["dtype"]), "e": ConstantIntr(), "ediff1d": ConstFunctionIntr(), "empty": ConstFunctionIntr(args=('shape', 'dtype'), defaults=("numpy.float64",), 
signature=_numpy_ones_signature, ), "empty_like": ConstFunctionIntr( args=('a', 'dtype'), defaults=("numpy.float64",), signature=_numpy_ones_like_signature ), "equal": UFunc(BINARY_UFUNC), "exp": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "expand_dims": ConstFunctionIntr(), "expm1": ConstFunctionIntr(), "eye": ConstFunctionIntr(), "fabs": ConstFunctionIntr(), "fill_diagonal": FunctionIntr( argument_effects=[UpdateEffect(), ReadEffect()], signature=Union[ Fun[[NDArray[T0, :], bool], None], Fun[[NDArray[T0, :], int], None], Fun[[NDArray[T0, :], float], None], Fun[[NDArray[T0, :], complex], None], ] ), "finfo": ClassWithConstConstructor(CLASSES['finfo']), "fix": ConstFunctionIntr(), "flatnonzero": ConstFunctionIntr(), "fliplr": ConstFunctionIntr(), "flip": ConstFunctionIntr(), "flipud": ConstFunctionIntr(), "float32": ConstFunctionIntr(signature=_float_signature), "float64": ConstFunctionIntr(signature=_float_signature), "float128": ConstFunctionIntr(signature=_float_signature), "float": ConstFunctionIntr(signature=_float_signature), "floor": ConstFunctionIntr(signature=_numpy_float_unary_op_signature), "floor_divide": UFunc(BINARY_UFUNC), "fmax": UFunc(REDUCED_BINARY_UFUNC), "fmin": UFunc(REDUCED_BINARY_UFUNC), "fmod": UFunc(BINARY_UFUNC), "frexp": ConstFunctionIntr(), "fromfunction": ConstFunctionIntr(), "fromiter": ConstFunctionIntr(args=("iterable", "dtype", "count"), defaults=(-1,)), "fromstring": ConstFunctionIntr(), "fromfile": FunctionIntr(args=('file', 'dtype', 'count', "sep", "offset"), defaults=(None, None, -1, None, 0), global_effects=True), "full": ConstFunctionIntr(signature=_numpy_ones_signature), "full_like": ConstFunctionIntr(signature=_numpy_ones_like_signature), "greater": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_bool_signature, ), "greater_equal": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_bool_signature, ), "heaviside": UFunc(BINARY_UFUNC), "hstack": ConstFunctionIntr(), "hypot": UFunc(BINARY_UFUNC), "identity": 
ConstFunctionIntr(), "imag": FunctionIntr(), "indices": ConstFunctionIntr(), "inf": ConstantIntr(), "Inf": ConstantIntr(), "inner": ConstFunctionIntr(), "insert": ConstFunctionIntr(), "interp": ConstFunctionIntr(), "intersect1d": ConstFunctionIntr(), "int16": ConstFunctionIntr(signature=_int_signature), "int32": ConstFunctionIntr(signature=_int_signature), "int64": ConstFunctionIntr(signature=_int_signature), "int8": ConstFunctionIntr(signature=_int_signature), "intc": ConstFunctionIntr(signature=_int_signature), "intp": ConstFunctionIntr(signature=_int_signature), "invert": ConstFunctionIntr(), "isclose": ConstFunctionIntr(), "iscomplex": ConstFunctionIntr(), "isfinite": ConstFunctionIntr(), "isinf": ConstFunctionIntr(), "isnan": ConstFunctionIntr(), "isneginf": ConstFunctionIntr(), "isposinf": ConstFunctionIntr(), "isreal": ConstFunctionIntr(), "isrealobj": ConstFunctionIntr(), "isscalar": ConstFunctionIntr(), "issctype": ConstFunctionIntr(), "ldexp": UFunc(BINARY_UFUNC), "left_shift": UFunc( BINARY_UFUNC, signature=_numpy_int_binary_op_signature, ), "less": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_bool_signature, ), "less_equal": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_bool_signature, ), "lexsort": ConstFunctionIntr(), "linalg": { "norm": FunctionIntr(args=('x', 'ord', 'axis'), defaults=(None, None)), "matrix_power": ConstFunctionIntr(), }, "linspace": ConstFunctionIntr(), "log": ConstFunctionIntr(), "log10": ConstFunctionIntr(), "log1p": ConstFunctionIntr(), "log2": ConstFunctionIntr(), "logaddexp": UFunc(BINARY_UFUNC), "logaddexp2": UFunc(BINARY_UFUNC), "logspace": ConstFunctionIntr(), "logical_and": UFunc( BINARY_UFUNC, signature=_numpy_int_binary_op_signature ), "logical_not": ConstFunctionIntr(), "logical_or": UFunc( BINARY_UFUNC, signature=_numpy_int_binary_op_signature ), "logical_xor": UFunc( BINARY_UFUNC, signature=_numpy_int_binary_op_signature ), "longlong": ConstFunctionIntr(signature=_int_signature), "max": 
ConstMethodIntr(signature=_numpy_unary_op_axis_signature), "maximum": UFunc( REDUCED_BINARY_UFUNC, signature=_numpy_binary_op_signature ), "mean": ConstMethodIntr(immediate_arguments=[4]), "median": ConstFunctionIntr( signature=_numpy_unary_op_sum_axis_signature ), "min": ConstMethodIntr(signature=_numpy_unary_op_axis_signature), "minimum": UFunc( REDUCED_BINARY_UFUNC, signature=_numpy_binary_op_signature ), "mod": UFunc(BINARY_UFUNC), "multiply": UFunc( REDUCED_BINARY_UFUNC, signature=_numpy_binary_op_signature, ), "nan": ConstantIntr(), "nan_to_num": ConstFunctionIntr(), "nanargmax": ConstFunctionIntr(), "nanargmin": ConstFunctionIntr(), "nanmax": ConstFunctionIntr(), "nanmin": ConstFunctionIntr(), "nansum": ConstFunctionIntr(), "ndenumerate": ConstFunctionIntr(), "ndarray": ClassWithConstConstructor(CLASSES["ndarray"]), "ndindex": ConstFunctionIntr(), "ndim": ConstFunctionIntr(return_range=interval.positive_values), "negative": ConstFunctionIntr(signature=_numpy_unary_op_signature), "newaxis": ConstantIntr(), "nextafter": UFunc(BINARY_UFUNC), "NINF": ConstantIntr(), "nonzero": ConstMethodIntr(), "not_equal": UFunc(BINARY_UFUNC), "ones": ConstFunctionIntr(signature=_numpy_ones_signature), "ones_like": ConstFunctionIntr(signature=_numpy_ones_like_signature), "outer": ConstFunctionIntr(), "pi": ConstantIntr(), "place": FunctionIntr(), "power": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_signature ), "prod": ConstMethodIntr(), "product": ConstFunctionIntr(), "ptp": ConstMethodIntr(), "put": MethodIntr(), "putmask": FunctionIntr(), "rad2deg": ConstFunctionIntr( signature=_numpy_float_unary_op_float_signature ), "radians": ConstFunctionIntr( signature=_numpy_float_unary_op_float_signature ), "fft": { "fft": FunctionIntr(args=("a", "n", "axis", "norm"), defaults=(None, -1, None), global_effects=True), "ifft": FunctionIntr(args=("a", "n", "axis", "norm"), defaults=( None, -1, None), global_effects=True), "rfft": FunctionIntr(args=('a','n','axis','norm'), 
defaults=(None,-1,-1,None),global_effects=True), "irfft": FunctionIntr(args=('a','n','axis','norm'), defaults=(None,-1,-1,None),global_effects=True), "hfft": FunctionIntr(args=('a','n','axis','norm'), defaults=(None,-1,-1,None),global_effects=True), "ihfft": FunctionIntr(args=('a','n','axis','norm'), defaults=(None,-1,-1,None),global_effects=True), }, "random": { "binomial": FunctionIntr(args=('n', 'p', 'size'), global_effects=True), "bytes": FunctionIntr(args=('length',), global_effects=True), "chisquare": FunctionIntr(args=('df', 'size',), global_effects=True), "choice": FunctionIntr(args=('a', 'size', 'replace', 'p'), global_effects=True), "dirichlet": FunctionIntr(args=('alpha', 'size',), global_effects=True), "exponential": FunctionIntr(args=('scale', 'size',), defaults=(1.0, None,), global_effects=True), "f": FunctionIntr(args=('dfnum', 'dfden', 'size'), global_effects=True), "gamma": FunctionIntr(args=('shape', 'scale', 'size',), defaults=(None, 1.0, None,), global_effects=True), "geometric": FunctionIntr(args=('p', 'size',), global_effects=True), "pareto": FunctionIntr(args=('a', 'size',), global_effects=True), "gumbel": FunctionIntr(args=('loc', 'scale', 'size',), defaults=(0.0, 1.0, None,), global_effects=True), "poisson": FunctionIntr(args=('lam', 'size',), defaults=(1.0, None,), global_effects=True), "negative_binomial": FunctionIntr(args=('n', 'p', 'size',), global_effects=True), "normal": FunctionIntr(args=('loc', 'scale', 'size',), defaults=(0.0, 1.0, None,), global_effects=True), "laplace": FunctionIntr(args=('loc', 'scale', 'size',), defaults=(0.0, 1.0, None,), global_effects=True), "logistic": FunctionIntr(args=('loc', 'scale', 'size',), defaults=(0.0, 1.0, None,), global_effects=True), "lognormal": FunctionIntr(args=('mean', 'sigma', 'size',), defaults=(0.0, 1.0, None,), global_effects=True), "logseries": FunctionIntr(args=('p', 'size',), global_effects=True), "power": FunctionIntr(args=('a', 'size',), global_effects=True), "rand": 
FunctionIntr(args=(), global_effects=True), "ranf": FunctionIntr(args=('size',), global_effects=True), "randint": FunctionIntr(args=("low", "high", "size"), defaults=(None, None), global_effects=True), "randn": FunctionIntr(args=(), global_effects=True), "random": FunctionIntr(args=('size',), global_effects=True), "random_integers": FunctionIntr(args=("low", "high", "size"), global_effects=True), "random_sample": FunctionIntr(args=('size',), global_effects=True), "rayleigh": FunctionIntr(args=('scale', 'size',), defaults=(1.0, None,), global_effects=True), "sample": FunctionIntr(args=('size',), global_effects=True), "seed": FunctionIntr(global_effects=True), "shuffle": FunctionIntr(global_effects=True), "standard_exponential": FunctionIntr(args=('size',), global_effects=True), "standard_gamma": FunctionIntr(args=('shape', 'size',), global_effects=True), "standard_normal": FunctionIntr(args=('size',), global_effects=True), "uniform": FunctionIntr(args=('low', 'high', 'size',), defaults=(0.0, 1.0, None,), global_effects=True), "weibull": FunctionIntr(args=('a', 'size',), global_effects=True), }, "ravel": ConstMethodIntr( return_alias=lambda args: {args[0]} ), "real": FunctionIntr(), "reciprocal": ConstFunctionIntr(), "remainder": UFunc(BINARY_UFUNC), "repeat": ConstMethodIntr(), "resize": ConstMethodIntr(), "right_shift": UFunc( BINARY_UFUNC, signature=_numpy_int_binary_op_signature, ), "rint": ConstFunctionIntr(), "roll": ConstFunctionIntr(), "rollaxis": ConstFunctionIntr(), "rot90": ConstFunctionIntr(), "round": ConstMethodIntr(), "round_": ConstMethodIntr(), "searchsorted": ConstMethodIntr(), "select": ConstFunctionIntr(), "setdiff1d": ConstFunctionIntr(), "shape": ConstFunctionIntr(), "short_": ConstFunctionIntr(signature=_int_signature), "sign": ConstFunctionIntr(), "signbit": ConstFunctionIntr(), "sin": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "sinh": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "size": 
ConstFunctionIntr(return_range=interval.positive_values), "sometrue": ConstFunctionIntr(), "sort": ConstFunctionIntr(), "sort_complex": ConstFunctionIntr(), "spacing": ConstFunctionIntr(), "split": ConstFunctionIntr(), "sqrt": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "square": ConstFunctionIntr(), "stack": ConstFunctionIntr(), "std": ConstMethodIntr(), "subtract": UFunc( BINARY_UFUNC, signature=_numpy_binary_op_signature, ), "sum": ReadOnceMethodIntr( signature=_numpy_unary_op_sum_axis_signature), "swapaxes": ConstMethodIntr(), "short": ConstFunctionIntr(signature=_int_signature), "take": ConstMethodIntr(), "tan": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "tanh": ConstFunctionIntr(signature=_numpy_unary_op_float_signature), "tile": ConstFunctionIntr(), "trace": ConstMethodIntr(), "transpose": ConstMethodIntr(), "tri": ConstMethodIntr(), "tril": ConstMethodIntr(), "trim_zeros": ConstMethodIntr(), "triu": ConstMethodIntr(), "true_divide": UFunc(BINARY_UFUNC), "trunc": ConstFunctionIntr(), "ubyte": ConstFunctionIntr(signature=_int_signature), "uint16": ConstFunctionIntr(signature=_int_signature), "uint32": ConstFunctionIntr(signature=_int_signature), "uint64": ConstFunctionIntr(signature=_int_signature), "uintc": ConstFunctionIntr(signature=_int_signature), "uintp": ConstFunctionIntr(signature=_int_signature), "uint8": ConstFunctionIntr(signature=_int_signature), "ulonglong": ConstFunctionIntr(signature=_int_signature), "union1d": ConstFunctionIntr(), "unique": ConstFunctionIntr(immediate_arguments=[1, 2, 3]), "unwrap": ConstFunctionIntr(), "unravel_index": ConstFunctionIntr(), "ushort": ConstFunctionIntr(signature=_int_signature), "var": ConstMethodIntr(), "vdot": ConstMethodIntr(), "vstack": ConstFunctionIntr(), "where": ConstFunctionIntr(), "zeros": ConstFunctionIntr(args=('shape', 'dtype'), defaults=("numpy.float64",), signature=_numpy_ones_signature, ), "zeros_like": ConstFunctionIntr(signature=_numpy_ones_like_signature), 
}, "time": { "sleep": FunctionIntr( signature=Fun[[float], None], global_effects=True ), "time": FunctionIntr( signature=Fun[[], float], global_effects=True ), }, "math": { "isinf": ConstFunctionIntr(signature=Fun[[float], bool]), "modf": ConstFunctionIntr(signature=Fun[[float], Tuple[float, float]]), "frexp": ConstFunctionIntr(signature=Fun[[float], Tuple[float, int]]), "factorial": ConstFunctionIntr(signature=Fun[[int], int]), "gamma": ConstFunctionIntr(signature=Fun[[float], float]), "lgamma": ConstFunctionIntr(signature=Fun[[float], float]), "trunc": ConstFunctionIntr(signature=Fun[[float], int]), "erf": ConstFunctionIntr(signature=Fun[[float], float]), "erfc": ConstFunctionIntr(signature=Fun[[float], float]), "asinh": ConstFunctionIntr(signature=Fun[[float], float]), "atanh": ConstFunctionIntr(signature=Fun[[float], float]), "acosh": ConstFunctionIntr(signature=Fun[[float], float]), "radians": ConstFunctionIntr(signature=Fun[[float], float]), "degrees": ConstFunctionIntr(signature=Fun[[float], float]), "hypot": ConstFunctionIntr(signature=Fun[[float, float], float]), "tanh": ConstFunctionIntr(signature=Fun[[float], float]), "cosh": ConstFunctionIntr(signature=Fun[[float], float]), "sinh": ConstFunctionIntr(signature=Fun[[float], float]), "atan": ConstFunctionIntr(signature=Fun[[float], float]), "atan2": ConstFunctionIntr(signature=Fun[[float, float], float]), "asin": ConstFunctionIntr(signature=Fun[[float], float]), "tan": ConstFunctionIntr(signature=Fun[[float], float]), "log": ConstFunctionIntr(signature=Fun[[float], float]), "log1p": ConstFunctionIntr(signature=Fun[[float], float]), "expm1": ConstFunctionIntr(signature=Fun[[float], float]), "ldexp": ConstFunctionIntr(signature=Fun[[float, int], float]), "fmod": ConstFunctionIntr(signature=Fun[[float, float], float]), "fabs": ConstFunctionIntr(signature=Fun[[float], float]), "copysign": UFunc(BINARY_UFUNC), "acos": ConstFunctionIntr(signature=Fun[[float], float]), "cos": 
ConstFunctionIntr(signature=Fun[[float], float]), "sin": ConstFunctionIntr(signature=Fun[[float], float]), "exp": ConstFunctionIntr(signature=Fun[[float], float]), "sqrt": ConstFunctionIntr(signature=Fun[[float], float]), "log10": ConstFunctionIntr(signature=Fun[[float], float]), "isnan": ConstFunctionIntr(signature=Fun[[float], bool]), "ceil": ConstFunctionIntr(signature=Fun[[float], float]), "floor": ConstFunctionIntr(signature=Fun[[float], float]), "pow": ConstFunctionIntr(signature=Fun[[float, float], float]), "pi": ConstantIntr(signature=float), "e": ConstantIntr(signature=float), }, "functools": { "partial": FunctionIntr( signature=Union[ # no arg Fun[[Fun[[], T0]], Fun[[], T0]], # 1 arg Fun[[Fun[[T0], T1]], Fun[[T0], T1]], Fun[[Fun[[T0], T1], T0], Fun[[], T1]], # 2 args Fun[[Fun[[T0, T1], T2]], Fun[[T0, T1], T2]], Fun[[Fun[[T0, T1], T2], T0], Fun[[T1], T2]], Fun[[Fun[[T0, T1], T2], T0, T1], Fun[[], T2]], # 3 args Fun[[Fun[[T0, T1, T2], T3]], Fun[[T0, T1, T2], T3]], Fun[[Fun[[T0, T1, T2], T3], T0], Fun[[T1, T2], T3]], Fun[[Fun[[T0, T1, T2], T3], T0, T1], Fun[[T2], T3]], Fun[[Fun[[T0, T1, T2], T3], T0, T1, T2], Fun[[], T3]], # 4 args Fun[[Fun[[T0, T1, T2, T3], T4]], Fun[[T0, T1, T2, T3], T4]], Fun[[Fun[[T0, T1, T2, T3], T4], T0], Fun[[T1, T2, T3], T4]], Fun[[Fun[[T0, T1, T2, T3], T4], T0, T1], Fun[[T2, T3], T4]], Fun[[Fun[[T0, T1, T2, T3], T4], T0, T1, T2], Fun[[T3], T4]], Fun[[Fun[[T0, T1, T2, T3], T4], T0, T1, T2, T3], Fun[[], T4]], # 5 args Fun[[Fun[[T0, T1, T2, T3, T4], T5]], Fun[[T0, T1, T2, T3, T4], T5]], Fun[[Fun[[T0, T1, T2, T3, T4], T5], T0], Fun[[T1, T2, T3, T4], T5]], Fun[[Fun[[T0, T1, T2, T3, T4], T5], T0, T1], Fun[[T2, T3, T4], T5]], Fun[[Fun[[T0, T1, T2, T3, T4], T5], T0, T1, T2], Fun[[T3, T4], T5]], Fun[[Fun[[T0, T1, T2, T3, T4], T5], T0, T1, T2, T3], Fun[[T4], T5]], Fun[[Fun[[T0, T1, T2, T3, T4], T5], T0, T1, T2, T3, T4], Fun[[], T5]], # 6 args Fun[[Fun[[T0, T1, T2, T3, T4, T5], T6]], Fun[[T0, T1, T2, T3, T4, T5], T6]], Fun[[Fun[[T0, T1, T2, 
T3, T4, T5], T6], T0], Fun[[T1, T2, T3, T4, T5], T6]], Fun[[Fun[[T0, T1, T2, T3, T4, T5], T6], T0, T1], Fun[[T2, T3, T4, T5], T6]], Fun[[Fun[[T0, T1, T2, T3, T4, T5], T6], T0, T1, T2], Fun[[T3, T4, T5], T6]], Fun[[Fun[[T0, T1, T2, T3, T4, T5], T6], T0, T1, T2, T3], Fun[[T4, T5], T6]], Fun[[Fun[[T0, T1, T2, T3, T4, T5], T6], T0, T1, T2, T3, T4], Fun[[T5], T6]], Fun[[Fun[[T0, T1, T2, T3, T4, T5], T6], T0, T1, T2, T3, T4, T5], Fun[[], T6]], ] ), "reduce": ReadOnceFunctionIntr(signature=_functools_reduce_signature), }, "bisect": { "bisect_left": ConstFunctionIntr( signature=Union[ Fun[[List[T0], T0], int], Fun[[List[T0], T0, int], int], Fun[[List[T0], T0, int, int], int], ], return_range=interval.positive_values ), "bisect_right": ConstFunctionIntr( signature=Union[ Fun[[List[T0], T0], int], Fun[[List[T0], T0, int], int], Fun[[List[T0], T0, int, int], int], ], return_range=interval.positive_values ), "bisect": ConstFunctionIntr( signature=Union[ Fun[[List[T0], T0], int], Fun[[List[T0], T0, int], int], Fun[[List[T0], T0, int, int], int], ], return_range=interval.positive_values ), }, "cmath": { "cos": FunctionIntr( signature=Union[ Fun[[float], complex], Fun[[complex], complex], ], ), "sin": FunctionIntr( signature=Union[ Fun[[float], complex], Fun[[complex], complex], ], ), "exp": FunctionIntr( signature=Union[ Fun[[float], complex], Fun[[complex], complex], ], ), "sqrt": FunctionIntr( signature=Union[ Fun[[float], complex], Fun[[complex], complex], ], ), "log10": FunctionIntr( signature=Union[ Fun[[float], complex], Fun[[complex], complex], ], ), "isnan": FunctionIntr( signature=Union[ Fun[[float], bool], Fun[[complex], bool], ], ), "pi": ConstantIntr(signature=float), "e": ConstantIntr(signature=float), }, 'io': { '_io': { "TextIOWrapper": ClassWithConstConstructor( CLASSES['file'], global_effects=True) } }, "itertools": { "count": ReadOnceFunctionIntr( signature=Union[ Fun[[], Generator[int]], Fun[[int], Generator[int]], Fun[[int, int], Generator[int]], ] ), 
"islice": ReadOnceFunctionIntr(), "product": ConstFunctionIntr( signature=Union[ Fun[[], Generator[T0]], Fun[[Iterable[T0]], Generator[Tuple[T0]]], Fun[[Iterable[T0], Iterable[T1]], Generator[Tuple[T0, T1]]], Fun[[Iterable[T0], Iterable[T1], Iterable[T2]], Generator[Tuple[T0, T1, T2]]], Fun[[Iterable[T0], Iterable[T1], Iterable[T2], Iterable[T3]], Generator[Tuple[T0, T1, T2, T3]]], ], ), "combinations": ConstFunctionIntr( signature=Fun[[Iterable[T0], int], Generator[List[T0]]]), "permutations": ConstFunctionIntr( signature=Union[ Fun[[Iterable[T0]], Generator[List[T0]]], Fun[[Iterable[T0], int], Generator[List[T0]]], ], ), "repeat": ConstFunctionIntr( signature=Union[ Fun[[T0], Iterable[T0]], Fun[[T0, int], Iterable[T0]], ], ), }, "random": { "seed": FunctionIntr( signature=Union[ Fun[[], None], Fun[[T0], None], ], global_effects=True ), "random": FunctionIntr( signature=Fun[[], float], global_effects=True ), "randint": FunctionIntr( signature=Fun[[int, int], int], global_effects=True ), "randrange": FunctionIntr( signature=Union[ Fun[[int], int], Fun[[int, int], int], Fun[[int, int, int], int] ], global_effects=True ), "gauss": FunctionIntr( signature=Fun[[float, float], float], global_effects=True ), "uniform": FunctionIntr( signature=Fun[[float, float], float], global_effects=True ), "expovariate": FunctionIntr( signature=Fun[[float], float], global_effects=True ), "sample": FunctionIntr( signature=Fun[[Iterable[T0], int], List[T0]], global_effects=True ), "choice": FunctionIntr( signature=Fun[[Iterable[T0]], T0], global_effects=True ), "shuffle": FunctionIntr( signature=Union[ Fun[[List[T0]], None], Fun[[List[T0], Fun[[], float]], None], ], global_effects=True ), }, "omp": { "set_num_threads": FunctionIntr(global_effects=True), "get_num_threads": FunctionIntr(global_effects=True), "get_max_threads": FunctionIntr(global_effects=True), "get_thread_num": FunctionIntr(global_effects=True), "get_num_procs": FunctionIntr(global_effects=True), "in_parallel": 
FunctionIntr(global_effects=True), "set_dynamic": FunctionIntr(global_effects=True), "get_dynamic": FunctionIntr(global_effects=True), "set_nested": FunctionIntr(global_effects=True), "get_nested": FunctionIntr(global_effects=True), "init_lock": FunctionIntr(global_effects=True), "destroy_lock": FunctionIntr(global_effects=True), "set_lock": FunctionIntr(global_effects=True), "unset_lock": FunctionIntr(global_effects=True), "test_lock": FunctionIntr(global_effects=True), "init_nest_lock": FunctionIntr(global_effects=True), "destroy_nest_lock": FunctionIntr(global_effects=True), "set_nest_lock": FunctionIntr(global_effects=True), "unset_nest_lock": FunctionIntr(global_effects=True), "test_nest_lock": FunctionIntr(global_effects=True), "get_wtime": FunctionIntr(global_effects=True), "get_wtick": FunctionIntr(global_effects=True), }, "operator": { "lt": ConstFunctionIntr(signature=_operator_eq_signature), "le": ConstFunctionIntr(signature=_operator_eq_signature), "eq": ConstFunctionIntr(signature=_operator_eq_signature), "ne": ConstFunctionIntr(signature=_operator_eq_signature), "ge": ConstFunctionIntr(signature=_operator_eq_signature), "gt": ConstFunctionIntr(signature=_operator_eq_signature), "__lt__": ConstFunctionIntr(signature=_operator_eq_signature), "__le__": ConstFunctionIntr(signature=_operator_eq_signature), "__eq__": ConstFunctionIntr(signature=_operator_eq_signature), "__ne__": ConstFunctionIntr(signature=_operator_eq_signature), "__ge__": ConstFunctionIntr(signature=_operator_eq_signature), "__gt__": ConstFunctionIntr(signature=_operator_eq_signature), "not_": ConstFunctionIntr(), "__not__": ConstFunctionIntr(), "truth": ConstFunctionIntr(), "is_": ConstFunctionIntr(), "is_not": ConstFunctionIntr(), "abs": ConstFunctionIntr(), "__abs__": ConstFunctionIntr(), "add": ConstFunctionIntr(signature=_operator_add_signature), "__add__": ConstFunctionIntr(signature=_operator_add_signature), "and_": ConstFunctionIntr(), "__and__": ConstFunctionIntr(), "floordiv": 
ConstFunctionIntr(signature=_numpy_binary_op_signature), "__floordiv__": ConstFunctionIntr( signature=_numpy_binary_op_signature ), "inv": ConstFunctionIntr(), "invert": ConstFunctionIntr(), "__inv__": ConstFunctionIntr(), "__invert__": ConstFunctionIntr(), "lshift": ConstFunctionIntr(signature=_numpy_int_binary_op_signature), "__lshift__": ConstFunctionIntr( signature=_numpy_int_binary_op_signature ), "matmul": ConstFunctionIntr(signature=_operator_mul_signature), "__matmul__": ConstFunctionIntr(signature=_operator_mul_signature), "mod": ConstFunctionIntr(signature=_operator_mod_signature), "__mod__": ConstFunctionIntr(signature=_operator_mod_signature), "mul": ConstFunctionIntr(signature=_operator_mul_signature), "__mul__": ConstFunctionIntr(signature=_operator_mul_signature), "neg": ConstFunctionIntr(), "__neg__": ConstFunctionIntr(), "or_": ConstFunctionIntr(), "__or__": ConstFunctionIntr(), "pos": ConstFunctionIntr(signature=_numpy_unary_op_signature), "__pos__": ConstFunctionIntr(signature=_numpy_unary_op_signature), "rshift": ConstFunctionIntr(signature=_numpy_int_binary_op_signature), "__rshift__": ConstFunctionIntr( signature=_numpy_int_binary_op_signature ), "sub": ConstFunctionIntr(signature=_operator_sub_signature), "__sub__": ConstFunctionIntr(signature=_operator_sub_signature), "truediv": ConstFunctionIntr(), "__truediv__": ConstFunctionIntr(), "xor": ConstFunctionIntr(), "__xor__": ConstFunctionIntr(), "concat": ConstFunctionIntr(), "__concat__": ConstFunctionIntr(), "iadd": MethodIntr(update_effects), "__iadd__": MethodIntr(update_effects), "iand": MethodIntr(update_effects), "__iand__": MethodIntr(update_effects), "iconcat": MethodIntr(update_effects), "__iconcat__": MethodIntr(update_effects), "ifloordiv": MethodIntr(update_effects), "__ifloordiv__": MethodIntr(update_effects), "ilshift": MethodIntr(update_effects), "__ilshift__": MethodIntr(update_effects), "imod": MethodIntr(update_effects), "__imod__": MethodIntr(update_effects), "imul": 
MethodIntr(update_effects), "__imul__": MethodIntr(update_effects), "ior": MethodIntr(update_effects), "__ior__": MethodIntr(update_effects), "ipow": MethodIntr(update_effects), "__ipow__": MethodIntr(update_effects), "irshift": MethodIntr(update_effects), "__irshift__": MethodIntr(update_effects), "isub": MethodIntr(update_effects), "__isub__": MethodIntr(update_effects), "itruediv": MethodIntr(update_effects), "__itruediv__": MethodIntr(update_effects), "ixor": MethodIntr(update_effects), "__ixor__": MethodIntr(update_effects), "contains": MethodIntr( update_effects, signature=_operator_contains_signature ), "__contains__": MethodIntr( update_effects, signature=_operator_contains_signature ), "countOf": ConstFunctionIntr(), "delitem": FunctionIntr( argument_effects=[UpdateEffect(), ReadEffect()]), "__delitem__": FunctionIntr( argument_effects=[UpdateEffect(), ReadEffect()]), "getitem": ConstFunctionIntr(signature=_operator_getitem_signature), "__getitem__": ConstFunctionIntr( signature=_operator_getitem_signature ), "indexOf": ConstFunctionIntr(), "__theitemgetter__": ConstFunctionIntr(), "itemgetter": MethodIntr( return_alias=lambda _: { MODULES['operator']['__theitemgetter__']} ), }, "string": { "ascii_lowercase": ConstantIntr(signature=str), "ascii_uppercase": ConstantIntr(signature=str), "ascii_letters": ConstantIntr(signature=str), "digits": ConstantIntr(signature=str), "hexdigits": ConstantIntr(signature=str), "octdigits": ConstantIntr(signature=str), }, "os": { "path": { "join": ConstFunctionIntr( signature=Union[ Fun[[str], str], Fun[[str, str], str], Fun[[str, str, str], str], Fun[[str, str, str, str], str], Fun[[str, str, str, str, str], str], ] ), } }, # conflicting method names must be listed here "__dispatch__": { "clear": MethodIntr(signature=Fun[[T0], None]), "conjugate": ConstMethodIntr(), "copy": ConstMethodIntr(signature=Fun[[T0], T0]), "count": ConstMethodIntr( signature=Union[ Fun[[Iterable[T0], T0], int], Fun[[Iterable[T0], T0, int], int], 
Fun[[Iterable[T0], T0, int, int], int], ], return_range=interval.positive_values ), "index": ConstMethodIntr( signature=Union[ Fun[[Iterable[T0], T0], int], Fun[[Iterable[T0], T0, int], int], Fun[[Iterable[T0], T0, int, int], int], ], return_range=interval.positive_values ), "pop": MethodIntr(), "remove": MethodIntr(), "sort": MethodIntr(), "update": MethodIntr(update_effects), }, } if sys.version_info < (3, 5): del MODULES['operator']['matmul'] del MODULES['operator']['__matmul__'] # VMSError is only available on VMS if 'VMSError' in sys.modules['builtins'].__dict__: MODULES['builtins']['VMSError'] = ConstExceptionIntr() # WindowsError is only available on Windows if 'WindowsError' in sys.modules['builtins'].__dict__: MODULES['builtins']['WindowsError'] = ConstExceptionIntr() # detect and prune unsupported modules for module_name in ["omp", "scipy", "scipy.special"]: try: __import__(module_name) except: logger.info( "Pythran support for package '{}' will be reduced: " "this module is not available at runtime.".format(module_name) ) # check and delete unimplemented numpy methods for method in list(MODULES['numpy'].keys()): if not hasattr(numpy, method): del MODULES['numpy'][method] # if openmp is available, check its version and populate the API accordingly try: omp_version = getattr(__import__('omp'), 'VERSION', 45) if omp_version >= 30: MODULES['omp'].update({ "set_schedule": FunctionIntr(global_effects=True), "get_schedule": FunctionIntr(global_effects=True), "get_thread_limit": FunctionIntr(global_effects=True), "set_max_active_levels": FunctionIntr(global_effects=True), "get_max_active_levels": FunctionIntr(global_effects=True), "get_level": FunctionIntr(global_effects=True), "get_ancestor_thread_num": FunctionIntr(global_effects=True), "get_team_size": FunctionIntr(global_effects=True), "get_active_level": FunctionIntr(global_effects=True), "in_final": FunctionIntr(global_effects=True), }) except ImportError: pass # populate argument description through 
introspection def save_arguments(module_name, elements): """ Recursively save arguments name and default value. """ for elem, signature in elements.items(): if isinstance(signature, dict): # Submodule case save_arguments(module_name + (elem,), signature) else: # use introspection to get the Python obj try: themodule = __import__(".".join(module_name)) obj = getattr(themodule, elem) while hasattr(obj, '__wrapped__'): obj = obj.__wrapped__ # first try to gather info through getfullargspec spec = inspect.getfullargspec(obj) args = [ast.Name(arg, ast.Param(), None, None) for arg in spec.args] defaults = list(spec.defaults or []) args += [ast.Name(arg, ast.Param(), None, None) for arg in spec.kwonlyargs] defaults += [spec.kwonlydefaults[kw] for kw in spec.kwonlyargs] # Sanity check if signature.args.args: logger.warning( "Overriding pythran description with argspec " "information for: {}".format( ".".join(module_name + (elem,)))) # Avoid use of comprehension to fill "as much args/defauls" as # possible signature.args.args = args[:-len(defaults) or None] signature.args.defaults = [] for arg, value in zip(args[-len(defaults):], defaults): signature.args.defaults.append(to_ast(value)) signature.args.args.append(arg) except (AttributeError, ImportError, TypeError, ToNotEval): pass save_arguments((), MODULES) # Fill return_type field for constants def fill_constants_types(module_name, elements): """ Recursively save arguments name and default value. 
""" for elem, intrinsic in elements.items(): if isinstance(intrinsic, dict): # Submodule case fill_constants_types(module_name + (elem,), intrinsic) elif isinstance(intrinsic, ConstantIntr): # use introspection to get the Python constants types cst = getattr(__import__(".".join(module_name)), elem) intrinsic.signature = type(cst) fill_constants_types((), MODULES) # a method name to module binding # {method_name : ((full module path), signature)} methods = {} duplicated_methods = {} def save_method(elements, module_path): """ Recursively save methods with module name and signature. """ for elem, signature in elements.items(): if isinstance(signature, dict): # Submodule case save_method(signature, module_path + (elem,)) elif isinstance(signature, Class): save_method(signature.fields, module_path + (elem,)) elif signature.ismethod(): # in case of duplicates, there must be a __dispatch__ record # and it is the only recorded one if elem in MODULES['__dispatch__'] and module_path[0] != '__dispatch__': duplicated_methods.setdefault(elem, []).append((module_path, signature)) if elem in methods and module_path[0] != '__dispatch__': assert elem in MODULES['__dispatch__'] path = ('__dispatch__',) methods[elem] = (path, MODULES['__dispatch__'][elem]) else: methods[elem] = (module_path, signature) save_method(MODULES, ()) # a function name to module binding # {function_name : [((full module path), signature)]} functions = {} def save_function(elements, module_path): """ Recursively save functions with module name and signature. 
""" for elem, signature in elements.items(): if isinstance(signature, dict): # Submodule case save_function(signature, module_path + (elem,)) elif signature.isstaticfunction(): functions.setdefault(elem, []).append((module_path, signature,)) elif isinstance(signature, Class): save_function(signature.fields, module_path + (elem,)) save_function(MODULES, ()) # a attribute name to module binding # {attribute_name : ((full module path), signature)} attributes = {} def save_attribute(elements, module_path): """ Recursively save attributes with module name and signature. """ for elem, signature in elements.items(): if isinstance(signature, dict): # Submodule case save_attribute(signature, module_path + (elem,)) elif signature.isattribute(): assert elem not in attributes # we need unicity attributes[elem] = (module_path, signature,) elif isinstance(signature, Class): save_attribute(signature.fields, module_path + (elem,)) save_attribute(MODULES, ()) # patch beniget with pythran-specific builtins import beniget beniget.beniget.Builtins['builtins'] = __import__('builtins') beniget.beniget.Builtins['__dispatch__'] = object() for k, v in MODULES['builtins'].items(): if k not in beniget.beniget.Builtins: beniget.beniget.Builtins[k] = v pythran-0.10.0+ds2/pythran/tests/000077500000000000000000000000001416264035500165765ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/__init__.py000066400000000000000000000463761416264035500207270ustar00rootroot00000000000000""" Base file for all Pythran tests. 
""" import importlib from numpy import float32, float64, complex128 try: from numpy import float128, complex256 except ImportError: float128 = float64 complex256 = complex128 from numpy import intp, intc, number, bool_ from numpy import ndarray, isnan, isinf, isneginf, complex128, complex64 from textwrap import dedent from threading import Thread import copy import math import glob import numpy.testing as npt import os import numbers import sys import unittest from functools import reduce from operator import add import logging import pytest from pythran import compile_pythrancode, spec_parser, load_specfile, frontend from pythran.backend import Python from pythran.middlend import refine from pythran.passmanager import PassManager from pythran.toolchain import _parse_optimization from pythran.spec import Spec logger = logging.getLogger("pythran") logger.setLevel(logging.INFO) def harmonize_containers(value): if isinstance(value, list): def flatten(l, flat): for e in l: if isinstance(e, list): flatten(e, flat) else: flat.append(e) flat = [] flatten(value, flat) if not flat: return if any(not isinstance(v, numbers.Number) for v in flat): return common_type = type(reduce(add, flat)) def rec_visit_values(l): for i, e in enumerate(l): if isinstance(e, list): rec_visit_values(e) else: l[i] = common_type(e) rec_visit_values(value) class TestEnv(unittest.TestCase): """ Test environment to validate a pythran execution against python. """ module = pytest.mark.module # default options used for the c++ compiler PYTHRAN_CXX_FLAGS = ['-O0', '-Wall', '-Werror', '-UNDEBUG', '-Wno-unused-function', '-Wno-int-in-bool-context', '-Wno-unknown-warning-option', '-Wno-unused-local-typedefs', '-Wno-absolute-value', '-Wno-missing-braces', '-Wno-unknown-pragmas', ] if sys.platform != "win32" else [] TEST_RETURNVAL = "TEST_RETURNVAL" def check_type(self, ref, res): """ Check if type between reference and result match. 
""" print("Type of Pythran res : ", type(res)) print("Type of Python ref : ", type(ref)) # cope with subtle situation under Windows where numpy.uint32 have same # name but different value if type(res).__name__ == type(ref).__name__: return if isinstance(ref, ndarray): # res can be an ndarray of dim 0 because of isneginf call if ref.ndim == 0 and (not isinstance(res, ndarray) or res.ndim != 0): self.check_type(ref.item(0), res) else: self.assertIsInstance(res, type(ref)) elif isinstance(ref, (float, float64)): self.assertIsInstance(res, (float, float64)) elif isinstance(ref, (complex, complex128)): self.assertIsInstance(res, (complex, complex128)) elif isinstance(ref, (bool, bool_)): self.assertIsInstance(res, (bool, bool_)) elif hasattr(ref, 'dtype'): if hasattr(res, 'dtype'): self.assertEqual(ref.dtype.itemsize, res.dtype.itemsize) self.assertEqual(ref.dtype.type(-1), res.dtype.type(-1)) else: self.assertIsInstance(res, int) else: self.assertIsInstance(res, type(ref)) def assertAlmostEqual(self, ref, res): """ Improved version of assertAlmostEqual. This new version supports nan, complex and ndarray. """ self.check_type(ref, res) if isinstance(res, (list, tuple)): self.assertEqual(len(res), len(ref)) for res_v, ref_v in zip(res, ref): self.assertAlmostEqual(ref_v, res_v) elif isinstance(ref, ndarray): npt.assert_array_almost_equal(ref, res) elif isinstance(ref, float): if isinf(ref) or isinf(res): self.assertEqual(isinf(ref), isinf(res)) self.assertEqual(isneginf(ref), isneginf(res)) elif isnan(ref) or isnan(res): self.assertEqual(isnan(ref), isnan(res)) elif ref == res: pass else: # Check float equality using upl : # http://numscale.github.io/nstest/design_rationale.html (m_ref, e_ref) = math.frexp(ref) (m_res, e_res) = math.frexp(res) expo = max(e_ref, e_res) n_ref = math.ldexp(ref, -expo) n_res = math.ldexp(res, -expo) e = (m_ref - m_res) if e_ref == e_res else (n_ref - n_res) self.assertLessEqual(abs(e) / sys.float_info.epsilon, 3.) 
elif isinstance(ref, (complex, complex64, complex128)): self.assertAlmostEqual(ref.real, res.real) self.assertAlmostEqual(ref.imag, res.imag) else: self.assertEqual(ref, res) def run_python(self, code, runas, prelude=None, check_exception=False): """ Run test with Python to have a reference to compare. runas may be a string to run or a tuple : (function name, list of parameters). """ # Caller may requires some cleaning prelude and prelude() # Produce the reference, python-way, run in an separated 'env' env = {"builtins": __import__("builtins")} # Compare if exception raised in python and in pythran are the same err_msg = "Excepected exception but none raise." try: if isinstance(runas, tuple): exec(code, env) ret_val = env[runas[0]](*runas[1]) else: exec((code + "\n" + runas), env) ret_val = env[self.TEST_RETURNVAL] if check_exception: raise AssertionError(err_msg) return ret_val except BaseException as e: if not check_exception or (e.args and e.args[0] == err_msg): raise return type(e) def run_pythran(self, modname, module_path, runas, prelude=None, check_exception=False): """ Run Pythran code and clean Pythran dynamic library. runas may be a string to run or a tuple : (function name, list of parameters). """ # Caller may requires some cleaning prelude and prelude() loader = importlib.machinery.ExtensionFileLoader(modname, module_path) spec = importlib.machinery.ModuleSpec(name=modname, loader=loader, origin=module_path) pymod = importlib._bootstrap._load(spec) err_msg = "Excepected exception but none raise." 
try: if isinstance(runas, tuple): ret_val = getattr(pymod, runas[0])(*runas[1]) else: # Produce the pythran result, exec in the loaded module ctx exec(runas, pymod.__dict__) ret_val = getattr(pymod, self.TEST_RETURNVAL) if check_exception: raise AssertionError(err_msg) return ret_val except BaseException as e: if not check_exception or e.args[0] == err_msg: raise return type(e) def cleanup_pythran(self, module_path): # Clean temporary DLL # FIXME: We can't remove this file while it is used in an import # through the exec statement (Windows constraints...) if sys.platform != "win32": os.remove(module_path) def run_test_case(self, code, module_name, runas, module_dir=None, **interface): """ Test if a function call return value is equal for Pythran and Pythran. Args: code (str): python (pythran valid) module to test. module_name (str): name of the compiled module runas (str): command line to run to check output interface (dict): pythran interface for the module to test. Each key is the name of a function to call, the value is a list of the arguments' type. Returns: nothing. Raises: AssertionError by 'unittest' if return value differ. SyntaxError if code is not python valid. pythran.CompileError if generated code can't be compiled. ...possibly others... """ # Extract special keys from interface. if runas: # runas is a python code string to run the test. By convention # the last statement of the sequence is the value to test. # We insert ourselves a variable to capture this value: # "a=1; b=2; myfun(a+b,a-b)" => "a=1; b=2; RES=myfun(a+b,a-b)" runas_commands = runas.split(";") begin = ";".join(runas_commands[:-1]) # this tests the runas initialisation syntax exec(code + "\n" + begin, {}) last = self.TEST_RETURNVAL + '=' + runas_commands[-1] runas = begin + "\n" + last # We run test for each exported function (not for each possible # signature. 
for i, name in enumerate(sorted(interface.keys())): # If no module name was provided, create one modname = (module_name or ("test_" + name)) + str(i) # Compile the code using pythran cxx_compiled = compile_pythrancode( modname, code, interface, module_dir=module_dir, extra_compile_args=self.PYTHRAN_CXX_FLAGS) if not runas: continue python_ref = self.run_python(code, runas) pythran_res = self.run_pythran(modname, cxx_compiled, runas) self.cleanup_pythran(cxx_compiled) print("Python result: ", python_ref) print("Pythran result: ", pythran_res) harmonize_containers(python_ref) self.assertAlmostEqual(python_ref, pythran_res) def run_test(self, code, *params, **interface): """ Test if a function call return value is equal for Pythran and Pythran. Args: code (str): python (pythran valid) module to test. params (tuple): arguments to pass to the function to test. prelude (fct): function to call between 'code' and the c++ generated code interface (dict): pythran interface for the module to test. Each key is the name of a function to call, the value is a list of the arguments' type. Special keys are 'prelude' and 'check_exception'. Returns: nothing. Raises: AssertionError by 'unittest' if return value differ. SyntaxError if code is not python valid. pythran.CompileError if generated code can't be compiled. ...possibly others... """ # Extract special keys from interface. 
prelude = interface.pop('prelude', None) check_exception = interface.pop('check_exception', False) thread_count = interface.pop('thread_count', 1) assert len(interface) == 1 name = next(iter(interface.keys())) modname = "test_" + name code = dedent(code) cxx_compiled = compile_pythrancode( modname, code, interface, extra_compile_args=self.PYTHRAN_CXX_FLAGS) # FIXME Check should be done on input parameters after function call python_ref = self.run_python(code, (name, copy.deepcopy(params)), prelude, check_exception) run_pythran_args = (modname, cxx_compiled, (name, params), prelude, check_exception) pythran_res = self.run_pythran(*run_pythran_args) if check_exception: if not issubclass(python_ref, pythran_res): raise AssertionError( "expected exception was %s, but received %s" % (python_ref, pythran_res)) else: return print("Python result: ", python_ref) print("Pythran result: ", pythran_res) harmonize_containers(python_ref) self.assertAlmostEqual(python_ref, pythran_res) if thread_count > 1: threads = [Thread(target=self.run_pythran, args=run_pythran_args) for _ in range(1, thread_count)] for thread in threads: thread.start() for thread in threads: thread.join() self.cleanup_pythran(cxx_compiled) @staticmethod def check_ast(code, ref, optimizations): """ Check if a final node is the same as expected. Parameters ---------- code : str code we want to check after refine and optimizations ref : str The expected dump for the AST optimizations : [optimization] list of optimisation to apply Raises ------ is_same : AssertionError Raise if the result is not the one expected. """ pm = PassManager("testing") ir, _ = frontend.parse(pm, dedent(code)) optimizations = [_parse_optimization(opt) for opt in optimizations] refine(pm, ir, optimizations) content = pm.dump(Python, ir) if content != dedent(ref).strip(): raise AssertionError( "AST is not the one expected. 
Reference was %s," "but received %s" % (repr(dedent(ref).strip()), repr(content))) class TestFromDir(TestEnv): """ Enable tests for a full test cases directory. This class load test from individual .py in a directory and expose them to the unittest framework. Methods are added to the class (not the instance object) because py.test will collect tests by introspection before eventually instantiating the class for each test. It is intended to be subclassed and then initialized using the static populate() method. A few class attributes defined the behavior: check_output -- Trigger code execution and match return value for Pythran compiled code against pure python. If set to False, only the compilation step is checked. files -- list of files to load, if empty path is used (see below) path -- path where every .py will be loaded interface -- method returning the Pythran interface to use (dict) """ check_output = True files = None path = "defined_by_subclass" runas_markers = ('#runas ', '# runas') @staticmethod def interface(name=None, file_=None): """ Return Pythran specs.""" # Look for an extra spec file spec_file = os.path.splitext(file_)[0] + '.pythran' if os.path.isfile(spec_file): return load_specfile(spec_file) elif file_ is None: return Spec({name: []}) else: with open(file_) as fd: return spec_parser(fd.read()) @staticmethod def extract_runas(name, filepath): with open(filepath) as runas_fd: runas_list = [line for line in runas_fd.readlines() if any(line.startswith(marker) for marker in TestFromDir.runas_markers)] return runas_list or [None] def __init__(self, *args, **kwargs): """ Dynamically add methods for unittests, second stage. """ TestFromDir.populate(self, stub=False) super(TestFromDir, self).__init__(*args, **kwargs) class TestFunctor(object): """ Class to holds test information for a given test file. This Functor holds for test_* dynamically added method, one per input file. 
It takes at initialization all the informations required for a straightforward dispatch to TestEnv.run_test() """ def __init__(self, test_env, module_name, module_code, module_dir, runas=None, **specs): self.test_env = test_env self.module_name = module_name self.module_code = module_code self.module_dir = module_dir self.runas = runas self.specs = specs def __name__(self): return self.module_name def __call__(self): if "unittest.skip" in self.module_code: return self.test_env.skipTest("Marked as skippable") if "unittest.python3.skip" in self.module_code: return self.test_env.skipTest("Marked as skippable") # resolve import locally to where the tests are located sys.path.insert(0, self.test_env.path) self.test_env.run_test_case(self.module_code, self.module_name, self.runas, module_dir=self.module_dir, **self.specs) # restore import path sys.path.pop(0) @staticmethod def populate(target, stub=True): """ Add unittests methods to `target`. The python unittest framework detect method named test_* by introspection on the class before instantiation. Unfortunately to access the TestEnv instance from the method the Functor has to be initialized after `target` instantiation. Thus there is a two-stage initialization: first we populate the class with 'stub' functions, just to satisfy python unittest collect, and then at instantiation the stub are replace with the Functor properly initialized with a reference to "self". 
""" if not target.files: # No explicit list of files, default to load the whole directory target.files = glob.glob(os.path.join(target.path, "*.py")) for filepath in target.files: # Module name is file name and external interface is default value name, _ = os.path.splitext(os.path.basename(filepath)) specs = target.interface(name, filepath).functions runas_list = target.extract_runas(name, filepath) for n, runas in enumerate(runas_list): if runas: # Remove the runas marker runas = runas[runas.find(' ', 2) + 1:] suffix = "_run" else: suffix = '_norun' if stub: # First stage, we add dummy function. def func(): """ Useless function to populate TestCase. """ return else: # Second stage, we change dummy function by real one. with open(filepath) as fd: func = TestFromDir.TestFunctor( target, name + suffix + str(n), fd.read(), os.path.dirname(filepath), runas=runas, **specs) setattr(target, "test_" + name + suffix + str(n), func) pythran-0.10.0+ds2/pythran/tests/cases/000077500000000000000000000000001416264035500176745ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/cases/Scribus.gif000066400000000000000000000520671416264035500220070ustar00rootroot00000000000000GIF89a,%çÿ   )&8#1#'$#!1J.A-,*$1>120/35?_157T*=HF;'<=:JpK>%0ESLmP|KhUA!&I_R~T€GFAZDMF7Su WƒW}gIY†.Tm$Vx[ˆa“]‰dQ"tNEUa-ZySTP=Xl_Œc•zR iš%a‰\XIcX=f™„V 3bƒpY2$gŽnŸi›0eŽkž‰Zp¢]#m ‡^8jwa0,o–‰`(p£‹au§reIRk,s§lhXe""x«‘f%x±:t¢1vª&{­)z´~lE7y›·–jƒmlþ BªªÓ¨S«FÊÒè>l”ذÀ€³CÁ˜Ì›+ÚËÀa&(+ Â>®~Aæ­ù°ÕУKGU*T&D±I,¸]µÀo¢»{þ‹Wú8¸ùªÞ5I–9sڨɗo­¹¹WÓóë§^ª)D`h·UMg Rœ§àCT `@Y|B |ñÍ7_}ÍQ£ 3ûu¸š ú‡ Ty5¨¢W ¶hO? °@ 𨒡…8Zƒa|öáçá~!b(ØiÇ]O¹‘ÔÀŠL墋éù$ÀiÈÍ8f¹c…ÍEóc~B†d(—üÄGª„bHM¶™à“ÀÁØSqiLXa–Zc!–jcÍ}_~(æ c†B¤h¦‰’Hm6 g\™ÍA¬ð‰'žÍÝYŸŽõy(j„†ÚŸ¡¤’j&š=}”@oÐÀ«þÄ Á« HE@xãAðhX‘®dÀ²tsç¥â8¬Ž~rª  _Š*j©Ð’š $˜¸’MF@Hàí·à†kàªí®OÉ™Rq3,±ðÒÇé¼Éjóœ‡Îí¾ÐˆjJl$ÙÝz{A [ÄG! 
oD „+Á¬ˆ‡nNê^tW •Æëñ…ó†¬l7æ;(¿(GÛ ¦Q[ ¼E1úøÃ?ûàƒÏ=õÄO;àˆBF\ð-óVÙÅ/õšÒiØøñÇ"W!³Ò™,fÊ\G+ˆ ¶©ä[x«9þ¤ÝÎùì\Ï@^ËXF¼BH ÷™°>ÐßüFU?ÈÝoxù‹¡ä¬uä¤+èÖ(œw³}´zþ¡Ãž0´Ç=ïA0‘„@°:¾¹Nƒ NJ08b…P„V¼âá¢qBeín…,la×^(¼š1_@JtE®”Mšãœô<:ë-pˆ TÅ# BøÁr0¼µª¶dŠQ‰và®K]‘„$´Ð#—QµeDã’\좎ºñŠù‰ñqdÌÄGy I\èløpsD D% €D|)ÍIR#šÎ|&%9eÉjZÚ×ì†É¶ÉµPzÓŒà¥äÔ¸¨l¥[Ü\ø¶ÐÝ’tÛsàéùË5œÁ V¸Á>ˆCDÊ®.Y ÆA ºS…FÒ§!äT5¹ˆÉkrŠ“Î¢hÊÈxÑfô¢¨T%M à-t¦­‡9ÃGIÛiK<ê‚óô# ï S(0A¦ÞºàV™|Â@«O/Ô¡ÚÕ¡B½$îàw2¥òË¢M l )AXJ(B ½HRº¥ƒš]U–@´#ÝPúÕy²ôž/µˆ@„ ’u’Ýù4–hÈõ´tè]WkM¼ÇØþš_÷ÅÔÀ>õ¢…-ì$þJÅ…ª°êód©ÕêíŽDT)ý¨9¼!³få,^°ºÉ0MŠÉ€+zŠÚ¸²ö»E}¨kq‡BÍZ€l`s«ÛI¸wJ áD³8v¸>t[-ç†Ë”’b——})L7+]"¨À[ã#ºÖ•Óîž¼ïx÷j ú½Ý´­zÙK‰÷¾W“`Ã,€J`‡r< qÁ!Ääêq¥bÅlY \`&J@­JÙU[?á`ïBøÇv YxCÖTTçÂ¥ª­z×Ë^;y’Ä!dp¤ÄÌ÷©~ãÆ !æñ¿+íesÝsÖ³³®þ‹26‘à€=žk5CäçU¯%ò0,­ 7õ¶Þäð“AüaI(âÐag¼UˆtÎ1²@ë²WUñb Òs¬Ð¥±™ÌO˜µ(%à3âüÌ¡^±Î–°^‡¬#ØòYÉKÆm“Lh(GùиVopå8Ú¬s³¬¥)«Ç]BÌÀœ±™Í܉á8) Ú1©'iêS¯–Ì@µªƒ,2hÙ¯°Ž5)Mkß:×¹F íhÿ,Òï¤ô+qi[ ˆB"¤{ì<)É€,¦mmºR󨯮6ªÁ›çy-çÛŸ ·¸ÏHns›ÛÐèθ"v øº‡žÛjWþ‰ æ°þ±¥Ê6³!páéÚøÙH1Ïñ²p ‚ÓÙà©ExÂþ] s @ÿÄë—Þ‰S¼âî­µ­5®q;T@q)d· ´É&׈–³½ñmf.ࢭà:š¬<à äµÍq~Åc2á w;Ïïª# Û½¦£ÄI èQ"=ég:º¡DÄ ãq¼°yæ3wŽ.—Å>bŒ“}oMs½èÅ-î ݤ9´–A‰[mNçgÂýípÏöéçd»»¾ÈDß»Ñ9Üa‹ZðLgÃÓ£þØG‹œ¿“Æ:½™kOè.Û ¸(†æaA…3R2ß‹è]AzÓ§>©Ï¾ê³þývÖGÃõw¯… ýügÁþÊÇý¡ ëC ‚ãp”zVºl<~{Þ3v=ÉZùe·Bù·€ûÆYÍæJ 'NCW}”}ا}§·zrÇsàtØ ~%{e~§tQ†qê—k‡@xºuí¶e’oXÇK¾$g`|f–Å€È|œE]@S\‘€` B8„ø€ ç}tÀ°TäwQ}wt¹5h‚‚Ç~ë—k&€xŽFG'7ö‡Ròæ ’·™Öuɧy5( \GDvn=xo†Zà WÌ0 Ã@„z¨}×aIhwÄðWèT8k6…þ!8xV¨l0P„_$µ_-–R+ØG-èReµl  |hx °Ðrçlø!‡pWxˆ‡>u‡«¸‡°x}¨w„¬õ‡€ˆaMÈw……¶—tç†{‹8‚éæ~! 
™#Rs4K]8l_V˜ˆi)']ÈGƒ èfÖo9XŠNoV‡>µŠÞÈŠmޱXŽn'‹«e‹à×g¹8JOhF´÷dˆ¨~Áh…£Á~`ðtÖ{“D*8o—¥‰÷ffP𙀠ÈËÖ†ÚHÜ ÞøLàX‡à(„iŽæh„´˜ŽêèzÖ1ˆùóŽƒu~óŒ"¸ˆˆÀ~ˆ Åh_)Æ…\Æþ@¦Si&HÏœu3X y ¸iÞ¢fMÁ©(Ž!4‘i‘¯˜‡ ‹—„ŽwÕ‘à÷ YJ»Ø^òhkTÈtõ8Œ‡6ˆ Žˆb¿öh´dRñViô¦u¹†œ¼à‰ nÉoiÕ6!g‰”HéŠxwLÙ”M‰IÐpR™„TYFÆd†(‚]‰k)ù•`‰€Ð’‘X@YUd=•O4ùŒÎ…OýW`2Ø ÔÈyËF>Àm1”ÁÄЗ~IŽä˜pƒI˜MiwØ–˜H•‚%’ùåæ‹[©q‹¨(©’—YxQPUòçnî”–†liy^þ7ƒŸ¨†­ÉYLd€láÙU›·yž¯È›ê w‡ù›ÀœÙTÆ™•‘I'é•…w™`ù AàXX¥3žùn]uuòDš”WfÒÕ‰ÜYƒ¬ù8sn(1¿€žè¹žú›ìùžŠé„WIXRx{ö)‚ÃHxú™Ÿ£QÞr ò×ÕIl—H|mifr°“Ýùv™ãylƲ€¡~¹¡Dš}ú¡ß´a9h¿˜ˆÌÙœ– –* –xП!e3ýŒÚN ôÎcü§ DÀÿ'—·Àr:ÊY¯)n¸V6Áfà Bz‘EZ§Ìp¤8 蘼؋KW¢íw¢SŠþƒê~ˆpÅàh0ê…ýO×Ù‚¦YfæÕРøiêrŸ·7¡1° s˜vZ¤xj‹zk$YhÉ)™ø9¥… –lП[°™ê„‚ð| ¦e(]Éhˆ Í·©Ö©¾‘—‘Ñ0ªCHª¥Zާš§ÄYœKŠ~$j’&J™Â˜¢úi¨£ðÇŒŠ‚×ã¥Y'©^àbJ†p©Ôø“›úržZ758˜ª}xx üú¬æ­ȧ"zœ¾è¤Ùú•«ú¹°€ »7®ÆbÖù¥õÔ–šFqi¦tI¬Åʦ=ú£E{9§ȯýê¯Dtˆ °Hþ v~تƒ7³Úꜣ«—Ù°+YŒ't$lu†‘VZGf– îJƒš¦8(ž\‘$%q p¡ꊂ9 &{ (;„¾y§,›„.ëwÕ³¨5¥„Ú­ú©³+©ìö’šahi½H5Z`\°“4蛊f 5¯/¡1r:ªƒ‰µY›}'û¬®çµ_[ßP Pˆ•‡x­Ûœ6›¶—é­ß –‚ }ÀƘe‹'rØó¨CÛGl‰“D€´8z¦¢˜¦kzcµ‘v„»¯Yk² —»¥Ú¸ïù¸ÔÚ§“[°eËœ‚ª°™»¶œÛ¹€ ,\Ó‰«;ij0þ(Õà®h( Äê6¦ƒI±&#²öНj¸¹{²¼›¸ê»ú ©³%é¤(Y™ƒª¶9»¼‚м‚0«þ‰erû´Wrψr§)]­ ½š‡ ¹©žç±eæ9§é{¸éË º»¡‹ë¾ù ¦ ¼€G¹Ux°…—°Ï‰¿`ɶ»¿/ü‡'Õ½‘娺 AkÙ‚.¸‰æÙ‹©šº©œÖ¦áK¯QAŠÁÜļ;„Z‹*»² œ#\{~ZŸ•›nö»°É›¿£ñÂüûÂjðt[K¾7À÷ ù7†™˜®bZ ŠÃª£M«`|¢†¡NÜÇ̾þ‡ ‹íIÅU¬˜@° ¨OzŸÈ –š‹™ú;Æ/¼¿<ëkœI®Àg :Œ‰Íu·Ò•ALƒðÚš~‹cÇ4Ö€ž~ÜÊOÌÁÜ›Œ[È®7 Ðð ‡ ™MÊ•(|h»° :+Æ“\Ì`Àh\@!×…wDrú L\À sv°Ûl²»Ñ¸¤çéÊାŒ‘´Œª@× –`­Ä«È ‹°h‹³-<ÌÌ[ÌÅÜ-iÉSG‰æŠD4z½œÅ ¡¬|¸°´­ÁÅTb)@õ*p ~Îá Ëã ­åÜ‘Ó@ ´¶ËÊÙË]ìÅ÷¸¶ò Ãô<É€ð¼dÀÊÈþeܰƧ{lcešbš·I«yÜË´ßrèÐ Ñ<Ë  œß` Èéªí¥÷ûÅ`LÌ#-Æ€ðbП±¿'·3š‰þ¼º­y¯KʃµO!µT;‘=]ÖQ,ÑQ|Ö³Ô¨: ð«Å'l¶–y¹³!-É#-̰×3\ÃÈ8u¤•8“»ZO7Ír½Ú{ 5­£Øä °Óf ÑØ¶¾'ËÖïéÖ¦ ³(ÜÎ)üËuÝÂË‹×ô¬×{ýù(g ±g£š|®œÕ ÌÀœ¦œæY‘Êæ‹¾• Îh½Áš½ÙnÝ Šl¼s-Ú£-Ìa<ÏMͼOÚ{þð•ÌCÉHŽçb…íK‡-¦–Êš7Ê×8HK³ÛqKŒ»¿½ÞY &;Üà7 ò ò]ßß` ¼|ÔøÉ­ŽÌ°Í-ÒÏÚ{½ÓýÑùˆhLåJ½É–þ ºÞ½ ¬:»ÒWÀ:ÍÞž¾ð]ËõâõÝ „J³rÚI­Ô˜y×Ï-Æ©ýžÚþë’/©UŸY )5š“׫œÕ®àí“iÁbQ¯P›~äïíÞÇ0Ü"Þänm &¾ÈÚúË)NÚÍmÚÅ,à0^੨ \e©à ôN§ |—ÕÃ)]V@Í lÍ߉ Óþ H^çJ~ ÄÀÖNîäþð{âÈí΃ Ï+þßXžå/Nà\¾×%x¥a¾Ì\…\ÛÃÆäÆöÓn ÐniúØr^8Pç N y £^Å{~ê"¼œ–è£ ÉWÞâûÝžèþ}… 
®'xÙl‰F£éšæœEŠ£­€ÛfØÕf²êG.ê@'ꥧ¾ç÷ÝËöX×Á\Ú…îâÒ㇞ÚSàˆ¼WãT×¥9¼\dˆ“HЛ§£hÖam4çìì-íø>í_k vWí{Î~«Îßuí±ë²¾å/^ë©] {Œ»^ÕûÌÉd¥®n™ØÅ®|¢`y5æ-CÞÍKlïfïÒÂüÞþï¶ìï!þ žÂ—+设’ßâè Oë`Òh £Ç5±ên·»ÀeÙ¨c.£Æ–ºg_`i¯½ Pám?`‘1@  O÷¤þ_ú¥°Í0ý«_ßÉÐò‚û1?ûßnó]Ÿè{`Õýð+&ù Þ‚ ªil®½·ýœv>žo ÝüNüüöúÓOýÕïÖ¬ðúýmõ!H  ü“ðφ îqøÐ!Ã>}rH0Ê߯üøíÛ‡ï^½yñÚ©÷ì™0Z´b©REʦH„ùÑóf/P JÄP5¢ÕŠõ*ö(èR 70Hh@jU­^ÅšUëÖ«)¤~@ìØX²eÍžE›V-1¶mݾ…ËÚ\ºuíÚm–Wo³i}ýþì×['DˆF|qa@€ $xð  ‰ #^†¨bÅ0$þéÇÑŸG"Išl—rå.—0ebªé¦9rÎx±ò3(^E‹ýÆÅ…éÒ |ÀUùråÈ¥òQ]úô±lÅÅ÷îv¼{õØÛ¡Ã‹/vüx äÉ•3_žX¹bŸ=xLH¸PMôè!G–”˜8w%‹Ní\ÏYîr‰â`İV­r­`ÈéJWo¡$í‹É•âǶùqÉ~/Ã…VuŒÄàw6ÃÍ‚(D«ôìgCbI´DæÑEŠ„\_Æa ©m¯jкø<×ÅkÕÁZsðÚÎW˜&!C#ëXèB9Äne»ÑE Á$ §`À£¾ð….h€ ±¢/RB#$À þyÈ‚%rßø†7¤V™,.D>!%·†‡(L@…ØÈxšqŠJ§–ÌÕÆ¥B•ì “»r‡|!³ìB<»à€= ±Mð%ÐÒ’¼éshÄD$ä¾qL[hϑ͌¤$)YÉiBª|)зºUÔ Jî[!MZè‡Ú´1†LàÅ9#ÄÊUÁËL²¤¥<¿Y!G—·Brȱð³¦D‹uzåO_tXÆL'$sPHzï{’lhjI<Ä#·ÈfúüÃÍM¡ÍSjk!ÛxB? ˜Óvr\Õî8†”va¥a@«ÞC•—þC_4ÅéMmj·”ˆ§sòéOÚþ—k‚GÎÚœ;·P<0Õb†£¶ F,©4"Pj¤D¥«ÎD£¤Und…¾¬8ÔŸr R´–¶ a §K_š/äà3Ÿý¢éYÚb×»¢¨lÉëê VŠ™FeÔÃ:—Tñ-Õ°lðšÀÆ­|ŒªgK¡§Þ·¶Qõ$†Dà„HÓ‹;´r‡MˆgiÑzZ´ºà–mÅÕk‰DºÖµ¶†Ëíwø²×Á4 ‹Å Ö>H\/×bpÀC¬‰Í°y ²¨Çd]ò’×\–£¦„B Õ¥Ýb´‚KLÑÚ)Þ0w¼OØ›jùøÖ×ú²Wüz/\œG§øzg¯ßp†Ã~¤þz‹ õïµà°c8„á¡-ð ¡ûÍéfµº^JvωªD€v¦åpxçYÏ{%À9®-q\× Œ·Åy+¶m‹©ÔkðöaV+ªp‘JXãnÇn*FŠ‘MÒ87² Fcƒã÷`nÆp Tà´¯i»`^°M3Ͳ>‘È.¿—E`³^’´è핯֛±÷eãðḿnæ±ÅÀ`ÍÅÒù#c+[ÈÀÕ)Œ²0A BrP@A OHfO1¢;L^yv! 
·Ì¥£‘c€Hó3½¬4O/¢d¸EÓÉØô;íé¾x#ÆhÞ©÷À_/žZÇ©¶X’»\NþNõÎ ~Ia³ÆRÂÐϼ†£:)Ú#ˆ7¼Ãާ¾*¯IÎÙ@-¶ÜìgßÓ'ªv¶±=q€Nžö†™:jýÞXšæÆÃ›Å7_3›Ú´3(WÒ’´Ù:e<É5PVïUâ0‡;¬Â°Ç+O÷\ (¸šÚ†T@›áÑ®íé½é‰c;¯÷Æ5ÆqQCk¸l¹È?‡9táÇ®Þæ½©B5ÞÚÞ@‘C5(Ì]ú ŽÞyÀ}ÞsÝCÈ€ L££¸ËÃ62šîtŸbüeNFݳfSÖ°!ß±ø¸>‡98Us»»U^2¹œ6·aÐRfèëþGfŠ{-ç>÷KåÊþÊ&±w†ÿÝpƒoúÓç›ÈÃÞ›ÈbÇKýqÇ£:ò[§<åÛjK1IóílûÂIïsÅÜ 4Gg/|=&Bs ç<_½Ï¯p…Ø}9ÇAŽ .Ñ ¾g9.J·=[pŸûܳ÷¾÷†Œ¿_¬ÿ¿ÇçšÉ£¼Ã$ ð€MBŸ‰J•#ÎÓ¨[#• ˜!ì“}Ž™1-P½ðë¹+è90?âù ƒKÈ£k¸·`‘ø‹‹ù#¼§³¿ÞË8“˜«Ã±i²$ンL¾¤¼"“»”|‰2³誵y3²ê[ŠëÃ>T!©ü‰—"àÀôþÀüŸF³ç0€?¸„K@Áäy¿dÁ·pÁlƒÁ=9<¾º¿´ž«Ù¢áë¢6û?Ô±œœ› c®Óy®‘á<¹:²^¼¨éÂIþ® c´Üù5 {Ä¡üËÙ|Á¬@8J¸JàMNô;_YL/“Ê <Á›¿ªŒÌTœÌŒs†¬ÁÌþË‘ãLy,ËÏôLËËŵ4¡‘ø¤nBB˜h«·¹ Þ`Íìë°š#ŒÈ+ôËÙ„Æ&ˆÆiDÊ5¹çÞÔÏmü»á$ÎQDãôÈŠ‹¬ÌJ‘Ü„h:Iè<7•¤Å<@Ï´Îê<‚{d¬ªÒDpz qú3²É“¡> %H¶³9`SS6eS7}‚'¨dã¼»ÓW½ÓT³à|/>uÁÂ+PÉ,ÔFPªDE7®aÔÎtÔê„Ôσäòð4°f}„©B°-ñ¬¾â¬ó̾@³9:­R}ÑøDÕTE$ DäH¶»ûŠX½ÓN˜†Ä Eb°Õ%ÐAÕÕå”$õ?%þ•Îä‹G …Ò(…Ô(=ŒÐѨfmVÈÈ2›Y;M¬’ŸœüTl-Ó}‹ pÑSMUqE‚µÍ¯`+¡CŽ xÕu}Õd ÕÚšWÜKëÄÊBÁM(7Ç‹NäóW'R(-Ö³„Ô1ˆe]Xgõ$èK°}\°i-ŽÊ¬˜›°ºì…[h2›+Ž 8‚¿üÖøüØ'ׯ}ú” ƒÃÏIPY´•I°…x•×—¥¿Š364Pšõ¶­¡C~ýœ`¥ÎžõÙbØÏ,ØŒ@…=ZP@Í‹XNÍ*˜ŸëU0!ѹIFÞ)Ê®mÓ¯ [±}‚à‰Ó­¨Ñ¯P•][þE0]S¸­ø3N¸¥¸CÅ54P"uÃqØ„ÌüÕÿšNê<ËGXÀ5›ñ¢åQ8Ú…UnŠ>_”7j=;"ø3ª½ ³9 ŒÏS ×ÍÛ%Ð^ ¸Íªh“0P„Ò5]ÓE„jû»eݸ}ºØ­Ûåt4,E¥ÅVbÚG¼©PÂåˆr(ÞfE’0›^d¹$„Ä ÈµØíªÚŒ„­®­‚p[ìýXíÅàè^·Â;<ß6ݶM±ÀK_Š T)bßöLq°ÙÂjÇÛ½ÃíÁúýÙÏôÝ߃߽ä}0ÜÃEÞ#¼(yóÐñ H.0Ƶ»«=ÑÝþZ½Þå\ Æ`$¨縓ýŠ.á6óå)[]] ÐŽž&ÔB5ÒlÐ%ÝÌäÙÝå]Æ_vI{”€8Vøßf=ÞfÀ–#—[{Ü$KbªÚ8:Q½|pýØq•⯥bíM‚J>6äÈbt•Š0Œ.6ÝCã<ýO2Fßâ¼ÕÜBc´[VÐAÉËÝú \:–c¼R¼¹ÅMÒ)èãGâ-]^zÆ#>dD¾XÀËáPÆÌYÎ}‚I^r­dH®Äs•ÓØódE EaQ† >]Ý2¦WŸJeU¦Ùk½•EmT¿ãŸuIZ~g¼Y>ÑX…>–‚?þÞ¼v@”ù<˜»®][à£XbÆ0`{d NUg†æh†æ‘Ñj–Š ¸æñí„P^ öÑ2ãÖÍ«qÆ¿UNIWÞY'•cû­áxÎáü½£‹¸vë]þ_^~·M·,é3´fô\âí;Hö¬`fVhKN‚qêzù\ܼÆlÆæ&PÞÓ‰Ûè¦ÐRîSTöh»Í8q•|cVv6iwVéZ¾£y)Ùÿý_)`Y 䘸ɗÓ,ÑËé‹]â[ðI)” @èfžä…jÀ®d²LÁÏ.VêÃ(ºæéfÖjŽçTŽ:¬öYXÒùmI°Že±Æá”îìßñþÁ%¡x„>† †XqéÐNýR$h½kõüI 8‚ìýkK†d¡Öíò;ê«]©¦þ`9„Ô½=2ƽÇVßCzÝ«¶ÛtàUÜÅÖì“®cxVé;ò¡cM …éÃuKÄ(B”Ë€$‚уÞ%6æu%I¾mÜlÝN‚Ôzh6q%ød¥¶òµ…–5×Uî¬8&çBý`éF-é;áß!kÐÖnћ〄~o†–ï—Ѳ5l¼Þïò@„N0îqMnÖ•"gp†¼àñævnš=Fèê<¤î·nüÅþîÏ¾§âáþ-Þ O\蘆}•œhÞó®ëíºë[ è¥È0jøNñ3çÞÞî#¼ Èqd9„_˜p.ápΓoçq/pš}ßéôÌ€­áÇî²6ô<ú+(Ÿ-صàz0BÓlÚ¸f·Y 
9Hâ%órõæéu’¨b¡&ê3êjTsïÅ;68Œl^ G„T°è· ê:_îáq½Ðs>ÿh¬ÞÖ:ÞìÀÍîorí¾£/Èý`XX^²•`‰X`0t!€Œ¹óf‡×ñb¾ZâVxfQoR·äø NuAV'_dI §ÆŽZþOߨ.<Ïó]çuÊÎ8s†ÐužÐ°&tÏfòC×n1è‚à£~ÈxLÕP$DÍ”±ô Àô.hõ†…¼“Ðâr/wÝ6õs‚’W÷¯ÐÿŠ`xwyV˜ub w¸ù×sgàs~ŸAËŽã€OòÝup”>víwšk:Xލp)x,î%’i ×ÌÚòUØöºÌøbÞxö.´#@qs¯ä‘û(ù’o)”Wy©`y—G@w·˜y伸p\¿ù}×yq`„¯øa÷Ý ÏnCúX‚§.¸eô)í`»‘ô §·3ðT"¸ø×¾kX°|Ù6S8‚Q7wþ±O²/ûÐ?{©HyæpŽÀ¶G€yQP©Žê'Eï¸yœÇ{w8Øv&öÎ6v›qòƒG)yâC|ô{°4hzŠbü¢.Ï7¸i™»úµëvˇd¬Üðü3÷|týð}(}ˆH}ÕW–Cx×·íÈ©®uÁSîèÑ÷œ×yoV awþû%|dˆ1Åüò¥ Â._€HpËDˆ«lÛ·ß½zóâµS.Û3a»hÅRuŠ”'L•" ò£GΙ3^¬@!b“väv’«æ³Z±b½n½ë(•›J—j`" Ô¨R“üúcÖ¬Z±¶þàÕ«ŽbÇ’ûÕ«<ˆÖ²e ˆ­ A€ÙJf÷.޻Ȓ!ëÛ7/`¼Í.<ØâkŠ3n¼Ø›bo’'S®œ.P›6f6›ÉìYóçÍÇp63úôi1© Lø%LRè‹è¯Ÿ4ø,bÜÈQ·ÂhÑR¥ %&L‘ùñ#çLšL”®Ò¹ó'P¡C‰Eºô;‘†O§F­ZþêÖôC\œ¶,üí ôi»ö-¢¸rõ ²ÔŒ/^~éå×_f‚Í$¦Ø‚Ž9YeRæŒf›}–Ygž‰&Pi¨y¨Úh‰aPB ÁÛH‡mþ”cC.¼e´QGà'Iþ&!·s/­]M7q! O=ýÔvÜ5JRà)Õ ä™Õè©§{g½Yó©å\rŸ\uéÅ×^ÚU y%hbÎ,ÖàƒŽIx§7çlbá…b¸a‡¦¶Úˆ$&t"l]ä Áì´˜‡øäƒFõpÔŽÜàK,'¥¤sÎɱÆLPLw'Ö]ç’IvË(A:IÄ ±`•UV^‰e{[r)–—oá§ß˜dê矚j¦‰f`˺ §3„EË`œrÖÙžŠã8˜õširöá !²Vâkˆv†H C?¶Õc(ûLzÏ<4zôLH#•ä©r˹4jþt§ÚÄ9ªb”PE½Êä¬6Õ* Tæº+Æëõúk—g ÀF[qÍe,™¶8 ›m>{Xœ .X­µ×>–-eâØ,Ž3p„n¸ :¡µ†¢a¬ Û*ŽÒ"(6Ü3)>õXÊ8àôKR§ÈÚÜs¤J§TªE.ܰ’ÝAñ Là”Téš±zlÌ1°³Al±$‹üÇü§2àÂ<øà23F³d7ß|+‚;šÏ!®¶š¡¥{âшÆÀ¨=¶é#Å#öVª¯Õ!÷/r‘°äÒ?Îd0N ó46ÙF=Üä¬*HPT¾ ·z=È­%Ýuõñ\yë ß|ïÁþJà͘ áÕÇl8â‰+~3#}:.j mn¡CgžhÑ5‘[´Œ êܫѥU_ÍéqŸ®þÒ¨¥ÂöìŽT»WÁêl¹“@x@àe¬ÞW|õ«ö zãóš·‡ Â*K™€c=ÂîpˆÛÞͶa …‹4à+×@Ä7†s¹&}é;Z‚ Ûð#yÝŒ~óÓå`ªk‰ ¦+°Caì…«Ž"Š;¸AV³*À@Œ9°‹˜ÝÚ# ^0ƒÎÛ`ó ×,ž /ÑbÙK¡¢Ðf³øSÏ^(¹=ư|%²¡ºŽ†̦6qE.’Kµþ£F÷Ó‘þZâœÖùlª2«8EÜEì& ¶x¥.v‘hOñŒç€dfüÃ÷мæb lSaFH'™e[u´Ùžò¸!ÎP†¬1 1'È„La(D‹¡›ù‘$ÃÉ)R·?˜xáuK¬Æ%Ÿ¸Q¸¡“NjJ(E™R:î¤P™J1¶ƒ³Œå,ó¹ ¨Ê¥.åx^þRq×0ùø¡cþ±h7LTB´ …Í5Ês6X…"é×Hûm ’*Ù&7¿†ªp>ñä4笚bu=xç; ÏâµJ Ö=јÏ|â’` e=öÒþ—݆"7¾R®rçs¨Ñ2Q‰¶KAˆWD‚!…§õF_ܸqŒ“Í”q`2ñæM¨ ’f§¸H„Qº 8E”.…)L0SºA =.˜KNe¹Ó|ÊÒ á™$T• Ç,ô¸Ô¥ŠHh …*D!ŠªJ4iXZDú†z®~7ò—Gy40׉Ô&©rb&EÁ¹ÎÊt])ðîŠ×¼î5ŒgauzOÂæS{ñgP]&­ê tŽ4{ìÍ4A.s5Õ|—ƒ*Ñ4ËYÎÊÆ;¤ ê¡È| ‘_×kêÄšj‰kÛ‘AÛˆÙ¶®\dçnó‹€Þr¬=þ$ˆ¥`«ÁÀ6‚ŸsbÌÜ9ñ2{еY:AÌV÷²˜ ¤f»°]ÎN}î‹?@‘jšVS¨ØJÖËÞ¥¢‰ôI+Ê9ßÚÒ•»Òm~ñʃðw‚gÙ€p<Ü{ö¡ÈÏ3p›ªÇ¡^c—vrðƒÅ‘³Ê–­!f£ªÌÍn˜ªEpæ#²)ƒRø"pNwœô"‘’­µÉ{_ÜŠÙÎxV7¨qðð›c»S=æR{ 
àAÇÒȱôi’uIÂ85¸fÛªL”ÅqŽmP˜˜ Åò…•™á.o¸»Xõ?‚†{”v£ÖDmþ>ªb -Å .^U5@×:/åÎþ°ñVô¼çwòà5ðóYR©J,Î#nN ½‡:à¡ÙûŒ¢=GÛŒÚQ>Ç,,MCËeYËÚå´D¯p‰*},qVafîK×,ÉX·Æ:¯µW)º ©|i±‰áÇ»¦A¯}]ƒ,}%Â–Çæ0èà*{Í~8Xñ7»D; ÓÖÞ£1~ís¤C“½4·Õ…áMƒ[ ã¦ê¢$P ×C õ(õ¾iÄm®ÞK‘ë{ C軓cAKIùo^œ58: ¾x– \Œl2Ãû`ˆC<ü¬¸Å­=HGú^OÇ&ª\(tu;»Qå2¸Oî+’6áþGˆ±îx”×ݧ;Iz¹Ö¿7¡:Eâ…Œ{ng[ }è‡éÑkðÎS.½é타7ØpªS~× Ö{qµë^çøAûèÔëmäg×pÉÕnrq7ó™ 38`>ÄŽÎ|½5/ÕR¦*^ÐYðv¾ÀXÐNÃó èFWüñkÏ`¼¯ga÷ lfSâu ¾,2¯y­Gú—Ÿÿ¼8D4vL£OÓ$O;gÅ}…*°Q GhE=w!fÃîØôYC%öÂŽ¯ÝI+XïuRÚH â!;%ž;éó ›´‡DÔmõU\_Õ!æiŸMF÷yß÷yRþ]YÈeZf§©žú±Ÿ ’R?ôP.ÈÞ©‘6­WLàÞ7Y‡¬ ª ü[ñ ò%_âÕ€ü™}ÅÀ’ôUàaàÃi`^ß³yàZ[JÚz3ŒÙ‰œù^É™\ú‰› º gAÓÛµ©á ÝuÄWÙŸXéþñ‹¹Ïa®Íž.`& ÛWܰ-@Jábà#Za³eŸ–vŸ×Ù̦ƒ+ éٕȲ ®¡ R”£Ü?¬(ÈÞiåHª¥ØÀ¬ØR¬U5ô¡¢Íî án¡Àa!Ý "€""ÜWÄV¡þõY!D#X]æmá%v¡~]Ç™¢ (¦^®Ÿ)®!¸ ¼¼Ý> :ôÆÇÕGªÉÛsIÿù;Hƒä"xøÀ ¼@ t]U/º“/"0câ)]ÁcM…Á²5c>ãÃI#<#4™´5bãölb:¬Ã&$ÓÙ©`—‘¢8ŽãzhÁ8äÁ=œ™½"꤄j%ZÙ›.x>Þ?¾4EC¥@¤ a"a0.ßB*¢ó}@d$B£4R%Æ‘Fn$Gjâç¥Wrå" `ù™!8RU ¢ä6AlŽtÎ îÃ#Ô`F´ã+Þ_rÌ$Ñþ[ÿÉÁ*Ìšïù€?ePî˜ ¸@ äñ$B¢&â1^€RaTNäTRÌ52Yca%G~_Wv%#”Þ–‘䆙¤Yž%û-»ÁÛñC= A=‘#ÑLŒí±ZîõŸàù%ÐU`f@¦"¦"!R"ä ÀÀ> "¶‡P^TRERÞeÎuÂ#h¦ÌlÃ@]czæg~d tã7¦)ž¦ ¢å3- Üí+FÍÔ8Ò¦Àr›ÕÛNoæT¦ñ§Q.æb΂ÎÀ #ã°¥…3^!eF#uZçu^&X^áÔÉ6pg‡nCâþh6‚gx®Ã:ØÁHŠ%ú‘e)¢§z¦gÈÆtÎ{ª£lÆÜ»Bí­–íç ”`Np²ÀÂTQ"ßQhâ%h‚#ö×Y€DäJgeB\u^¨…šÈF:™c|(ˆf% ‘h‰†ŸTM•y¦!z²ß‹ºi<ÁxØ òÃ>¨ƒKJÍ¥´Û‰i]î^ê£_vÀoš‘æXQ*¦’Ö“&( (¥ccD)\ $N§eZèD£¦ZˆT‹áˆ©âtÞˆnex~fø‘æv™f›¾i4AœÊ)CH,¨¢[ÃΞ¿èhšU7Ynöœ¨`‚Ò¡"j")þ.*£6* <+ 4¦W8¨X4@{X@J%…fª…rªur1w^O˜.™–©™–è: •š®ày¶ª)ªœ>Ô«l¤€=´å=ŒšFaŠÕtTª©ŽþAG% ग़À@öbãqc£"(´>k ðs2äY›•n+–jª·~k†”Fg°Âµp§¹ž«âx䩚¨‰²ë(†c›¢¤¼Êi½ÊiÐi[æ(äª5…Õš‘Õ6°ì'5ÄpÀ±æ×‘"Ò1kÄNìÄ:éÅ6§Ç„`jÇzì–ò‰Èz†ÌB ¡lʦkW²lËŠAê½kÌže¬ÒkÍÊi8,´þ%>ä:ô«AwÙÃ{â©ÔxXa“6õHÿx~ÖÙ ­Ú„ÀÂ2¬îÃbÄ&§âN¬´6èãþCøW…jíÖâŸìLf0‚8\‹çj¥Ê~dèŠîªªíiºê̺-¬®œ.A¹µO[æ(äÞÊäª×°_¢Ôî ¥Ânð'â:ëñ*îrFêã¶Ç´ÁDzìÇb.¸j. 
™Á&\¯ƒdïrÜgv/ËnÃè¢!‹®í›þŽo½–¯ù.»@CÀÂ{ÖCh„W¥<ÆÛr´Y Î˜”€oV@þ"ëþ&é¢ïÿï0èòÛ]­¦nj§†læ~ qˆ,@0cdo6š*WZ°Ùbðê±ééÂhêªî¼Š°'ŒêƒîC0€Âûê펬ŽsÐo«Ñú®þ2­ðö/“öðÿ2ÞW@C®W´ï|¶[Žºpư9©@ ›@‘òZ8ïºr9ÿ/ ó×Ô\æô&1õþ‰<·ÐøXľrò Ö¦,òÍ™Ó jïþ.8çð²Ò±Äfô+ËÔY8îOêYÀXÈ;ò )Uøh‚çŽ`>¯4#¯:¨¼jqLOrWòÛt½Îª _þD'téx2C· –Jý*Å øf°À7/­á*õR3u·@ ð´¦óƒòôîò#”IWÚ‰AŸë÷™è©NqX³¬øs$Ÿu3?Z×4T|pT´.o<‚:T“ÌÕ.j³“ܯÚH´Ž%ª8³£vav H­wt”*A…h®.“V‡ÏjÌBV^6fƒ5#¿Ã;¬C Œã?¯®L«õZ—v½JÅxX>\„:<‚oС¦œW<þ©ÎÊ^7D_óÙþZôE×±nï6o;µ,?îø—.;Îc»Ð<ËdÓˆˆÁ6tæç™­ºn6tC÷" ¦Y“oh¶T˜öT¸@{Š(þ¬éÀïÌÙ.;‰í˜À ©m'|/)9Ï·âò6o£b+¯b/âŒt…”ôt} ýrz5f³,÷*ø‚¿Cƒ¿êgÇt„«õ„o7y€±x€;\ ¯vÝ™·Ÿ†J(Ó£“¼P¶7ѽ7³øÄº¸‹ûvbÏ8 €p{0EvŽ ¸ ¾Æ"|§ovX/¸&Ä«$'y%/9i7ù¬&oƒxÇ$Ó$ÍáæRpsï"*ª’b4™C«™_º}{…1ª9€<dǹØ¢ 4t}!žãùœ´Ÿ{1 7y…_8¥BÈÂ9<Ö§ús¹+¼ªu£uZÿ9[º¬W…ŒŠWÔ A;Î5ª¥q6ÓÄw¬7( a¤¶`;Ô&{²3nšsºA„úÏ0•èY—Hv+ªs{·ù,ĪL‹¶’—»¬ã T(¡S +â:À^9~j¹R`Ñlfá¦8ÄŽù±ó»™£€Ë·@òB5§7¥W<€iHV€ºS‰dT‰rwä;<ù‚ûBLOñsRÔQIåÌTREÕ-_­—¡--è¨V‘ÊœÐ$ÚR²mW-Ø\ó×*ªÐBa{"¶Xc…RCÅ™]ÖYg]ˆ±BƒÉ#Ñ¿S«QL•KR»¨a\8E—1u×í¯]wßÕ ­{ëMaÁ*İÕÌsxà^ >á„f8'‡†xe›óób•€ÐzYE,pžÁ¼I&ÄÛ7„¤¢¿;×åOaŽy¿™|›Sµ`UÑx~ßûõ`]FÚ¥ —N|é+>qêwH„Wþ¨¶øê‹{A‚hë©ñãEE¯ä¹ä˜Ô‰¾þ’ÏÜ·á~LnþènÁî»ñNçÐ è»ÞB` ÙR•h£Õ4œ`ÄO¾ŠÆ……ž $÷…j«/ÿ³†ã(t•G¾6K˜lca‹ÛHƒ<]øcÔ×ç¦{öÚmv`o.u¯ßà…¯xÜ >\iå“ׄ¦Ééq“œPµ”Šõ©zÕc_xQy`g,a ÙZDÒÉÁ|@R§<7öQÆ}vƒŸíFp»ÐÔ¯^§¡È”¿€í¾ú›¸&pX4àåA5°z™Û\;ê 4¨£‚b ß¶|d6ÒIÁHdþ]ë\7BÆÎ„'¼R Gp Ћ…ö (D¸]/C7 ›ð9ò°'8yÜë$OD…zD„NÉìIðPc)ËÉö£)VQSo¡#ÃÅšyñ`´äÈXFW`A1$IšÖhÃ7.-‡¤Œ£—ƒ„îÄyC¢<à1 ËùÑY€´å%Њz´ ÁPGwœØ£(ÊET”@H—å’RšÙû(iI0ž`^+Ô¤)ò¢ Œ`¢eJ‰°Ê•H çViG‡áQ(ò€%<Ð1…>Ò²*¶da²’‚rÌ#yÀè0è#HÍŘÈtÙ#!)ISQ²’Òãþ0I‘Ü]óU XP HÂM6"o”áç)Ÿ°œsÎq€wd§;íDzºÈž÷L$° æaÆ÷´å‰aúÁ˜*“Ö×ÌH*´J u¨%OkRÔo¡YÀZr¡º1€pªüBs]ólK ¨Â Séa#e¿•{ |‘\5/[ÓöÅBgT;ºsNÁXÐp­U“íÛç#AÅ [ËÖÜél•×Û÷Š £^–ô¤é­”+k œÂ,4½ïÿvÀ`°å´©-çè5íàÄ#¸sþÁ‘bÇp %>qøU\ʵÅxÆw½ (/Ø+þ¸V‡[o{+ð*'W.XYÎégoæ4 ¸’ÌŠËVð’x[[x~$ZÖã>o”‰~î-©éìv*°=\£ÈØ{*ù<§à ~¯<ëò ‡/Žó¯'FIØ\0h‘pЩe<=—À2¬á ú„q—ò¡C\w÷žqiŒý2Ô‰-õ©3ðj?øš ÏfzÐC`¸Ì7µ˜n!Ûœ° ø0ˆ !° èíêb¹=ï×=ô¢·'ß낊t¤{³¥IîÇEô»ð³§==>qàÅ'©ei•€#À!³‡ÎÃO]Û7OnþŠ'ßÃJ¥ÈΜ߷-@ Ué¼ODõ¦³®¦Ìbϼïûï¬Æo>ܦ>+8!~ol< ØÆÊí  îèϯ@O!òOwM!` TL¸¢îúVõÆJ ÐáÐû¦`Ô .÷–„1¦«FV ÄÁKGhÖh ù>Pùæ‡~FPÿðEÒVPä°O–¦Àh/ëfpöÂï•$}î£Ô 
!Ð@‚Iá»Â‹}bGvb«óŽðÖ˜O! ` ëgK,«B +g¦ˆìéf ³à!ñÈO™î#kZáæêÁÎG^‡­Äßp¶ìO!&Šuþ‡×x@«ø. ý®ÈÀŽàõdo½0CZì¬q*|8¡ç2EnÖðøÜð ÝŽŽ[hK@ T EqYše¬®B³¸Žz`LÐîŠ#c$ "領‹ÜóÖ…OÅy“Ëú²éŒ‚)ZðH±À^®Çè ¤q§À ª1DÀ/DáÁ‰T ˆ]8°ç¯ ¢¶äбŒ¶, –àùP—ñ¹D­ ïÑû>ÁÃñ< ¤ .à IšR!¥ìrwR“ìP*Òïø#­"à;’ÂáÉu`JþVa gñŒ$ aGi§%]²è"r&5‰wä(&uàxRüî ï]Ô—!8!dj1e&þÞ*£²þ¦’*«’×\ *rr+5rÚNæx€ÂR©q¼ >k¤`J+Ü,#Ûpèâ’è|±ùè’…"r` jâñrö²ëº0º¦À¥‘¡¦‹ÆpGòý¶È-r'Óܪ)Ý.“¢"Òj@+1ò3®o°|R0ÿÌ2J ЀvÄØ^Óø"³vf3î*S!èî6¯)"€–B/»Rü¸° ƒ>A§ 0)#P.À¤€Ðþ 1>À™ Sè$s:+.â:±3;·d;»s¥|³Çú²à‚S™Êqj J åò`Gl‘Z@óê“ód?ÍM?`û7ÿ“;“€ž”@e.8߆#³°  Ä` ’-ZÓ,t%£o4Ôó8T ?”©´“;;³D¡«@ÁN<?È3 }¡Aù#kØ3[á»8À)=0G mG{TÄ~´¿sHOÔHÕÇ1j€V´?ïòàva qX6cS:­4î°4KÛK´³0§K9«qPÉ’¼Ttö–tn`*J’†/SlÔ>åtNé4 yÔNë´XþŠOðK[f(ÃM>ÊÓûXÔ2úQÈ =QD3Ro­N)UÄ,uÿP #çQñpL=='£ HÓö#+.€ >ƒð?XõQ]•2%5Vu 2ó24ýTW÷ñS-c8G•2`j¤`lQJåFmfY=OšÕY×-"€®WÃó[¡¤ÃI#ÉüBР57@UÙ\QÈ\Ï5]Õu]íÐz žn/^]q^yu]‘^KX š3`G 6î–/"'õ`]@OÎKPC(¯^ç,T:¼5\Y²U;v!c2×DV×@`À¥l04C3LþéU 7£ÔV@\./Y9¶f‰Ž@Œ"ø3g3dàŒüäue‡v§$k0`)OCï“i™5"£Vj§–])8ƒVhõ#ÃÚe&`nýâ_ß’fÉö%3jÑvAV`8+kŠe¹vf\€>@÷:.óöón–½úv­rAÀjåìHÓ%b‡N+r#×êÓ2+7ÿþÖj€H!öm#îBÅVYG×Ü–OR=ôt—ðo 6 ÷pa÷nÇvv7ôi wÑÑ¢2“ £ cm2tñ–x-]AVw&Kð½š÷4=—hA^È‘zÓ‹ @RC6þ{d`(`uhàç^·QcwiÍ—v­7"o—}RDÄ ?êwJ”¶\ó·i×tý—*Ýw@uNS¯¢wE7•Ôw}ØÝ×â—1€×~åoz1ø¨>à6ÿ:¸GÝà—>IXxe÷„?o5±…cUyIV†Ùî1+˜|/‡ŠâpKæ‡õ…ÀN )Ç÷)M؈ÕKƒÝ·™ØN··w B+cŠkÍŠ‹Ýw‹Ñ€y Š…X3fö†É÷dµÕõ…pçó5ãø|ÍøŒíøt¡Õ‰c8Œ5öq‡÷ -AvÙ÷rÿ(þŒkTz«8Ž?À`ƒù’#ß÷>`U‰ŽŒØ“;”™¸˜’KY©øfR…`rÝ{]y‹aù…@(àê!où¯À‘¹—Y’ÝW‚y˜åï–7YƒYù“™”Õø™À> lOy`ø(à”Ù}Ï6›]Ù™cù(à›‹xYÉYtØ}RñÙ!Dù½ÜžÅ™i)À }÷Ù:ó9¡!b›ñXÀÀÞ™iç:ù X¡5ú|£b ¢ßù> .Išœ Úš=ZYx£]"~y¥I ¤Cú$šFú¤uz§É™e,€ ,úšEùž_ú¨GÃgdº¡Ú¡à©¡º©›z©_8‘úª¿„ªµz«·¤¥±ú«$º˜«ÉzK ¬ÑZw.€ËZ¦/À¨Ó:®5IÚZ”@ä:¯ëO©¯õš¢;pythran-0.10.0+ds2/pythran/tests/cases/_histogram.py000066400000000000000000000031351416264035500224040ustar00rootroot00000000000000#pythran export histogram_neq_edges_weights(int32[][], int32[], int32[][]) #runas import numpy as np; d = np.arange(10, dtype=np.int32).reshape(5,2); w = np.ones_like(d, dtype=np.int32); b = np.arange(4, dtype=np.int32); histogram_neq_edges_weights(d, b, w) import numpy as np def 
_histogram_neq_edges_weights(data, bin_edges, weights): _BLOCK = 65536 bin_edges_length = (len(bin_edges) - 1) hist = np.zeros(len(bin_edges), weights.dtype) # omp parallel for private(j, tmp_hist, i, value, bin_idx) for j in range(0, len(data), _BLOCK): tmp_hist = np.zeros(len(bin_edges), weights.dtype) tmp_data = data[j:j + _BLOCK] tmp_weights = weights[j:j + _BLOCK] for i in range(0, len(tmp_data)): value = tmp_data[i] # Ignore data if NaN or not within range if np.isnan(value) or not (bin_edges[0] <= value <= bin_edges[-1]): continue # Search for index bin_idx = 0 while bin_idx < bin_edges_length and bin_edges[bin_idx + 1] <= value: bin_idx += 1 tmp_hist[bin_idx] += tmp_weights[i] # omp critical hist += tmp_hist # Last bin element is inclusive hist[-2] += hist[-1] # Remove the redundant extra bin return hist[:-1], bin_edges def histogram_neq_edges_weights(data, bin_edges, weights): if weights.shape != data.shape: raise ValueError('weights should have the same shape as data.') weights = weights.ravel() data = data.ravel() return _histogram_neq_edges_weights(data, bin_edges, weights) pythran-0.10.0+ds2/pythran/tests/cases/allpairs.py000066400000000000000000000007521416264035500220610ustar00rootroot00000000000000import numpy as np #pythran export sqr_dists(float[:,:], float[:,:]) #pythran export sqr_dists_loops(float[:,:], float[:,:]) #bench u = 300; d = 300; import numpy as np; b = np.ones((u,d)); a = np.ones((u,d)); sqr_dists(a, b) def sqr_dists(X,Y): return np.array([[np.sum( (x-y) ** 2) for x in X] for y in Y]) def sqr_dists_loops(X,Y): m,n = X.shape[0], Y.shape[0] D = np.zeros((m,n)) for i in range(m): for j in range(n): D[i,j] = np.sum( (X[i] -Y[j]) ** 2) return D pythran-0.10.0+ds2/pythran/tests/cases/allpairs_distances.py000066400000000000000000000006221416264035500241120ustar00rootroot00000000000000#pythran export allpairs_distances(int) #runas allpairs_distances(100) #bench allpairs_distances(100) import numpy as np def dists(X,Y): return 
np.array([[np.sum( (x-y) ** 2) for x in X] for y in Y]) def allpairs_distances(d): #X = np.random.randn(1000,d) #Y = np.random.randn(200,d) X = np.arange(600*d).reshape((600,d)) Y = np.arange(200*d).reshape((200,d)) return dists(X,Y) pythran-0.10.0+ds2/pythran/tests/cases/allpairs_distances_loops.py000066400000000000000000000010011416264035500253160ustar00rootroot00000000000000#pythran export allpairs_distances_loops(int) #runas allpairs_distances_loops(100) #bench allpairs_distances_loops(100) import numpy as np def dists(X,Y): result = np.zeros( (X.shape[0], Y.shape[0]), X.dtype) for i in range(X.shape[0]): for j in range(Y.shape[0]): result[i,j] = np.sum( (X[i,:] - Y[j,:]) ** 2) return result def allpairs_distances_loops(d): #X = np.random.randn(1000,d) #Y = np.random.randn(200,d) X = np.ones((500,d)) Y = np.ones((200,d)) return dists(X,Y) pythran-0.10.0+ds2/pythran/tests/cases/another_quicksort.py000066400000000000000000000034451416264035500240200ustar00rootroot00000000000000#pythran export QuickSort(int list) #runas QuickSort(list(range(10))) #bench a = list(range(200000)); QuickSort(a) # swap two value of the list def swap (l, idx1, idx2): if (idx1 != idx2): tmp = l[idx1] l[idx1] = l[idx2] l[idx2] = tmp # partition the list using the value at pivot index size / 2 and return the # new pivot index def partition (l): size = len (l) # the pivot indfex pivot_idx = size // 2 # the pivot value val = l[pivot_idx] # the idx of last unsorted elemet idx = size - 1 # move the pivot to the end if (pivot_idx != idx): swap (l, pivot_idx, idx) # the pivot must stay at the end until the final swap idx = idx - 1 # go through the list of the elements to be sorted i = 0 while (i <= idx): if (l[i] > val) : while ((l[idx] > val) and (idx > i)): idx = idx - 1 if (idx != i): swap (l, i, idx) idx = idx - 1 else: break i = i+1 # finally bring the pivot at its final place assert ((idx == i) or (idx + 1 == i)) swap (l, i, size - 1) return i def QuickSort (l): size = len (l) if size > 1: 
# Get the lists of bigger and smaller items and final position of pivot idx = partition (l) l1 = [] l2 = [] for i in range (0, idx): l1.append (l[i]) for i in range (idx, size): l2.append (l[i]) # Recursively sort elements smaller than the pivot QuickSort(l1); # Recursively sort elements at least as big as the pivot QuickSort(l2); for i in range (0, len (l1)): l[i] = l1[i] for i in range (0, len (l2)): l[len (l1) + i] = l2[i] return l pythran-0.10.0+ds2/pythran/tests/cases/approximated_callgraph.py000066400000000000000000000004551416264035500247640ustar00rootroot00000000000000#pythran export approximated_callgraph(int) #runas approximated_callgraph(100) #bench approximated_callgraph(250) def call(i, j): return i+j def approximated_callgraph(size): out= list() for i in range(size): out.append(list(map(lambda j:call(i, j), range(size)))) return out pythran-0.10.0+ds2/pythran/tests/cases/arc_distance.py000066400000000000000000000014411416264035500226650ustar00rootroot00000000000000#pythran export arc_distance(float [:], float[], float[], float[]) #runas import numpy as np; arc_distance(np.array([12.4,0.5,-5.6,12.34,9.21]),np.array([-5.6,3.4,2.3,-23.31,12.6]),np.array([3.45,1.5,55.4,567.0,43.2]),np.array([56.1,3.4,1.34,-56.9,-3.4])) #bench import numpy.random; N=5000000; a, b, c, d = numpy.random.rand(N), numpy.random.rand(N), numpy.random.rand(N), numpy.random.rand(N); arc_distance(a, b, c, d) import numpy as np def arc_distance(theta_1, phi_1, theta_2, phi_2): """ Calculates the pairwise arc distance between all points in vector a and b. 
""" temp = np.sin((theta_2-theta_1)/2)**2+np.cos(theta_1)*np.cos(theta_2)*np.sin((phi_2-phi_1)/2)**2 distance_matrix = 2 * (np.arctan2(np.sqrt(temp),np.sqrt(1-temp))) return distance_matrix pythran-0.10.0+ds2/pythran/tests/cases/arc_distance_list.py000066400000000000000000000021261416264035500237210ustar00rootroot00000000000000#from https://bitbucket.org/FedericoV/numpy-tip-complex-modeling/src/806c968e3705/src/simulations/list_arc_distance.py?at=default from math import sin, cos, atan2, sqrt, pi from random import random #pythran export arc_distance_list( (float, float) list, (float, float) list) #runas arc_distance_list([(12.4,0.5),(-5.6,12.34),(9.21,-5.6),(3.4,2.3),(-23.31,12.6)],[(3.45,1.5),(55.4,567.0),(43.2,56.1),(3.4,1.34),(-56.9,-3.4)]) #bench import random; N=1000; a = [(random.random(), random.random()) for i in range(N)]; b = [(random.random(), random.random()) for i in range(N)]; arc_distance_list(a,b) def arc_distance_list(a, b): distance_matrix = [] for theta_1, phi_1 in a: temp_matrix = [ 2 * (atan2(sqrt(temp), sqrt(1 - temp))) for temp in [ sin((theta_2 - theta_1) / 2) ** 2 + cos(theta_1) * cos(theta_2) * sin((phi_2 - phi_1) / 2) ** 2 for theta_2, phi_2 in b ] ] distance_matrix.append(temp_matrix) return distance_matrix #print(arc_distance_list([(12.4,0.5),(-5.6,12.34),(9.21,-5.6),(3.4,2.3),(-23.31,12.6)],[(3.45,1.5),(55.4,567.0),(43.2,56.1),(3.4,1.34),(-56.9,-3.4)])) pythran-0.10.0+ds2/pythran/tests/cases/babylonian.py000066400000000000000000000007601416264035500223670ustar00rootroot00000000000000""" Checking if a number is a perfect square. """ # pythran export is_square(int) # runas is_square(12** 5) # from http://stackoverflow.com/questions/2489435/\ # how-could-i-check-if-a-number-is-a-perfect-square def is_square(a_positive_int): """ Check if it is a perfect square. 
""" x = a_positive_int // 2 seen = {x} while x * x != a_positive_int: x = (x + (a_positive_int // x)) // 2 if x in seen: return False seen.add(x) return True pythran-0.10.0+ds2/pythran/tests/cases/blacksholes.py000066400000000000000000000030041416264035500225350ustar00rootroot00000000000000#runas BlackScholes(list(range(1,100)), list(range(1,100)), list(range(1,100)), 0.5, 0.76, 12) #bench BlackScholes(list(range(1,400001)), list(range(1,400001)), list(range(1,400001)), 0.5, 0.76, 400000) #pythran export BlackScholes(float list, float list, float list, float, float, int) #pythran export BlackScholes(int list, int list, int list, float, float, int) import math def BlackScholes(stock_price, option_strike, option_years, Riskfree, Volatility, nb_opt): RSQRT2PI = 1 / math.sqrt(math.pi * 2) A1 = 0.31938153 A2 = -0.356563782 A3 = 1.781477937 A4 = -1.821255978 A5 = 1.330274429 call_result = [] put_result = [] for opt in range(0, nb_opt) : sqrtT = math.sqrt(option_years[opt]) d1 = math.log(stock_price[opt] / option_strike[opt]) d1 += (Riskfree + 0.5 * Volatility * Volatility) * option_years[opt] d1 /= (Volatility * sqrtT) d2 = d1 - Volatility * sqrtT K = 1.0 / (1.0 + 0.2316419 * abs(d1)) CNDD1 = RSQRT2PI * math.exp(-0.5 * d1 * d1) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))) K = 1.0 / (1.0 + 0.2316419 * abs(d2)) CNDD2 = RSQRT2PI * math.exp(-0.5 * d2 * d2) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))) expRT = math.exp(-Riskfree * option_years[opt]) call_result.append(stock_price[opt] * CNDD1 - option_strike[opt] * expRT * CNDD2) put_result.append(option_strike[opt] * expRT * (1.0 - CNDD2) - stock_price[opt] * (1.0 - CNDD1)) return call_result, put_result pythran-0.10.0+ds2/pythran/tests/cases/brownian.py000066400000000000000000000031721416264035500220700ustar00rootroot00000000000000#pythran export brownian_bridge(int, int, float, float, int) #runas brownian_bridge(1,5,1.35,2.65,4) #bench brownian_bridge(1,5,0.35,4.65,100000) import random from math import 
sqrt def linspace(begin, end, nbsteps): assert begin < end return [ begin + i*(end-begin)/nbsteps for i in range(nbsteps) ] def zeros(n): return [0.]*n # should be "from random import gauss as norm", but not reproducible... def norm(m,u): return ((m*u+0.15)%1) # moyenne du pont en t entre les points (t1,b1) et (t2,b2): def moy(t1,t2,b1,b2,t): return (1.*(t2*b1-t1*b2)+t*(b2-b1))/(t2-t1) def p(t): t=1 # variance du pont en t entre les points (t1,b1) et (t2,b2): def var(t1,t2,b1,b2,t): return (1.*t-t1)*(t2-t)/(t2-t1) def brownian_bridge(ti, tf, bi, bf, n): """ simulation d'un pont brownien sur [ti,tf], avec les valeurs extremes bi et bf et n points par unite de temps sortie : - T : positions temporelles des echantillons - B : valeurs des echantillons """ n = int(n*(tf-ti)) # nombre de points T = linspace(ti,tf,n) # points d'echantillonnage pas = (tf-ti)/(n-1.) # pas d'echantillonnage B = zeros(n) # initialisation du brownien B[0] = bi # valeur initiale B[n-1] = bf # valeur finale t1 = ti for k in range(1,n-1): # construction du pont en ti+k*pas m = moy(t1,tf,B[k-1],bf,t1+pas) # sur les intervalle [ti+(k-1)*pas,tf] v = var(t1,tf,B[k-1],bf,t1+pas) # avec les valeurs limites B[k-1],et bf B[k] = m+sqrt(v)*norm(0,1) t1 += pas return T, B pythran-0.10.0+ds2/pythran/tests/cases/bubble_sort.py000066400000000000000000000012361416264035500225520ustar00rootroot00000000000000#adapted from http://www.daniweb.com/software-development/python/code/216689/sorting-algorithms-in-python #pythran export bubble_sort(int list) #runas bubble_sort([-4,1,45,-6,123,4,6,1,34,-8,12]) #bench import random; in_ = random.sample(range(1000000000), 4000); bubble_sort(in_) def bubble_sort(list0): list1=[x for x in list0 ] # simulate copy for i in range(0, len(list1) - 1): swap_test = False for j in range(0, len(list1) - i - 1): if list1[j] > list1[j + 1]: list1[j], list1[j + 1] = list1[j + 1], list1[j] # swap swap_test = True if swap_test == False: break return list1 
pythran-0.10.0+ds2/pythran/tests/cases/calculate_u.py000066400000000000000000000026341416264035500225340ustar00rootroot00000000000000# from the paper `using cython to speedup numerical python programs' #pythran export timeloop(float, float, float, float, float, float list list, float list list, float list list) #pythran export timeloop(float, float, float, float, float, int list list, int list list, int list list) #bench A=[list(range(70)) for i in range(100)] ; B=[list(range(70)) for i in range(100)] ; C=[list(range(70)) for i in range(100)] ; timeloop(1.,2.,.01,.1,.18, A,B,C ) #runas A=[list(range(10)) for i in range(5)] ; B=[list(range(10)) for i in range(5)] ; C=[list(range(10)) for i in range(5)] ; timeloop(1.,2.,.1,.1,.2, A,B,C ) def timeloop(t, t_stop, dt, dx, dy, u, um, k): while t <= t_stop: t += dt new_u = calculate_u(dt, dx, dy, u, um, k) um = u u = new_u return u def calculate_u(dt, dx, dy, u, um, k): up = [ [0.]*len(u[0]) for i in range(len(u)) ] "omp parallel for" for i in range(1, len(u)-1): for j in range(1, len(u[0])-1): up[i][j] = 2*u[i][j] - um[i][j] + \ (dt/dx)**2*( (0.5*(k[i+1][j] + k[i][j])*(u[i+1][j] - u[i][j]) - 0.5*(k[i][j] + k[i-1][j])*(u[i][j] - u[i-1][j]))) + \ (dt/dy)**2*( (0.5*(k[i][j+1] + k[i][j])*(u[i][j+1] - u[i][j]) - 0.5*(k[i][j] + k[i][j-1])*(u[i][j] - u[i][j-1]))) return up pythran-0.10.0+ds2/pythran/tests/cases/caxpy.py000066400000000000000000000024601416264035500213740ustar00rootroot00000000000000#pythran export CAXPY(int, complex, complex list, int, complex list, int) #runas CAXPY(2,complex(1.1,2.3),[complex(1,2),complex(2,3),complex(3,4),complex(5,6)],2,[complex(3,4),complex(1,2),complex(2,3),complex(5,6)],3) def CAXPY(N,CA,CX,INCX,CY,INCY): # Purpose # ======= # # CAXPY constant times a vector plus a vector. # # Further Details # =============== # # jack dongarra, linpack, 3/11/78. 
# modified 12/3/93, array(1) declarations changed to array(#) # # ===================================================================== # if N <= 0: return if (abs(CA) == 0.0E+0): return if (INCX == 1 and INCY == 1): "omp parallel for" for I in range(N): CY[I] = CY[I] + CA*CX[I] # # code for both increments equal to 1 # else: # # code for unequal increments or equal increments # not equal to 1 # IX = 0 IY = 0 if (INCX < 0): IX = (-N+1)*INCX if (INCY < 0): IY = (-N+1)*INCY for I in range(N): CY[IY] = CY[IY] + CA*CX[IX] IX = IX + INCX IY = IY + INCY return CY pythran-0.10.0+ds2/pythran/tests/cases/ccopy.py000066400000000000000000000020371416264035500213650ustar00rootroot00000000000000#pythran export CCOPY(int, complex list, int, complex list, int) #runas CCOPY(2,[complex(1,2),complex(2,3),complex(3,4),complex(5,6)],2,[complex(3,4),complex(1,2),complex(2,3),complex(5,6)],3) #bench sz = 20000000; in1 = map(complex, range(sz), range(sz)); in2 = map(complex, range(sz), range(sz));CCOPY(sz / 6,in1,2,in2,3) def CCOPY(N,CX,INCX,CY,INCY): # Purpose # ======= # # CCOPY copies a vector x to a vector y. 
# # Further Details # =============== # # # ===================================================================== # if N <= 0: return if (INCX==1 and INCY==1): # # code for both increments equal to 1 # for I in range(N): CY[I] = CX[I] else: # # code for unequal increments or equal increments # not equal to 1 # IX = 0 IY = 0 if (INCX < 0): IX = (-N+1)*INCX if (INCY < 0): IY = (-N+1)*INCY for I in range(N): CY[IY] = CX[IX] IX = IX + INCX IY = IY + INCY return pythran-0.10.0+ds2/pythran/tests/cases/cdotc.py000066400000000000000000000026601416264035500213460ustar00rootroot00000000000000#pythran export CDOTC(int, complex list, int, complex list, int) #runas CDOTC(2,[complex(1,2),complex(2,3),complex(3,4),complex(5,6)],2,[complex(3,4),complex(1,2),complex(2,3),complex(5,6)],3) #bench sz = 20000000; in1 = map(complex, range(sz), range(sz)); in2 = map(complex, range(sz), range(sz));CDOTC(sz / 6,in1,2,in2,3) def CDOTC(N,CX,INCX,CY,INCY): # .. Scalar Arguments .. # INTEGER INCX,INCY,N # .. # .. Array Arguments .. # COMPLEX CX(#),CY(#) # .. # # Purpose # ======= # # forms the dot product of two vectors, conjugating the first # vector. # # Further Details # =============== # # jack dongarra, linpack, 3/11/78. 
# modified 12/3/93, array(1) declarations changed to array(#) # # ===================================================================== # CTEMP = complex(0.0,0.0) CDOTC = complex(0.0,0.0) if (N <= 0): return if (INCX == 1 and INCY == 1): # # code for both increments equal to 1 # for I in range(N): CTEMP = CTEMP + (CX[I].conjugate())*CY[I] else: # # code for unequal increments or equal increments # not equal to 1 # IX = 0 IY = 0 if (INCX < 0): IX = (-N+1)*INCX if (INCY < 0): IY = (-N+1)*INCY for I in range(N): CTEMP = CTEMP + (CX[IX].conjugate())*CY[IY] IX = IX + INCX IY = IY + INCY return CTEMP pythran-0.10.0+ds2/pythran/tests/cases/cdotu.py000066400000000000000000000027131416264035500213670ustar00rootroot00000000000000#pythran export CDOTU(int, complex list, int, complex list, int) #runas CDOTU(2,[complex(1,2),complex(2,3),complex(3,4),complex(5,6)],2,[complex(3,4),complex(1,2),complex(2,3),complex(5,6)],3) #bench sz = 20000000; in1 = map(complex, range(sz), range(sz)); in2 = map(complex, range(sz), range(sz));CDOTU(sz / 6,in1,2,in2,3) def CDOTU(N,CX,INCX,CY,INCY): # .. Scalar Arguments .. # INTEGER INCX,INCY,N # .. # .. Array Arguments .. # COMPLEX CX(#),CY(#) # .. # # Purpose # ======= # # CDOTU forms the dot product of two vectors. # # Further Details # =============== # # jack dongarra, linpack, 3/11/78. # modified 12/3/93, array(1) declarations changed to array(#) # # ===================================================================== # # .. Local Scalars .. # COMPLEX CTEMP # INTEGER I,IX,IY # .. 
CTEMP = complex(0.0, 0.0) CDOTU = complex(0.0, 0.0) if (N <= 0): return if (INCX == 1 and INCY == 1): # # code for both increments equal to 1 # for I in range(N): CTEMP = CTEMP + CX[I] * CY[I] else: # # code for unequal increments or equal increments # not equal to 1 # IX = 0 IY = 0 if (INCX < 0): IX = (-N + 1) * INCX if (INCY < 0): IY = (-N + 1) * INCY for I in range(N): CTEMP = CTEMP + CX[IX] * CY[IY] IX = IX + INCX IY = IY + INCY return CTEMP pythran-0.10.0+ds2/pythran/tests/cases/check_mask.py000066400000000000000000000010541416264035500223360ustar00rootroot00000000000000#pythran export check_mask(bool[][], bool[]) #runas import numpy as np; db = np.array([[0,1,1,0], [1,0,1,1], [1,1,1,1]], dtype=bool); out = np.zeros(3,dtype=bool); check_mask(db, out) #from http://stackoverflow.com/questions/34500913/numba-slower-for-numpy-bitwise-and-on-boolean-arrays import numpy as np def check_mask(db, out, mask=(1, 0, 1)): for idx, line in enumerate(db): target, vector = line[0], line[1:] if (mask == np.bitwise_and(mask, vector)).all(): if target == 1: out[idx] = 1 return out pythran-0.10.0+ds2/pythran/tests/cases/clip.py000066400000000000000000000010051416264035500211710ustar00rootroot00000000000000#pythran export clip(complex128[], float64), clip(complex128[::], float64) #runas import numpy as np ; a = np.arange(2, dtype=complex); clip(a, .5), clip(a[2::4], .5) import numpy as np def limit1 (x, epsilon=1e-6): if abs(x) < epsilon: return 0 else: return x / abs(x) def clip (z, _max): out = np.empty (z.shape, dtype=z.dtype) for i in range (len(z)): if abs(z[i]) > _max: out[i] = limit1 (z[i]) * _max else: out[i] = z[i] return out pythran-0.10.0+ds2/pythran/tests/cases/clip2.py000066400000000000000000000007761416264035500212710ustar00rootroot00000000000000# pythran export clip(complex128[], float64), limit (complex128[],float?) 
# runas import numpy as np ; a = np.arange(2, dtype=complex); clip(a, .5), clip(a[2::4], .5) import numpy as np def limit (x, epsilon=1e-6): out = np.empty(shape=x.shape, dtype=x.dtype) mask1 = np.abs(x) < epsilon out[mask1] = 0 mask2 = np.logical_not(mask1) out[mask2] = x[mask2] / np.abs(x[mask2]) return out def clip (z, _max): mask = np.abs(z) > _max z[mask] = limit(z[mask]) * _max return z pythran-0.10.0+ds2/pythran/tests/cases/collatz.py000066400000000000000000000005521416264035500217200ustar00rootroot00000000000000import numpy as np #pythran export collatz(int) #runas collatz(10) def collatz(n_max): n = np.arange(3, n_max+1) count = np.ones_like(n) indices = n != 1 while np.any(indices): count[indices] += 1 x = n[indices] n[indices] = np.where(x & 1, 3 * x + 1, x // 2) indices = n != 1 max_i = np.argmax(count) return max_i + 3, count[max_i] pythran-0.10.0+ds2/pythran/tests/cases/collatz_modified.py000066400000000000000000000006511416264035500235600ustar00rootroot00000000000000#pythran export collatz_modified(int) #runas collatz_modified(10) def collatz_modified(target): start = 1 while True: i = start steps = 0 while True: if i == 1: break if i % 2 == 0: i = i // 2 else: i = 3 * i + 1 steps += 1 if steps == target: return start start += 1 pythran-0.10.0+ds2/pythran/tests/cases/comp_unrolling.py000066400000000000000000000004051416264035500232740ustar00rootroot00000000000000#pythran export list_comp(int list list) #runas list_comp([[], [], [1]]) def foo(cc, x, y): for a in cc: if a: return True return False def list_comp(cc): return [(x,y) for x in range(1) for y in range(2) if foo(cc, x, y)] pythran-0.10.0+ds2/pythran/tests/cases/compute_subpix_2d_gaussian2.py000066400000000000000000000023611416264035500256570ustar00rootroot00000000000000import numpy as np #runas import numpy as np; x = (np.arange(16., dtype=np.float32) - 8.).reshape(4,4); compute_subpix_2d_gaussian2(x, 1, 1) # pythran export compute_subpix_2d_gaussian2(float32[][], int, int) def 
compute_subpix_2d_gaussian2(correl, ix, iy): correl_crop = correl[iy-1:iy+2, ix-1:ix+2] # hoops, pythran crashes because of this line # correl_crop[correl_crop < 0] = 1e-6 # we write it like this to please pythran tmp = np.where(correl_crop < 0) for i0, i1 in zip(tmp[0], tmp[1]): correl_crop[i0, i1] = 1e-6 c10 = 0 c01 = 0 c11 = 0 c20 = 0 c02 = 0 for i in range(1): for j in range(1): c10 += (i-1)*np.log(correl_crop[j, i]) c01 += (j-1)*np.log(correl_crop[j, i]) c11 += (i-1)*(j-1)*np.log(correl_crop[j, i]) c20 += (3*(i-1)**2-2)*np.log(correl_crop[j, i]) c02 += (3*(j-1)**2-2)*np.log(correl_crop[j, i]) c00 = (5-3*(i-1)**2-3*(j-1)**2)*np.log(correl_crop[j, i]) c00, c10, c01, c11, c20, c02 = \ c00/9, c10/6, c01/6, c11/4, c20/6, c02/6 deplx = np.float32((c11*c01-2*c10*c02)/(4*c20*c02-c11**2)) deply = np.float32((c11*c10-2*c01*c20)/(4*c20*c02-c11**2)) return deplx, deply, correl_crop pythran-0.10.0+ds2/pythran/tests/cases/convnet.py000066400000000000000000000031321416264035500217210ustar00rootroot00000000000000#pythran export convnet(float32[:,:], float32[:,:], int, int, int) #runas import numpy as np; x = np.arange(16., dtype=np.float32).reshape(4,4); y= np.arange(16., dtype=np.float32).reshape(4,4); convnet(x, y, 4,1,1) import numpy as np def sigmoid(z): return 1 / (1 + np.exp(-z)) def convnet(conv_matrix, qcnn_filter, length, batch_size, state_size): state_size_2 = state_size * 2 state_size_3 = state_size * 3 state_size_4 = state_size * 4 conv_results = np.dot(conv_matrix, qcnn_filter).reshape(length, batch_size, state_size_4) conv_results[:, :, :state_size] = np.tanh(conv_results[:, :, :state_size]) conv_results[:, :, state_size:state_size_4] = sigmoid(conv_results[:, :, state_size:state_size_4]) state_results = np.zeros((batch_size, length, state_size_2), dtype=np.float32) state = np.zeros((batch_size, state_size), dtype=np.float32) for i in range(length): z = conv_results[i, :, :state_size] f = conv_results[i, :, state_size:state_size_2] o = conv_results[i, :, 
state_size_2:state_size_3] state = f * state + (1 - f) * z state_results[:, i, :state_size] = state * o state = np.zeros((batch_size, state_size), dtype=np.float32) for i in range(length - 1, -1, -1): z = conv_results[i, :, :state_size] f = conv_results[i, :, state_size_3:state_size_4] o = conv_results[i, :, state_size_2:state_size_3] state = f * state + (1 - f) * z state_results[:, i, state_size:] = state * o state_results = state_results.reshape((batch_size * length, state_size_2)) return state_results pythran-0.10.0+ds2/pythran/tests/cases/count_perm_sig.py000066400000000000000000000013101416264035500232560ustar00rootroot00000000000000#from: https://www.youtube.com/watch?v=LBht4RO3-qs&list=UU5-umfrfqPvDvWCYHJGYtpA #runas count_perm_sig(1000, 3, 3) #pythran export count_perm_sig(int, int, int) import random as rd def count_perm_sig(n, s, k): rd.seed(s) myset = set() count = 0 permutation = [i for i in range(k)] for i in range(n): rd.shuffle(permutation) sig = signature(permutation) if sig not in myset: myset.add(sig) count += 1 return myset, count def signature(perm): i = perm.index(0) sig_1 = perm[i:]+perm[:i] sig_2 = sig_1[0:1]+sig_1[1:][::-1] if sig_1[1] < sig_2[1]: sig = sig_1 else: sig = sig_2 return tuple(sig) pythran-0.10.0+ds2/pythran/tests/cases/create_grid.py000066400000000000000000000007331416264035500225210ustar00rootroot00000000000000#from: http://stackoverflow.com/questions/13815719/creating-grid-with-numpy-performance #pythran export create_grid(float []) #runas import numpy as np ; N = 1000 ; x = np.arange(0,1,1./N) ; create_grid(x) #bench import numpy as np ; N = 1000 ; x = np.arange(0,1,1./N) ; create_grid(x) import numpy as np def create_grid(x): N = x.shape[0] z = np.zeros((N, N, 3)) z[:,:,0] = x.reshape(-1,1) z[:,:,1] = x fast_grid = z.reshape(N*N, 3) return fast_grid pythran-0.10.0+ds2/pythran/tests/cases/cronbach.py000066400000000000000000000011501416264035500220220ustar00rootroot00000000000000#from: 
http://stackoverflow.com/questions/20799403/improving-performance-of-cronbach-alpha-code-python-numpy #pythran export cronbach(float [][]) #runas import numpy as np ; N = 800 ; items = np.arange(N*N, dtype=float).reshape(N,N) ; cronbach(items) #bench import numpy as np ; N = 800 ; items = np.arange(N*N, dtype=float).reshape(N,N) ; cronbach(items) def cronbach(itemscores): itemvars = itemscores.var(1, None, None, 1)#(axis=1, ddof=1) tscores = itemscores.sum(0)#(axis=0) nitems = len(itemscores) return nitems / (nitems-1) * (1 - itemvars.sum() / tscores.var(None, None, None, 1))#(ddof=1)) pythran-0.10.0+ds2/pythran/tests/cases/crotg.py000066400000000000000000000014351416264035500213670ustar00rootroot00000000000000#pythran export CROTG(complex, complex, float, complex) #runas CROTG(complex(1,2),complex(5,6),3.4,complex(10,-3)) import math def CROTG(CA,CB,C=0,S=0): # .. Scalar Arguments .. # COMPLEX CA,CB,S # REAL C # .. # # Purpose # ======= # # CROTG determines a complex Givens rotation. # # ===================================================================== # # .. Local Scalars .. # COMPLEX ALPHA # REAL NORM,SCALE # .. if (abs(CA) == 0.): C = 0. S = complex(1.,0.) 
CA = CB else: SCALE = abs(CA) + abs(CB) NORM = SCALE*math.sqrt((abs(CA/SCALE))**2+ (abs(CB/SCALE))**2) ALPHA = CA/abs(CA) C = abs(CA)/NORM S = ALPHA*(CB.conjugate())/NORM CA = ALPHA*NORM return (CA,CB,C,S) pythran-0.10.0+ds2/pythran/tests/cases/d2q9_nxnyns.py000066400000000000000000000061371416264035500224510ustar00rootroot00000000000000#from loic gouarin #pythran export one_time_step(float64 [][][], float64 [][][]) #runas import numpy as np ; r = np.ones((50,50,9)); one_time_step(r,r) #bench import numpy as np ; r = np.ones((500,500,9)); one_time_step(r,r) import numpy as np def m2f_loc(m, f): c0 = 1./6 c1 = 1./9 c2 = 1./18 c3 = 1./36 c4 = 1./12 f[0] = c1*m[0] - c1*m[3] + c1*m[4] f[1] = c1*m[0] + c0*m[1] - c3*m[3] - c2*m[4] - c0*m[5] + 0.25*m[7] f[2] = c1*m[0] + c0*m[2] - c3*m[3] - c2*m[4] - c0*m[6] - 0.25*m[7] f[3] = c1*m[0] - c0*m[1] - c3*m[3] - c2*m[4] + c0*m[5] + 0.25*m[7] f[4] = c1*m[0] - c0*m[2] - c3*m[3] - c2*m[4] + c0*m[6] - 0.25*m[7] f[5] = c1*m[0] + c0*m[1] + c0*m[2] + c2*m[3] + c3*m[4] + c4*m[5] + c4*m[6] + 0.25*m[8] f[6] = c1*m[0] - c0*m[1] + c0*m[2] + c2*m[3] + c3*m[4] - c4*m[5] + c4*m[6] - 0.25*m[8] f[7] = c1*m[0] - c0*m[1] - c0*m[2] + c2*m[3] + c3*m[4] - c4*m[5] - c4*m[6] + 0.25*m[8] f[8] = c1*m[0] + c0*m[1] - c0*m[2] + c2*m[3] + c3*m[4] + c4*m[5] - c4*m[6] - 0.25*m[8] def f2m_loc(f, m): m[0] = f[0] + f[1] + f[2] + f[3] + f[4] + f[5] + f[6] + f[7] + f[8] m[1] = f[1] - f[3] + f[5] - f[6] - f[7] + f[8] m[2] = f[2] - f[4] + f[5] + f[6] - f[7] - f[8] m[3] = -4.*f[0] - f[1] - f[2] - f[3] - f[4] + 2.*f[5] + 2.*f[6] + 2.*f[7] + 2.*f[8] m[4] = 4.*f[0] - 2.*f[1] - 2.*f[2] - 2.*f[3] - 2.*f[4] + f[5] + f[6] + f[7] + f[8] m[5] = -2.*f[1] + 2.*f[3] + f[5] - f[6] - f[7] + f[8] m[6] = -2.*f[2] + 2.*f[4] + f[5] + f[6] - f[7] - f[8] m[7] = f[1] - f[2] + f[3] - f[4] m[8] = f[5] - f[6] + f[7] - f[8] def getf(f, floc, i, j): floc[0] = f[i, j, 0] floc[1] = f[i-1, j, 1] floc[2] = f[i, j-1, 2] floc[3] = f[i+1, j, 3] floc[4] = f[i, j+1, 4] floc[5] = f[i-1, j-1, 5] floc[6] 
= f[i+1, j-1, 6] floc[7] = f[i+1, j+1, 7] floc[8] = f[i-1, j+1, 8] def setf(f, floc, i, j): f[j, i, 0] = floc[0] f[j, i, 1] = floc[1] f[j, i, 2] = floc[2] f[j, i, 3] = floc[3] f[j, i, 4] = floc[4] f[j, i, 5] = floc[5] f[j, i, 6] = floc[6] f[j, i, 7] = floc[7] f[j, i, 8] = floc[8] def relaxation_loc(m): m[3] += 1.1312217194570136*(-2*m[0] + 3.0*m[1]*m[1] + 3.0*m[2]*m[2] - m[3]) m[4] += 1.1312217194570136*(m[0] + 1.5*m[1]*m[1] + 1.5*m[2]*m[2] - m[4]) m[5] += 1.1312217194570136*(-m[1] - m[5]) m[6] += 1.1312217194570136*(-m[2] - m[6]) m[7] += 1.8573551263001487*(m[1]*m[1] - m[2]*m[2] - m[7]) m[8] += 1.8573551263001487*(m[1]*m[2] - m[8]) def periodic_bc(f): nx, ny, ns = f.shape for j in range(ny): for k in range(ns): f[0, j, k] = f[nx-2, j, k] f[nx-1, j, k] = f[1, j, k] for i in range(nx): for k in range(ns): f[i, 0, k] = f[i, ny-2, k] f[i, ny-1, k] = f[i, 1, k] def one_time_step(f1, f2): nx, ny, ns = f1.shape floc = np.zeros(ns) mloc = np.zeros(ns) periodic_bc(f1) for i in range(1, nx-1): for j in range(1, ny-1): getf(f1, floc, i, j) f2m_loc(floc, mloc) relaxation_loc(mloc) m2f_loc(mloc, floc) setf(f2, floc, i, j) return f2 pythran-0.10.0+ds2/pythran/tests/cases/deriv.py000066400000000000000000000023621416264035500213620ustar00rootroot00000000000000#pythran export deriv(int, float, complex, complex list, complex list, complex list, complex list list, float) #runas deriv(3,4.5,complex(2,3),[complex(3,4),complex(1,2),complex(2,3),complex(5,6)],[complex(1,2),complex(2,3),complex(5,6),complex(3,4)],[complex(2,3),complex(3,4),complex(1,2),complex(5,6)],[[complex(2,3),complex(3,4),complex(1,2),complex(5,6)],[complex(2,3),complex(3,4),complex(1,2),complex(5,6)],[complex(2,3),complex(3,4),complex(1,2),complex(5,6)],[complex(2,3),complex(3,4),complex(1,2),complex(5,6)],[complex(2,3),complex(3,4),complex(1,2),complex(5,6)]],3.5) def deriv(n,sig,alp,dg,dh1,dh3,bin,nu): dh2=[complex(0,0) for _ in dh1] ci = complex(0.0,1.0) dh1[0]=complex(0.5,0)*ci*complex(sig,0) exp1 = 
complex(-0.5,0) dh2[0]=alp; exp2 = complex(-1.0,0) dh3[0]=-2.0*nu exp3 = complex(-1.0,0) for i in range(1,n): dh1[i]=dh1[i-1]*exp1 exp1=exp1-1.0 dh2[i]=dh2[i-1]*exp2 exp2=exp2-1.0 dh3[i]=-nu*dh3[i-1]*exp3 exp3=exp3-1.0 dg[0]=complex(1.0) dg[1]=dh1[0]+dh2[0]+dh3[0] for i in range(2,n+1): dg[i]=dh1[i-1]+dh2[i-1]+dh3[i-1] for j in range(1,i): dg[i]=dg[i]+bin[j-1][i-1]*(dh1[j-1]+dh2[j-1]+dh3[j-1])*dg[i-j] return dg pythran-0.10.0+ds2/pythran/tests/cases/descent.py000066400000000000000000000014021416264035500216700ustar00rootroot00000000000000#pythran export np_descent(float64[], float64[], float, int) #from https://realpython.com/numpy-tensorflow-performance/#using-tensorflow #runas import numpy as np; np.random.seed(444); N = 10000; sigma = 0.1; noise = sigma * np.random.randn(N); x = np.linspace(0, 2, N); d = 3 + 2 * x + noise; mu = 0.001; N_epochs = 10000; np_descent(x, d, mu, N_epochs) import itertools as it import numpy as np def np_descent(x, d, mu, N_epochs): N = len(x) f = 2 / N y = np.zeros(N) err = np.zeros(N) w = np.zeros(2) grad = np.empty(2) for _ in it.repeat(None, N_epochs): #np.subtract(d, y, out=err) err[:] = d - y grad[:] = f * np.sum(err), f * (np.dot(err, x)) w += mu * grad y = w[0] + w[1] * x return w pythran-0.10.0+ds2/pythran/tests/cases/deuxd_convolution.py000066400000000000000000000014561416264035500240240ustar00rootroot00000000000000#pythran export conv(float[][], float[][]) #runas import numpy as np ; x = np.tri(300,300)*0.5 ; w = np.tri(5,5)*0.25 ; conv(x,w) #bench import numpy as np ; x = np.tri(150,150)*0.5 ; w = np.tri(5,5)*0.25 ; conv(x,w) import numpy as np def clamp(i, offset, maxval): j = max(0, i + offset) return min(j, maxval) def reflect(pos, offset, bound): idx = pos+offset return min(2*(bound-1)-idx,max(idx,-idx)) def conv(x, weights): sx = x.shape sw = weights.shape result = np.zeros_like(x) for i in range(sx[0]): for j in range(sx[1]): for ii in range(sw[0]): for jj in range(sw[1]): idx = clamp(i, ii-sw[0]//2, sw[0]), clamp(j, 
jj-sw[0]//2, sw[0]) result[i, j] += x[idx] * weights[ii, jj] return result pythran-0.10.0+ds2/pythran/tests/cases/diffusion_numpy.py000066400000000000000000000011701416264035500234630ustar00rootroot00000000000000#pythran export diffuseNumpy(float [][], float [][], int) #runas import numpy as np;lx,ly=(2**7,2**7);u=np.zeros([lx,ly],dtype=np.double);u[lx//2,ly//2]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double);diffuseNumpy(u,tempU,500) import numpy as np def diffuseNumpy(u, tempU, iterNum): """ Apply Numpy matrix for the Forward-Euler Approximation """ mu = .1 for n in range(iterNum): tempU[1:-1, 1:-1] = u[1:-1, 1:-1] + mu * ( u[2:, 1:-1] - 2 * u[1:-1, 1:-1] + u[0:-2, 1:-1] + u[1:-1, 2:] - 2 * u[1:-1, 1:-1] + u[1:-1, 0:-2]) u[:, :] = tempU[:, :] tempU[:, :] = 0.0 pythran-0.10.0+ds2/pythran/tests/cases/diffusion_pure_python.py000066400000000000000000000021361416264035500246720ustar00rootroot00000000000000# Reference: http://continuum.io/blog/the-python-and-the-complied-python #pythran export diffusePurePython(float [][], float [][], int) #runas import numpy as np;lx,ly=(2**7,2**7);u=np.zeros([lx,ly],dtype=np.double);u[int(lx/2),int(ly/2)]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double);diffusePurePython(u,tempU,500) #bench import numpy as np;lx,ly=(2**6,2**6);u=np.zeros([lx,ly],dtype=np.double);u[int(lx/2),int(ly/2)]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double);diffusePurePython(u,tempU,55) import numpy as np def diffusePurePython(u, tempU, iterNum): """ Apply nested iteration for the Forward-Euler Approximation """ mu = .1 row = u.shape[0] col = u.shape[1] for n in range(iterNum): for i in range(1, row - 1): for j in range(1, col - 1): tempU[i, j] = u[i, j] + mu * ( u[i + 1, j] - 2 * u[i, j] + u[i - 1, j] + u[i, j + 1] - 2 * u[i, j] + u[i, j - 1]) for i in range(1, row - 1): for j in range(1, col - 1): u[i, j] = tempU[i, j] tempU[i, j] = 0.0 
pythran-0.10.0+ds2/pythran/tests/cases/emin.py000066400000000000000000000030271416264035500212000ustar00rootroot00000000000000#from https://gist.github.com/andersx/6061586 #runas run() #bench run() #pythran export run() # A simple energy minimization program that uses steepest descent # and a force field to minimize the energy of water in internal coordinates. # Written by Jan H. Jensen, 2013 def Eandg(rOH,thetaHOH): """" Arguments: (internal coordinates of the water molecule) rOH O-H bond distance thetaHOH H-O-H bond angle Returns: E Molecular force field energy grOH O-H bond stretch gradient grthetaHOH H-O-H bond angle bend gradient Force field parameters: kOH Harmonic force constant, O-H bond strech rOHe Equilibrium distance, O-H kHOH Harmonic angle bend force constant, H-O-H angle bend thetaHOHe Equilibrium angle, H-O-H """ kOH = 50.0 rOHe = 0.95 kHOH = 50.0 thetaHOHe = 104.5 E = 2 * kOH * (rOH - rOHe)**2 + kHOH * (thetaHOH - thetaHOHe)**2 grOH = 2 * kOH * (rOH - rOHe) grthetaHOH = 2 * kHOH * (thetaHOH - thetaHOHe) return (E, grOH, grthetaHOH) def run(): c = 0.005 n_steps = 1000000 #starting geometry rOH = 10.0 thetaHOH = 180.0 for i in range(n_steps): (E,grOH,gthetaHOH) = Eandg(rOH,thetaHOH) if (abs(grOH) >0.001/c or abs(gthetaHOH) > 0.01/c ): rOH = rOH - c*grOH thetaHOH = thetaHOH - c*gthetaHOH converged = (abs(grOH) >0.001/c or abs(gthetaHOH) > 0.01/c ) return converged, E,rOH,thetaHOH pythran-0.10.0+ds2/pythran/tests/cases/empirical.py000066400000000000000000000012461416264035500222160ustar00rootroot00000000000000#from https://github.com/serge-sans-paille/pythran/issues/1229 #runas import numpy as np; x = np.arange(3., 10.); empirical(x, 3., .5) import numpy as np #pythran export empirical(float[:], float, float) def empirical(ds, alpha, x): sds = np.sort(ds) ds_to_the_alpha = sds**alpha fractions = ds_to_the_alpha #/ sum (ds_to_the_alpha) thresholds = np.cumsum(fractions) thresholds /= thresholds[-1] i = find_first (thresholds, lambda u: x < u) return i 
#pthran export find_first(float[:], bool (float)) def find_first (seq, pred): for i,x in enumerate (seq): print(i, x, pred(x)) if pred(x): return i return None pythran-0.10.0+ds2/pythran/tests/cases/euclidean_distance_square.py000066400000000000000000000007531416264035500254360ustar00rootroot00000000000000#from: https://stackoverflow.com/questions/50658884/why-this-numba-code-is-6x-slower-than-numpy-code #runas import numpy as np; x1 = np.array([[1.,2,3]]) ; x2 = np.array([[1.,2,3],[2,3,4], [3,4,5], [4,5,6], [5,6,7]]); euclidean_distance_square(x1, x2) #pythran export euclidean_distance_square(float64[1,:], float64[:,:]) import numpy as np def euclidean_distance_square(x1, x2): return -2*np.dot(x1, x2.T) + np.sum(np.square(x1), axis=1)[:, np.newaxis] + np.sum(np.square(x2), axis=1) pythran-0.10.0+ds2/pythran/tests/cases/euler_challenge13.py000066400000000000000000000011441416264035500235300ustar00rootroot00000000000000""" Euler chalenge number 13. """ # pythran export solve(int) # runas solve(0) def solve(v): """ Sum big numbers and get first digits. """ # no gmp support, pruning table # int32 support: trunc digits t = ( 37107, #2875339021027, 46376, #9376774900097, 74324, #9861995247410, 91942, #2133635741615, 23067, #5882075393461, 89261, #6706966236338, 28112, #8798128499794, 44274, #2289174325203, 47451, #4457360013064, 70386, #4861058430254, ) # prevent constant evaluation return str(sum(t) + v)[0:10] pythran-0.10.0+ds2/pythran/tests/cases/euler_challenge14.py000066400000000000000000000017611416264035500235360ustar00rootroot00000000000000#!/usr/bin/env python # taken from http://www.ripton.net/blog/?p=51 #pythran export euler14(int) #runas euler14(1000) #bench euler14(650000) """Project Euler, problem 14 The following iterative sequence is defined for the set of positive integers: n -> n / 2 (n is even) n -> 3n + 1 (n is odd) Which starting number, under one million, produces the longest chain? 
""" def next_num(num): if num & 1: return 3 * num + 1 else: return num // 2 def series_length(num, lengths): if num in lengths: return lengths[num] else: num2 = next_num(num) result = 1 + series_length(num2, lengths) lengths[num] = result return result def euler14(MAX_NUM): num_with_max_length = 1 max_length = 0 lengths = {1: 0} for ii in range(1, MAX_NUM): length = series_length(ii, lengths) if length > max_length: max_length = length num_with_max_length = ii return num_with_max_length, max_length pythran-0.10.0+ds2/pythran/tests/cases/extrema.py000066400000000000000000000017241416264035500217170ustar00rootroot00000000000000#runas run_extrema(10,[1.2,3.4,5.6,7.8,9.0,2.1,4.3,5.4,6.5,7.8]) #bench import random; n=3000000; a = [random.random() for i in range(n)]; run_extrema(n, a) #pythran export run_extrema(int, float list) from functools import reduce def extrema_op(a, b): a_min_idx, a_min_val, a_max_idx, a_max_val = a b_min_idx, b_min_val, b_max_idx, b_max_val = b if a_min_val < b_min_val: if a_max_val > b_max_val: return a else: return a_min_idx, a_min_val, b_max_idx, b_max_val else: if a_max_val > b_max_val: return b_min_idx, b_min_val, a_max_idx, a_max_val else: return b def extrema_id(x): return -1, 1., 1, 0. def indices(A): return range(len(A)) def extrema(x, x_id): return reduce(extrema_op, zip(indices(x), x, indices(x), x), x_id) def run_extrema(n,a): #import random #a = [random.random() for i in range(n)] a_id = extrema_id(0.) return extrema(a, a_id) pythran-0.10.0+ds2/pythran/tests/cases/factorize_naive.py000066400000000000000000000014201416264035500234130ustar00rootroot00000000000000#taken from http://eli.thegreenplace.net/2012/01/16/python-parallelizing-cpu-bound-tasks-with-multiprocessing/ #pythran export factorize_naive(int) #runas factorize_naive(12222) def factorize_naive(n): """ A naive factorization method. Take integer 'n', return list of factors. 
""" if n < 2: return [] factors = [] p = 2 while True: if n == 1: return factors r = n % p if r == 0: factors.append(p) n = n / p elif p * p >= n: factors.append(n) return factors elif p > 2: # Advance in steps of 2 over odd numbers p += 2 else: # If p == 2, get to 3 p += 1 assert False, "unreachable" pythran-0.10.0+ds2/pythran/tests/cases/fannkuch.py000066400000000000000000000021441416264035500220440ustar00rootroot00000000000000#imported from https://bitbucket.org/pypy/benchmarks/src/846fa56a282b0e8716309f891553e0af542d8800/own/fannkuch.py?at=default # the export line is in fannkuch.pythran #runas fannkuch(9);fannkuch2(9) #bench fannkuch(9) def fannkuch(n): count = list(range(1, n+1)) max_flips = 0 m = n-1 r = n check = 0 perm1 = list(range(n)) perm = list(range(n)) while 1: if check < 30: #print("".join(str(i+1) for i in perm1)) check += 1 while r != 1: count[r-1] = r r -= 1 if perm1[0] != 0 and perm1[m] != m: perm = perm1[:] flips_count = 0 k = perm[0] while k: perm[:k+1] = perm[k::-1] flips_count += 1 k = perm[0] if flips_count > max_flips: max_flips = flips_count while r != n: perm1.insert(r, perm1.pop(0)) count[r] -= 1 if count[r] > 0: break r += 1 else: return max_flips def fannkuch2(n): fannkuch(n) pythran-0.10.0+ds2/pythran/tests/cases/fannkuch.pythran000066400000000000000000000001301416264035500230720ustar00rootroot00000000000000# this is a comment export fannkuch(int) # this is another comment export fannkuch2(int)pythran-0.10.0+ds2/pythran/tests/cases/fbcorr.py000066400000000000000000000031661416264035500215310ustar00rootroot00000000000000# from https://github.com/numba/numba/blob/master/examples/fbcorr.py #pythran export fbcorr(float list list list list, float list list list list) #pythran export fbcorr(int list list list list, int list list list list) #runas imgs = [ [ [ [ i+j+k for i in range(3) ] for j in range(16) ] for j in range(16) ] for k in range(16) ]; filters = [ [ [ [ i+2*j-k for i in range(3) ] for j in range(5) ] for j in range(5) ] for k 
in range(6) ] ; fbcorr(imgs, filters) #bench imgs = [ [ [ [ i+j+k for i in range(11) ] for j in range(16) ] for j in range(16) ] for k in range(16) ]; filters = [ [ [ [ i+2*j-k for i in range(11) ] for j in range(5) ] for j in range(5) ] for k in range(6) ] ; fbcorr(imgs, filters) def fbcorr(imgs, filters): n_imgs, n_rows, n_cols, n_channels = (len(imgs), len(imgs[0]), len(imgs[0][0]), len(imgs[0][0][0])) n_filters, height, width, n_ch2 = (len(filters), len(filters[0]), len(filters[0][0]), len(filters[0][0][0])) output = [ [ [ [ 0 for i in range(n_cols - width + 1) ] for j in range(n_rows - height + 1) ] for k in range(n_filters) ] for l in range(n_imgs) ] for ii in range(n_imgs): for rr in range(n_rows - height + 1): for cc in range(n_cols - width + 1): for hh in range(height): for ww in range(width): for jj in range(n_channels): for ff in range(n_filters): imgval = imgs[ii][rr + hh][cc + ww][jj] filterval = filters[ff][hh][ww][jj] output[ii][ff][rr][cc] += imgval * filterval return output pythran-0.10.0+ds2/pythran/tests/cases/fbcorr_numpy.py000066400000000000000000000020071416264035500227520ustar00rootroot00000000000000""" This file demonstrates a filterbank correlation loop. 
""" #pythran export fbcorr(float[][][][], float[][][][], float[][][][]) #bench import numpy; in_ = numpy.arange(10*20*30*7.).reshape(10,20,30,7); filter = numpy.arange(2*3*4*7.).reshape(2,3,4,7); out = numpy.empty((10,2,18,27), dtype=numpy.float); fbcorr(in_, filter, out) def fbcorr(imgs, filters, output): n_imgs, n_rows, n_cols, n_channels = imgs.shape n_filters, height, width, n_ch2 = filters.shape "omp parallel for" for ii in range(n_imgs): for rr in range(n_rows - height + 1): for cc in range(n_cols - width + 1): for hh in range(height): for ww in range(width): for jj in range(n_channels): for ff in range(n_filters): imgval = imgs[ii, rr + hh, cc + ww, jj] filterval = filters[ff, hh, ww, jj] output[ii, ff, rr, cc] += imgval * filterval pythran-0.10.0+ds2/pythran/tests/cases/fdtd.py000066400000000000000000000021231416264035500211650ustar00rootroot00000000000000#from http://stackoverflow.com/questions/19367488/converting-function-to-numbapro-cuda #pythran export fdtd(float[][], int) #runas import numpy ; a = numpy.ones((1000,1000)); fdtd(a,20) #bench import numpy ; a = numpy.arange(10000.).reshape(100,100); fdtd(a,25) import numpy as np def fdtd(input_grid, steps): grid = input_grid.copy() old_grid = np.zeros_like(input_grid) previous_grid = np.zeros_like(input_grid) l_x = grid.shape[0] l_y = grid.shape[1] for i in range(steps): np.copyto(previous_grid, old_grid) np.copyto(old_grid, grid) for x in range(l_x): for y in range(l_y): grid[x,y] = 0.0 if 0 < x+1 < l_x: grid[x,y] += old_grid[x+1,y] if 0 < x-1 < l_x: grid[x,y] += old_grid[x-1,y] if 0 < y+1 < l_y: grid[x,y] += old_grid[x,y+1] if 0 < y-1 < l_y: grid[x,y] += old_grid[x,y-1] grid[x,y] /= 2.0 grid[x,y] -= previous_grid[x,y] return grid pythran-0.10.0+ds2/pythran/tests/cases/fft.py000066400000000000000000000007311416264035500210260ustar00rootroot00000000000000#pythran export fft(complex []) #runas from numpy import ones ; a = ones(2**10, dtype=complex) ; fft(a) #bench from numpy import ones ; a = ones(2**14, 
dtype=complex) ; fft(a) import math, numpy as np def fft(x): N = x.shape[0] if N == 1: return np.array(x) e=fft(x[::2]) o=fft(x[1::2]) M=N//2 l=[ e[k] + o[k]*math.e**(-2j*math.pi*k/N) for k in range(M) ] r=[ e[k] - o[k]*math.e**(-2j*math.pi*k/N) for k in range(M) ] return np.array(l+r) pythran-0.10.0+ds2/pythran/tests/cases/fibo.py000066400000000000000000000007121416264035500211650ustar00rootroot00000000000000#pythran export test(int) #runas test(12) #bench test(33) def rfibo(n): if n < 2: return n else: n_1 = rfibo(n-1) n_2 = rfibo(n-2) return n_1 + n_2 def fibo(n): if n < 10: return rfibo(n) else: n_1 = 0 "omp task shared(n,n_1)" n_1 = fibo(n-1) n_2 = fibo(n-2) "omp taskwait" return n_1 + n_2 def test(n): "omp parallel" "omp single" f = fibo(n) return f pythran-0.10.0+ds2/pythran/tests/cases/fibo_seq.py000066400000000000000000000003201416264035500220300ustar00rootroot00000000000000""" Nom recursive version of fibo. """ # pythran export fibo(int) # runas fibo(7) def fibo(n): """ fibonaccie compuation. """ a, b = 1, 1 for _ in range(n): a, b = a + b, a return a pythran-0.10.0+ds2/pythran/tests/cases/frequent_itemsets.py000066400000000000000000000033321416264035500240150ustar00rootroot00000000000000#pythran export frequent_itemsets(str list list) #runas frequent_itemsets([["er", "re", "er"], ["er", "rre", "eeer"], ["e"], ["er"]*8]) import itertools def frequent_itemsets(sentences): # Counts sets with Apriori algorithm. 
SUPP_THRESHOLD = 100 supps = [] supp = {} for sentence in sentences: for key in sentence: if key in supp: supp[key] += 1 else: supp[key] = 1 print("|C1| = " + str(len(supp))) supps.append({k:v for k,v in supp.items() if v >= SUPP_THRESHOLD}) print("|L1| = " + str(len(supps[0]))) supp = {} for sentence in sentences: for combination in itertools.combinations(sentence, 2): if combination[0] in supps[0] and combination[1] in supps[0]: key = ','.join(combination) if key in supp: supp[key] += 1 else: supp[key] = 1 print("|C2| = " + str(len(supp))) supps.append({k:v for k,v in supp.items() if v >= SUPP_THRESHOLD}) print("|L2| = " + str(len(supps[1]))) supp = {} for sentence in sentences: for combination in itertools.combinations(sentence, 3): if (combination[0]+','+combination[1] in supps[1] and combination[0]+','+combination[2] in supps[1] and combination[1]+','+combination[2] in supps[1]): key = ','.join(combination) if key in supp: supp[key] += 1 else: supp[key] = 1 print("|C3| = " + str(len(supp))) supps.append({k:v for k,v in supp.items() if v >= SUPP_THRESHOLD}) print("|L3| = " + str(len(supps[2]))) return supps pythran-0.10.0+ds2/pythran/tests/cases/gauss.py000066400000000000000000000023121416264035500213660ustar00rootroot00000000000000#nopythran export gauss(int, complex list list, complex list) #pythran export gauss(int, float list list, float list) #runas gauss(4,[[10.0,-6.0,3.5,3.2],[6.7,2.8,-.65,1.2],[9.2,3.0,5.4,1.3],[1.6,8.3,2.5,5.2]],[33.4,4.5,-5.4,-13.4]) def pivot(n,i,a,b): i0=i amp0=abs(a[i-1][i-1]) for j in range(i+1,n+1): amp=abs(a[i-1][j-1]) if amp>amp0: i0=j amp0=amp if i==i0: return temp=b[i-1] b[i-1]=b[i0-1]; b[i0-1]=temp; for j in range(i,n+1): temp=a[j-1][i-1] a[j-1][i-1]=a[j-1][i0-1] a[j-1][i0-1]=temp def gauss(n,a,b): # Downward elimination. 
for i in range(1,n+1): if i idx: return 0 else: return idx - radius def window_ceil(idx, ceil, radius): if idx + radius > ceil: return ceil else: return idx + radius def python_kernel(image, state, state_next, window_radius): changes = 0 sqrt_3 = math.sqrt(3.0) height = image.shape[0] width = image.shape[1] for j in range(width): for i in range(height): winning_colony = state[i, j, 0] defense_strength = state[i, j, 1] for jj in range(window_floor(j, window_radius), window_ceil(j+1, width, window_radius)): for ii in range(window_floor(i, window_radius), window_ceil(i+1, height, window_radius)): if (ii == i and jj == j): continue d = image[i, j, 0] - image[ii, jj, 0] s = d * d for k in range(1, 3): d = image[i, j, k] - image[ii, jj, k] s += d * d gval = 1.0 - math.sqrt(s)/sqrt_3 attack_strength = gval * state[ii, jj, 1] if attack_strength > defense_strength: defense_strength = attack_strength winning_colony = state[ii, jj, 0] changes += 1 state_next[i, j, 0] = winning_colony state_next[i, j, 1] = defense_strength return changes #pythran export test(int) def test(N): image = np.zeros((N, N, 3)) state = np.zeros((N, N, 2)) state_next = np.empty_like(state) # colony 1 is strength 1 at position 0,0 # colony 0 is strength 0 at all other positions state[0, 0, 0] = 1 state[0, 0, 1] = 1 return python_kernel(image, state, state_next, 10) pythran-0.10.0+ds2/pythran/tests/cases/guerre.py000066400000000000000000000027231416264035500215430ustar00rootroot00000000000000#pythran export guerre(complex list, int, complex, float, int) #runas guerre([complex(1,2),complex(3,4),complex(5,6),complex(7,8)],2,complex(5.6,4.3),-3.4,20) #bench guerre([complex(1,2),complex(3,4),complex(5,6),complex(7,8)],2,complex(5.6,4.3),-3.4,400000) def guerre(a,n,z,err,nter): az = [complex(0,0) for i in range(50)] azz = [complex(0,0) for i in range(50)] ci=complex(0.0,1.0) eps=1.0e-20 # The coefficients of p'[z] and p''[z]. 
for i in range(1,n+1): az[i-1]=float(i)*a[i] for i in range(1,n): azz[i-1]=float(i)*az[i] dz=err+1 itera=0 jter=0 while abs(dz)>err and iteraamp2: dz=float(-n)/(f+h) else: dz=float(-n)/(f-h) itera=itera+1 # Rotate by 90 degrees to avoid limit cycles. jter=jter+1 if jter==10: jter=1 dz=dz*ci z=z+dz if jter==100: raise RuntimeError("Laguerre method not converging") return z pythran-0.10.0+ds2/pythran/tests/cases/harris.py000077500000000000000000000013531416264035500215430ustar00rootroot00000000000000#from parakeet testbed #runas import numpy as np ; M, N = 4, 6 ; I = np.arange(M*N, dtype=np.float64).reshape(M,N) ; harris(I) #bench import numpy as np ; M, N = 6000, 4000 ; I = np.arange(M*N, dtype=np.float64).reshape(M,N) ; harris(I) #pythran export harris(float64[][]) import numpy as np def harris(I): m,n = I.shape dx = (I[1:, :] - I[:m-1, :])[:, 1:] dy = (I[:, 1:] - I[:, :n-1])[1:, :] # # At each point we build a matrix # of derivative products # M = # | A = dx^2 C = dx * dy | # | C = dy * dx B = dy * dy | # # and the score at that point is: # det(M) - k*trace(M)^2 # A = dx * dx B = dy * dy C = dx * dy tr = A + B det = A * B - C * C k = 0.05 return det - k * tr * tr pythran-0.10.0+ds2/pythran/tests/cases/hasting.py000066400000000000000000000007371416264035500217120ustar00rootroot00000000000000#from http://wiki.scipy.org/Cookbook/Theoretical_Ecology/Hastings_and_Powell #pythran export fweb(float [], float, float, float, float, float, float, float) import numpy as np def fweb(y, t, a1, a2, b1, b2, d1, d2): yprime = np.empty((3,)) yprime[0] = y[0] * (1. - y[0]) - a1*y[0]*y[1]/(1. + b1 * y[0]) yprime[1] = a1*y[0]*y[1] / (1. + b1 * y[0]) - a2 * y[1]*y[2] / (1. + b2 * y[1]) - d1 * y[1] yprime[2] = a2*y[1]*y[2]/(1. 
+ b2*y[1]) - d2*y[2] return yprime pythran-0.10.0+ds2/pythran/tests/cases/histogram.py000066400000000000000000000007661416264035500222540ustar00rootroot00000000000000#pythran export histogram(float list, int) #runas histogram([ (i*1.1+j*2.3)%10 for i in range(100) for j in range(100) ],10) #bench histogram([ (i*1.1+j*2.3)%10 for i in range(1000) for j in range(2000) ],10) def histogram(data, bin_width): lower_bound, upper_bound = min(data), max(data) out_data=[0]*(1+bin_width) for i in data: out_data[ int(bin_width * (i - lower_bound) / ( upper_bound - lower_bound)) ]+=1 out_data[-2]+=out_data[-1] out_data.pop() return out_data pythran-0.10.0+ds2/pythran/tests/cases/hyantes_core.py000066400000000000000000000014701416264035500227330ustar00rootroot00000000000000#pythran export run(float, float, float, float, float, float, int, int, float list list) #bench run(0.,0.,90.,90., 1., 100., 80, 80, [ [i/10., i/10., i/20.] for i in range(160) ]) #runas run(0.,0.,90.,90., 1., 100., 80, 80, [ [i/10., i/10., i/20.] for i in range(80) ]) import math def run(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t): pt = [ [0]*range_y for _ in range(range_x)] "omp parallel for" for i in range(range_x): for j in range(range_y): s = 0 for k in t: tmp = 6368.* math.acos( math.cos(xmin+step*i)*math.cos( k[0] ) * math.cos((ymin+step*j)-k[1])+ math.sin(xmin+step*i)*math.sin(k[0])) if tmp < range_: s+=k[2] / (1+tmp) pt[i][j] = s return pt pythran-0.10.0+ds2/pythran/tests/cases/hyantes_core_numpy.py000066400000000000000000000017561416264035500241720ustar00rootroot00000000000000# unittest.skip Incorrect values computed on windows ... # pythran export run(float, float, float, float, float, float, int, int, float[][]) # bench import numpy ; run(0,0,90,90, 1, 100, 80, 80, numpy.array([ [i/10., i/10., i/20.] for i in range(160)],dtype=numpy.double)) # runas import numpy ; run(0,0,90,90, 1, 100, 80, 80, numpy.array([ [i/10., i/10., i/20.] 
for i in range(80)],dtype=numpy.double)) import numpy as np def run(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t): X, Y = t.shape pt = np.zeros((X, Y)) "omp parallel for" for i in range(X): for j in range(Y): for k in t: tmp = 6368. * np.arccos(np.cos(xmin + step * i) * np.cos(k[0]) * np.cos((ymin + step * j) - k[1]) + np.sin(xmin + step * i) * np.sin(k[0])) if tmp < range_: pt[i, j] += k[2] / (1+tmp) return pt pythran-0.10.0+ds2/pythran/tests/cases/insertion_sort.py000066400000000000000000000006521416264035500233320ustar00rootroot00000000000000#pythran export insertion_sort(float list) #runas insertion_sort([1.3,5.6,-34.4,34.4,32,1.2,0,0.0,3.4,1.3]) #bench import random; in_ = random.sample(range(10000000), 6000) + [4.5]; insertion_sort(in_) def insertion_sort(list2): for i in range(1, len(list2)): save = list2[i] j = i while j > 0 and list2[j - 1] > save: list2[j] = list2[j - 1] j -= 1 list2[j] = save pythran-0.10.0+ds2/pythran/tests/cases/ising.py000066400000000000000000000016521416264035500213630ustar00rootroot00000000000000#runas import numpy as np; x = np.array([[1,0],[0, 1]], dtype=np.intp); ising_step(x) #pythran export ising_step(intp[:,:]) import numpy as np def ising_step(field, beta=0.4): N, M = field.shape for n_offset in range(2): for m_offset in range(2): for n in range(n_offset, N, 2): for m in range(m_offset, M, 2): _ising_update(field, n, m, np.float32(beta)) return field def clamp(v, n): if v < 0: return v + n elif v < n: return v else: return v - n def _ising_update(field, n, m, beta): total = 0 N, M = field.shape for i in range(n-1, n+2): for j in range(m-1, m+2): if i == n and j == m: continue total += field[clamp(i, N), clamp(j, M)] dE = 2 * field[n, m] * total if dE <= 0: field[n, m] *= -1 elif np.exp(-dE * beta) > 0.5: #np.random.rand(): field[n, m] *= -1 pythran-0.10.0+ds2/pythran/tests/cases/julia_pure_python.py000066400000000000000000000020251416264035500240050ustar00rootroot00000000000000# --- Python / Numpy imports 
------------------------------------------------- import numpy as np from time import time #pythran export compute_julia(float, float, int, float?, float?, int?) def kernel(zr, zi, cr, ci, lim, cutoff): ''' Computes the number of iterations `n` such that |z_n| > `lim`, where `z_n = z_{n-1}**2 + c`. ''' count = 0 while ((zr*zr + zi*zi) < (lim*lim)) and count < cutoff: zr, zi = zr * zr - zi * zi + cr, 2 * zr * zi + ci count += 1 return count def compute_julia(cr, ci, N, bound=1.5, lim=1000., cutoff=1e6): ''' Pure Python calculation of the Julia set for a given `c`. No NumPy array operations are used. ''' julia = np.empty((N, N), np.uint32) grid_x = np.linspace(-bound, bound, N) t0 = time() "omp parallel for default(none) shared(grid_x, cr, ci, lim, cutoff, julia)" for i, x in enumerate(grid_x): for j, y in enumerate(grid_x): julia[i,j] = kernel(x, y, cr, ci, lim, cutoff) return julia, time() - t0 pythran-0.10.0+ds2/pythran/tests/cases/kmeans.py000066400000000000000000000036651416264035500215360ustar00rootroot00000000000000#pythran export test() #norunas test() because of random input #bench test() import math, random from functools import reduce # a point is a tuple # a cluster is a list of tuple and a point (the centroid) def calculateCentroid(cluster): reduce_coord = lambda i: reduce(lambda x,p : x + p[i], cluster,0.0) centroid_coords = [reduce_coord(i)/len(cluster) for i in range(len(cluster[0]))] return centroid_coords def kmeans(points, k, cutoff): initial = random.sample(points, k) clusters = [[p] for p in initial] centroids = [ calculateCentroid(c) for c in clusters ] while True: lists = [ [] for c in clusters] for p in points: smallest_distance = getDistance(p,centroids[0]) index = 0 for i in range(len(clusters[1:])): distance = getDistance(p, centroids[i+1]) if distance < smallest_distance: smallest_distance = distance index = i+1 lists[index].append(p) biggest_shift = 0.0 for i in range(len(clusters)): if lists[i]: new_cluster, new_centroid = (lists[i], 
calculateCentroid(lists[i])) shift = getDistance(centroids[i], new_centroid) clusters[i] = new_cluster centroids[i] = new_centroid biggest_shift = max(biggest_shift, shift) if biggest_shift < cutoff: break return clusters def getDistance(a, b): ret = reduce(lambda x,y: x + pow((a[y]-b[y]), 2),range(len(a)),0.0) return math.sqrt(ret) def makeRandomPoint(n, lower, upper): return [random.uniform(lower, upper) for i in range(n)] def test(): num_points, dim, k, cutoff, lower, upper = 500, 10, 50, 0.001, 0, 2000 points = [ makeRandomPoint(dim, lower, upper) for i in range(num_points) ] clusters = kmeans(points, k, cutoff) #for c in clusters: # print(c) return clusters pythran-0.10.0+ds2/pythran/tests/cases/l2norm.py000066400000000000000000000006741416264035500214660ustar00rootroot00000000000000#from http://stackoverflow.com/questions/7741878/how-to-apply-numpy-linalg-norm-to-each-row-of-a-matrix/7741976#7741976 #pythran export l2_norm(float64[][]) #runas import numpy as np ; N = 100 ; x = np.arange(N*N, dtype=np.float64).reshape((N,N)) ; l2_norm(x) #bench import numpy as np ; N = 10000 ; x = np.arange(N*N, dtype=np.float64).reshape((N,N)) ; l2_norm(x) import numpy as np def l2_norm(x): return np.sqrt(np.sum(np.abs(x)**2, 1)) pythran-0.10.0+ds2/pythran/tests/cases/lap.py000066400000000000000000000015101416264035500210170ustar00rootroot00000000000000#pythran export laplacian(float [], float) #runas import numpy as np; from lap import laplacian; var = np.arange(100, dtype=float) ; dh2=.01 ; laplacian(var, dh2) #from http://stackoverflow.com/questions/32485935/cythonize-a-partial-differential-equation-integrator import numpy def laplacian(var, dh2): """ (1D array, dx^2) -> laplacian(1D array) periodic_laplacian_1D_4th_order Implementing the 4th order 1D laplacian with periodic condition """ lap = numpy.zeros_like(var) lap[1:] = (4.0/3.0)*var[:-1] lap[0] = (4.0/3.0)*var[1] lap[:-1] += (4.0/3.0)*var[1:] lap[-1] += (4.0/3.0)*var[0] lap += (-5.0/2.0)*var lap[2:] += 
(-1.0/12.0)*var[:-2] lap[:2] += (-1.0/12.0)*var[-2:] lap[:-2] += (-1.0/12.0)*var[2:] lap[-2:] += (-1.0/12.0)*var[:2] return lap / dh2 pythran-0.10.0+ds2/pythran/tests/cases/lapl2d.py000066400000000000000000000006541416264035500214310ustar00rootroot00000000000000 #pythran export lapl2d(float[,],float[,],int) #runas import numpy as np; x = np.arange(16.).reshape(4,4); lapl2d(x, x.T, 3) def lapl2d(In,Out,niter): siz=In.shape[0] h2= (1./siz)**2 for it in range(0,niter): Out[1:siz-1,1:siz-1]= h2*( In[0:siz-2,1:siz-1 ] + In[1:siz-1,0:siz-2]- 4.0*In[1:siz-1,1:siz-1]+ In[2:siz,1:siz-1]+In[1:siz-1,2:siz]) In,Out=Out,In pythran-0.10.0+ds2/pythran/tests/cases/lapl3d.py000066400000000000000000000007131416264035500214260ustar00rootroot00000000000000#runas: import numpy as np ; N = 500 ; X = np.random.randn(N,N,3); laplacien(X) #pythran export laplacien(float64[][][3]) import numpy as np def laplacien(image): out_image = np.abs(4*image[1:-1,1:-1] - image[0:-2,1:-1] - image[2:,1:-1] - image[1:-1,0:-2] - image[1:-1,2:]) valmax = np.max(out_image) valmax = max(1.,valmax)+1.E-9 out_image /= valmax return out_image pythran-0.10.0+ds2/pythran/tests/cases/laplace.py000066400000000000000000000007641416264035500216560ustar00rootroot00000000000000#runas calc(60,100) #bench calc(120,200) #pythran export calc(int, int) def update(u): dx = 0.1 dy = 0.1 dx2 = dx*dx dy2 = dy*dy nx, ny = len(u), len(u[0]) for i in range(1,nx-1): for j in range(1, ny-1): u[i][j] = ((u[i+1][ j] + u[i-1][ j]) * dy2 + (u[i][ j+1] + u[i][ j-1]) * dx2) / (2*(dx2+dy2)) def calc(N, Niter=100): u = [ [0]*N for _ in range(N)] u[0] = [1] * N for i in range(Niter): update(u) return u pythran-0.10.0+ds2/pythran/tests/cases/ldpc_decoder.py000066400000000000000000000067441416264035500226700ustar00rootroot00000000000000# coding: utf-8 import numpy as np from math import log, tanh #pythran export phi0(float) def phi0(x): x = abs(x) if (x < 9.08e-5 ): return( 10 ); else: return -log (tanh (x/2)) #pythran export G(float[:]) def 
G(Lq): X = sum (phi0(e) for e in Lq) s = np.prod(np.sign(Lq)) return s * phi0(X) #pythran export BinaryProduct(int[:,:], int[:]) def BinaryProduct(X,Y): """ Binary Matrices or Matrix-vector product in Z/2Z. Works with scipy.sparse.csr_matrix matrices X,Y too.""" A = X.dot(Y) return A%2 #pythran export InCode(int[:,:], int[:]) def InCode(H,x): """ Computes Binary Product of H and x. If product is null, x is in the code. Returns appartenance boolean. """ return (BinaryProduct(H,x)==0).all() #pythran export Decoding_logBP(int[:,:], int list list, int list list, float[:,:], float[:], int) def Decoding_logBP(H,Bits,Nodes,Lq,Lc,max_iter=1): """ Decoding function using Belief Propagation algorithm (logarithmic version) IMPORTANT: if H is large (n>1000), H should be scipy.sparse.csr_matrix object to speed up calculations (highly recommanded. ) ----------------------------------- Parameters: H: 2D-array (OR scipy.sparse.csr_matrix object) Parity check matrix, shape = (m,n) y: n-vector recieved after transmission in the channel. (In general, returned by Coding Function) Signal-Noise Ratio: SNR = 10log(1/variance) in decibels of the AWGN used in coding. max_iter: (default = 1) max iterations of the main loop. Increase if decoding is not error-free. """ m,n=H.shape if not len(Lc)==n: raise ValueError('La taille de y doit correspondre au nombre de colonnes de H') if m>=n: raise ValueError('H doit avoir plus de colonnes que de lignes') # var = 10**(-SNR/10) # ### ETAPE 0: initialisation # Lc = 2*y/var # Lq=np.zeros(shape=(m,n)) Lr = np.zeros(shape=(m,n)) count=0 # prod=np.prod # tanh = np.tanh # log = np.log Lq += Lc #Bits,Nodes = BitsAndNodes(H) while(True): count+=1 #Compteur qui empêche la boucle d'être infinie .. 
#### ETAPE 1 : Horizontale for i in range(m): Ni = Bits[i] for j in Ni: Nij = list(Ni) Nij.remove(j) # X = sum (phi0(e) for e in Lq[i,Nij]) # s = prod(np.sign(Lq[i,Nij])) # Lr[i,j] = s * phi0(X) Lr[i,j] = G(Lq[i][Nij]) Lr = np.clip (Lr, -100, 100) #### ETAPE 2 : Verticale for j in range(n): Mj = Nodes[j] for i in Mj: Mji = list(Mj) Mji.remove(i) Lq[i,j] = Lc[j]+sum(Lr[Mji][:,j]) #### LLR a posteriori: # L_posteriori = np.zeros(n) # for j in range(n): # Mj = Nodes[j] # L_posteriori[j] = Lc[j] + sum(Lr[Mj,j]) extrinsic = np.empty(n) for j in range(n): Mj = Nodes[j] extrinsic[j] = sum(Lr[Mj][:,j]) L_posteriori = extrinsic + Lc #x = np.array(L_posteriori <= 0).astype(int) x = np.array(extrinsic <= 0).astype(int) product = InCode(H,x) #print(count, product) if product or count >= max_iter: break # print(count) return np.array(L_posteriori <= 0).astype(int), Lq - Lc, extrinsic, product pythran-0.10.0+ds2/pythran/tests/cases/ln.py000066400000000000000000000004461416264035500206630ustar00rootroot00000000000000#pythran export ln(float64[], float64[]) #runas import numpy; a = numpy.arange(0, 1., 1./100); b = numpy.empty_like(a) ; ln(a,b) def ln(X, Y): Y[:] = (X-1) - (X-1)**2 / 2 + (X-1)**3 / 3 - (X-1)**4 / 4 + (X-1)**5 / 5 - (X-1)**6 / 6 + (X-1)**7 / 7 - (X-1)**8 / 8 + (X-1)**9 / 9 return Y pythran-0.10.0+ds2/pythran/tests/cases/log_likelihood.py000066400000000000000000000006671416264035500232430ustar00rootroot00000000000000#runas import numpy as np ; N = 10000 ; a = np.arange(float(N)); b = 0.1; c =1.1; log_likelihood(a, b, c) #from http://arogozhnikov.github.io/2015/09/08/SpeedBenchmarks.html import numpy #pythran export log_likelihood(float64[], float64, float64) def log_likelihood(data, mean, sigma): s = (data - mean) ** 2 / (2 * (sigma ** 2)) pdfs = numpy.exp(- s) pdfs /= numpy.sqrt(2 * numpy.pi) * sigma return numpy.log(pdfs).sum() 
pythran-0.10.0+ds2/pythran/tests/cases/loopy_jacob.py000066400000000000000000000207601416264035500225530ustar00rootroot00000000000000#pythran export loopy(int list list, int, int, int) #runas data = [[1, 45, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 60, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]] ; loopy(data, 0, 100, 100) #skip.bench data = [[1, 45, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]] + [[0, 60, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]] * 200 ; loopy(data, 0, 100, 100) TOO_SLOW def _WarningErrorHandler(msg,fatal, _WarningCount): if _WarningCount > 200: raise RuntimeError(msg) else: return _WarningCount +1 def loopy(_PopulationSetInfo_Data, _WarningCount, _NumberOfTriesToGenerateThisIndividual, _NumberOfTriesToGenerateThisSimulationStep): #### Functions Allowed in Expressions #### IndividualID = 0 Repetition = 0 Time = 0 _ResultsInfo_Data = [] #### Create State Handler Functions and State Classification Vector ##### ############### Execute Simulation ############### ####### Subject Loop ####### _Subject = 0 while _Subject < (len(_PopulationSetInfo_Data)): IndividualID = IndividualID +1 # Comment/Uncomment the next line to disable/enable printing of verbose information #print("Simulating Individual #" + str(IndividualID)) _NumberOfTriesToGenerateThisIndividual = 1 ##### Repetition Loop ##### Repetition = 0 while Repetition < (1000): # Reset repeat individual repetition flag in case it was set _RepeatSameIndividualRepetition = False #Init all parameters - Resetting them to zero # Comment/Uncomment the next line to disable/enable printing of verbose information #print(" Repetition = " + str(Repetition)) Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 # Init parameters from population set [Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, 
State2_Entered, State3Terminal_Entered] = _PopulationSetInfo_Data[IndividualID-1] # Init parameters from Initialization Phase # Reset time and load first vector into results Time = 0 # Load the initial condition into the results vector for this individual _ResultsInfoForThisIndividual = [ [IndividualID, Repetition, Time ,Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered] ] _Terminate_Time_Loop = False or State3Terminal != 0 _NumberOfTriesToGenerateThisSimulationStep = 0 _RepeatSameSimulationStep = False ##### Time Loop ##### while Time < 3: if _RepeatSameSimulationStep: # if repeating the same simulation step, reset the flag to avoid infinite loops _RepeatSameSimulationStep = False # Load the previous time step results into the results vector for this individual [_IgnoreIndividualID, _IgnoreRepetition, _IgnoreTime ,Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered] = _ResultsInfoForThisIndividual[-1] _Terminate_Time_Loop = False elif _Terminate_Time_Loop: # If the time loop has to be terminated break else: # If not repeating the same simulation step, nor terminating, increase the time counter Time = Time + 1 # Comment/Uncomment the next line to disable/enable printing of verbose information #print(" Time Step = " + str(Time)) # Reset Warning/Error Count _WarningCountBeforeThisSimulationStep = _WarningCount # Increase the number of Tries counter _NumberOfTriesToGenerateThisSimulationStep = _NumberOfTriesToGenerateThisSimulationStep + 1 ##### Phase 1 - Pre State Transition ##### # Processing the rule: "Affected Parameter: Age; Simulation Phase: Pre-stateOccurrence Probability: 1; Applied Formula: Age +1; Rule Notes: Age Increase; ; _LastExpressionString = "Processing the expression: _Threshold = 1 ." 
# This expression should expand to: _Threshold = 1 try: # Building Step #0: _Threshold = 1 _Temp = 1 if not (-1e-14 <= _Temp <= 1.00000000000001): _WarningCount = _WarningErrorHandler("The occurrence probability threshold defined by a rule does not evaluate to a number between 0 and 1 within a tolerance specified by the system option parameter SystemPrecisionForProbabilityBoundCheck. The occurrence probability was evaluated to: " + str(_Temp) + " for the rule: " + 'Affected Parameter: Age; Simulation Phase: Pre-stateOccurrence Probability: 1; Applied Formula: Age +1; Rule Notes: Age Increase; ; ', True, _WarningCount) except: _WarningCount = _WarningErrorHandler(_LastExpressionString, True, _WarningCount) # Expression building complete - assign to destination parameter _Threshold = _Temp if 0.5 < _Threshold: _LastExpressionString = "Processing the expression: Age = Age +1 ." # This expression should expand to: Age = Age +1 try: # Building Step #0: Age = Age _Temp0 = Age # Building Step #1: Age = Age +1 _Temp = _Temp0 +1 except: _WarningCount = _WarningErrorHandler(_LastExpressionString, True, _WarningCount) # Expression building complete - assign to destination parameter Age = _Temp pass ##### End of Rule Processing ##### ##### Error Handlers ##### if _WarningCount <= _WarningCountBeforeThisSimulationStep: # Load New results to the results vector _ResultsInfoForThisIndividual.append([IndividualID, Repetition, Time ,Gender, Age, State0, State1, State2, State3Terminal, Example_6___Main_Process, Example_6___Main_Process_Entered, State0_Entered, State1_Entered, State2_Entered, State3Terminal_Entered]) _NumberOfTriesToGenerateThisSimulationStep = 0 else: #print(" Repeating the same simulation step due to an error - probably a bad validity check") _RepeatSameSimulationStep = True if _NumberOfTriesToGenerateThisSimulationStep >= 5: if _NumberOfTriesToGenerateThisIndividual < 2: # Repeat the calculations for this person _RepeatSameIndividualRepetition = True break else: 
_WarningCount = _WarningErrorHandler("The simulation was halted since the number of tries to recalculate the same person has been exceeded. If this problem consistently repeats itself, check the formulas to see if these cause too many out of bounds numbers to be generated. Alternatively, try raising the system option NumberOfTriesToRecalculateSimulationOfIndividualFromStart which is now defined as 2 . ", True, _WarningCount) if _RepeatSameIndividualRepetition: #print(" Repeating the same repetition for the same individual due to exceeding the allowed number of simulation steps recalculations for this individual") _NumberOfTriesToGenerateThisIndividual = _NumberOfTriesToGenerateThisIndividual + 1 else: # If going to the next individual repetition, save the results and increase the counter # Load New results to the results vector _ResultsInfo_Data.extend(_ResultsInfoForThisIndividual) Repetition = Repetition + 1 _Subject = _Subject + 1 # Comment/Uncomment the next lines to disable/enable dumping output file return _ResultsInfo_Data pythran-0.10.0+ds2/pythran/tests/cases/lu.py000066400000000000000000000027541416264035500206760ustar00rootroot00000000000000#runas import numpy as np; x = np.arange(1., 26.).reshape(5,5); factorMatrix0(x), factorMatrix1(x) import numpy as np #pythran export factorMatrix0(float[:,:]) def factorMatrix0(M): # Gaussian elimination, partial pivoting. # M must be an (n,n+1) numpy array. Not tested! n = M.shape[0] m= M.shape[1] for line in range(0, n-1): # find pivot cmax = np.argmax(abs(M[line:n,line])) + line # exchange rows if necessary if cmax != line: M[[line,cmax]]=M[[cmax,line]] # eliminate pivot = M[line,line] for j in range(line+1,n): v= M[j,line]/pivot for k in range(line,m): M[j,k]-= v*M[line,k] #pythran export factorMatrix1(float[:,:]) def factorMatrix1(M): # Gaussian elimination, partial pivoting. # M must be an (n,n+1) numpy array. Not tested! 
n = M.shape[0] m= M.shape[1] for line in range(0, n-1): # find pivot cmax=line vmax= abs(M[line,line]) for i in range(line+1,n): if abs(M[i,line])> vmax: vmax= abs(M[i,line]) cmax= i # exchange rows if necessary if cmax != line: for j in range(line,m): t= M[line,j] M[line,j]= M[cmax,j] M[cmax,j]= t # eliminate pivot = M[line,line] for j in range(line+1,n): v= M[j,line]/pivot for k in range(line,m): M[j,k]-= v*M[line,k] pythran-0.10.0+ds2/pythran/tests/cases/make_circle_two_points.py000066400000000000000000000062711416264035500247770ustar00rootroot00000000000000import math #pythran export _cross_product (float, float, float, float, float, float) def _cross_product(x0, y0, x1, y1, x2, y2): return (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0) #pythran export make_circumcircle ((float, float), (float, float), (float, float)) def make_circumcircle(a, b, c): # Mathematical algorithm from Wikipedia: Circumscribed circle ox = (min(a[0], b[0], c[0]) + max(a[0], b[0], c[0])) / 2.0 oy = (min(a[1], b[1], c[1]) + max(a[1], b[1], c[1])) / 2.0 ax = a[0] - ox; ay = a[1] - oy bx = b[0] - ox; by = b[1] - oy cx = c[0] - ox; cy = c[1] - oy d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2.0 if d == 0.0: return None x = ox + ((ax*ax + ay*ay) * (by - cy) + (bx*bx + by*by) * (cy - ay) + (cx*cx + cy*cy) * (ay - by)) / d y = oy + ((ax*ax + ay*ay) * (cx - bx) + (bx*bx + by*by) * (ax - cx) + (cx*cx + cy*cy) * (bx - ax)) / d ra = math.hypot(x - a[0], y - a[1]) rb = math.hypot(x - b[0], y - b[1]) rc = math.hypot(x - c[0], y - c[1]) return (x, y, max(ra, rb, rc)) #pythran export make_diameter ((float, float), (float, float)) def make_diameter(a, b): cx = (a[0] + b[0]) / 2.0 cy = (a[1] + b[1]) / 2.0 r0 = math.hypot(cx - a[0], cy - a[1]) r1 = math.hypot(cx - b[0], cy - b[1]) return (cx, cy, max(r0, r1)) #pythran export is_in_circle((float, float, float), (float, float)) def is_in_circle(c, p): return math.hypot(p[0] - c[0], p[1] - c[1]) <= c[2] * (1 + 1e-14) #pythran export 
_make_circle_one_point((float,float) list, (float, float)) def _make_circle_one_point(points, p): c = (p[0], p[1], 0.0) for (i, q) in enumerate(points): if not is_in_circle(c, q): if c[2] == 0.0: c = make_diameter(p, q) else: c = make_circle_two_points(points[ : i + 1], p, q) return c #pythran export make_circle_two_points((float,float) list, (float, float), (float, float)) #runas make_circle_two_points([(1.,4.), (0., 8.)], (3., 4.), (5., 6.)) def make_circle_two_points(points, p, q): circ = make_diameter(p, q) left = None right = None px, py = p qx, qy = q # For each point not in the two-point circle for r in points: if is_in_circle(circ, r): continue # Form a circumcircle and classify it on left or right side cross = _cross_product(px, py, qx, qy, r[0], r[1]) c = make_circumcircle(p, q, r) if c is None: continue elif cross > 0.0 and (left is None or _cross_product(px, py, qx, qy, c[0], c[1]) > _cross_product(px, py, qx, qy, left[0], left[1])): left = c elif cross < 0.0 and (right is None or _cross_product(px, py, qx, qy, c[0], c[1]) < _cross_product(px, py, qx, qy, right[0], right[1])): right = c # Select which circle to return if left is None and right is None: return circ elif left is None: return right elif right is None: return left else: return left if (left[2] <= right[2]) else right pythran-0.10.0+ds2/pythran/tests/cases/make_decision.py000066400000000000000000000007101416264035500230360ustar00rootroot00000000000000import numpy as np #pythran export md(complex128[], complex128[]) #pythran export md(complex128[], complex128[::]) #runas import numpy as np; s=np.arange(160.)+np.arange(160.)*1.j ; sc = s[::2]; md(s, sc) def md(E, symbols): L = E.shape[0] M = symbols.shape[0] syms_out = np.zeros(L, dtype=E.dtype) #omp parallel for for i in range(L): im = np.argmin(abs(E[i]-symbols)**2) syms_out[i] = symbols[im] return syms_out pythran-0.10.0+ds2/pythran/tests/cases/mandel.py000066400000000000000000000015051416264035500215070ustar00rootroot00000000000000#runas 
mandel(20,0.,0., 8) #bench mandel(400,0.,0., 75) #pythran export mandel(int, float, float, int) def mandel(size, x_center, y_center, max_iteration): out= [ [ 0 for i in range(size) ] for j in range(size) ] for i in range(size): "omp parallel for" for j in range(size): x,y = ( x_center + 4.0*float(i-size/2)/size, y_center + 4.0*float(j-size/2)/size ) a,b = (0.0, 0.0) iteration = 0 while (a**2 + b**2 <= 4.0 and iteration < max_iteration): a,b = a**2 - b**2 + x, 2*a*b + y iteration += 1 if iteration == max_iteration: color_value = 255 else: color_value = iteration*10 % 255 out[i][j]=color_value return out pythran-0.10.0+ds2/pythran/tests/cases/matmul.py000066400000000000000000000012571416264035500215520ustar00rootroot00000000000000#runas a=[ [ float(i) for i in range(60)] for j in range(60)] ; matrix_multiply(a,a) #runas a=[ [ float(i) for i in range(60)] for j in range(40)] ; b=[ [ float(i) for i in range(40)] for j in range(60)]; matrix_multiply(a,b) #pythran export matrix_multiply(float list list, float list list) def zero(n,m): return [[0 for row in range(n)] for col in range(m)] def matrix_multiply(m0, m1): new_matrix = zero(len(m0),len(m1[0])) for i in range(len(m0)): for j in range(len(m1[0])): r=0 "omp parallel for reduction(+:r)" for k in range(len(m1)): r += m0[i][k]*m1[k][j] new_matrix[i][j]=r return new_matrix pythran-0.10.0+ds2/pythran/tests/cases/matrix_class_distance.py000066400000000000000000000011751416264035500246150ustar00rootroot00000000000000#pythran export matrix_class_distance(float64[:,:], int[], float64[:,:], int) #from https://stackoverflow.com/questions/59601987 #runas import numpy as np; n = 200;d = 10;iterations = 20;np.random.seed(42);dat = np.random.random((n, d));dat_filter = np.random.randint(0, n, size=n); dat_points = np.random.random((n, d)); matrix_class_distance(dat, dat_filter, dat_points, iterations) import numpy as np def matrix_class_distance(dat, dat_filter, dat_points, iterations): aggregation = 0 for i in range(iterations): 
aggregation += np.sum(np.linalg.norm(dat[dat_filter==i] - dat_points[i], axis=1)) return aggregation pythran-0.10.0+ds2/pythran/tests/cases/monte_carlo.py000066400000000000000000000025021416264035500225470ustar00rootroot00000000000000# http://code.activestate.com/recipes/577263-numerical-integration-using-monte-carlo-method/ # Numerical Integration using Monte Carlo method # FB - 201006137 #pythran export montecarlo_integration(float, float, int, float list, int) #runas montecarlo_integration(1.,10.,100,[x/100. for x in range(100)],100) #bench montecarlo_integration(1.,10.,650000,[x/100. for x in range(100)],100) import math def montecarlo_integration(xmin, xmax, numSteps,rand,randsize): # define any function here! def f(x): return math.sin(x) # find ymin-ymax ymin = f(xmin) ymax = ymin for i in range(numSteps): x = xmin + (xmax - xmin) * float(i) / numSteps y = f(x) if y < ymin: ymin = y if y > ymax: ymax = y # Monte Carlo rectArea = (xmax - xmin) * (ymax - ymin) numPoints = numSteps # bigger the better but slower! 
ctr = 0 for j in range(numPoints): x = xmin + (xmax - xmin) * rand[j%randsize] y = ymin + (ymax - ymin) * rand[j%randsize] if math.fabs(y) <= math.fabs(f(x)): if f(x) > 0 and y > 0 and y <= f(x): ctr += 1 # area over x-axis is positive if f(x) < 0 and y < 0 and y >= f(x): ctr -= 1 # area under x-axis is negative fnArea = rectArea * float(ctr) / numPoints return fnArea pythran-0.10.0+ds2/pythran/tests/cases/monte_carlo_pricer.py000066400000000000000000000011261416264035500241140ustar00rootroot00000000000000#unittest.skip np.random not supported yet import numpy as np def step(dt, prices, c0, c1, noises): return prices * np.exp(c0 * dt + c1 * noises) def monte_carlo_pricer(paths, dt, interest, volatility): c0 = interest - 0.5 * volatility ** 2 c1 = volatility * np.sqrt(dt) for j in range(1, paths.shape[1]): # for all trials prices = paths[:, j - 1] # generate normally distributed random number noises = np.random.normal(0., 1., prices.size) # calculate the next batch of prices for all trials paths[:, j] = step(dt, prices, c0, c1, noises) pythran-0.10.0+ds2/pythran/tests/cases/morphology.py000066400000000000000000000062531416264035500224530ustar00rootroot00000000000000#skip.pythran export dilate_decompose(int[][], int) #pythran export dilate_decompose_loops(float[][], int) #skip.pythran export dilate_decompose_interior(int[][], int[][]) #skip.runas import numpy as np ; image = np.random.randint(0, 256, (width, height)) / 256.0 ; dilate_decompose_loops(image) #runas import numpy as np ; image = np.tri(100, 200) /2.0 ; dilate_decompose_loops(image, 4) #bench import numpy as np ; image = np.tri(500, 600) /2.0 ; dilate_decompose_loops(image, 4) from numpy import empty_like def dilate_decompose_loops(x, k): m,n = x.shape y = empty_like(x) for i in range(m): for j in range(n): left_idx = max(0, i-k//2) right_idx = min(m, i+k//2+1) currmax = x[left_idx, j] for ii in range(left_idx+1, right_idx): elt = x[ii, j] if elt > currmax: currmax = elt y[i, j] = currmax z = 
empty_like(x) for i in range(m): for j in range(n): left_idx = max(0, j-k//2) right_idx = min(n, j+k//2+1) currmax = y[i,left_idx] for jj in range(left_idx+1, right_idx): elt = y[i,jj] if elt > currmax: currmax = elt z[i,j] = currmax return z #def dilate_1d_naive(x_strip, k): # """ # Given a 1-dimensional input and 1-dimensional output, # fill output with 1d dilation of input # """ # nelts = len(x_strip) # y_strip = empty_like(x_strip) # half = k / 2 # for idx in range(nelts): # left_idx = max(idx-half,0) # right_idx = min(idx+half+1, nelts) # currmax = x_strip[left_idx] # for j in range(left_idx+1, right_idx): # elt = x_strip[j] # if elt > currmax: # currmax = elt # y_strip[idx] = currmax # return y_strip # #def dilate_decompose(x, k): # import numpy as np # m,n = x.shape # y = np.array([dilate_1d_naive(x[row_idx, :], k) for row_idx in range(m)]) # return np.array([dilate_1d_naive(y[:, col_idx], k) for col_idx in range(n)]).T # #def dilate_1d_interior(x_strip, k): # # nelts = len(x_strip) # y_strip = empty_like(x_strip) # half = k / 2 # # interior_start = half+1 # interior_stop = max(nelts-half, interior_start) # # # left boundary # for i in range(min(half+1, nelts)): # left_idx = max(i-half,0) # right_idx = min(i+half+1, nelts) # currmax = x_strip[left_idx] # for j in range(left_idx+1, right_idx): # elt = x_strip[j] # if elt > currmax: # currmax = elt # y_strip[i] = currmax # # #interior # for i in range(interior_start, interior_stop): # left_idx = i-half # right_idx = i+half+1 # currmax = x_strip[left_idx] # for j in range(left_idx+1, right_idx): # elt = x_strip[j] # if elt > currmax: # currmax = elt # y_strip[i] = currmax # # # right boundary # for i in range(interior_stop, nelts): # left_idx = max(i-half, 0) # right_idx = nelts # currmax = x_strip[left_idx] # for j in range(left_idx+1, right_idx): # elt = x_strip[j] # if elt > currmax: # currmax = elt # y_strip[i] = currmax # return y_strip # #def dilate_decompose_interior(x, k): # m,n = x.shape # y = 
np.array([dilate_1d_interior(x[row_idx, :],k) for row_idx in range(m)]) # return np.array([dilate_1d_interior(y[:, col_idx],k) for col_idx in range(n)]).T pythran-0.10.0+ds2/pythran/tests/cases/mulmod.py000066400000000000000000000005771416264035500215540ustar00rootroot00000000000000# from http://stackoverflow.com/questions/19350395/python-jit-for-known-bottlenecks # pythran export gf2mulmod(int, int, int) # runas x, y, m = 2**10 , 2**6-1, 2**5-1; gf2mulmod(x, y, m) def gf2mulmod(x,y,m): z = 0 while x > 0: if (x & 1) != 0: z ^= y y <<= 1 y2 = y ^ m if y2 < y: y = y2 x >>= 1 return z pythran-0.10.0+ds2/pythran/tests/cases/multi_export.py000066400000000000000000000004571416264035500230070ustar00rootroot00000000000000#pythran export a(int) #pythran export a(float) #pythran export a(str) #pythran export a(int[]) #pythran export a(int[3]) #runas a(2.4) #runas a(2) #runas a("hello world") #runas import numpy as np; x = np.array([1,2,3,4]); a(x) #runas import numpy as np; x = np.array([1,2,3]); a(x) def a(i): return i pythran-0.10.0+ds2/pythran/tests/cases/multitype.py000066400000000000000000000010431416264035500223000ustar00rootroot00000000000000#pythran export times(int or str, int) #runas times(200, 3) #runas times('200', 3) def times(n, m): return n*m #pythran export check([str or float] list, int) #runas check(['1'], 2) #runas check([1.5], 2) def check(x, y): if y: return x, y else: return x + x, y #pythran export check2(str or float list, str or bool list) #runas check2('1', '2') #runas check2('1', [True]) #runas check2([1.5], 'True') #runas check2([1.5], [False]) def check2(x, y): if y: return x, y else: return x + x, y pythran-0.10.0+ds2/pythran/tests/cases/nd_local_maxima.py000066400000000000000000000015341416264035500233600ustar00rootroot00000000000000#from https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py #pythran export local_maxima(float [][][][]) #runas import numpy as np ; shape = (8,6,4,2) ; x = np.arange(8*6*4*2, 
dtype=np.float64).reshape(*shape) ; local_maxima(x) import numpy as np def wrap(pos, offset, bound): return ( pos + offset ) % bound def clamp(pos, offset, bound): return min(bound-1,max(0,pos+offset)) def reflect(pos, offset, bound): idx = pos+offset return min(2*(bound-1)-idx,max(idx,-idx)) def local_maxima(data, mode=wrap): wsize = data.shape result = np.ones(data.shape, bool) for pos in np.ndindex(data.shape): myval = data[pos] for offset in np.ndindex(wsize): neighbor_idx = tuple(mode(p, o-w//2, w) for (p, o, w) in zip(pos, offset, wsize)) result[pos] &= (data[neighbor_idx] <= myval) return result pythran-0.10.0+ds2/pythran/tests/cases/nqueens.py000066400000000000000000000031301416264035500217210ustar00rootroot00000000000000#bench n_queens(9) #runas n_queens(6) #pythran export n_queens(int) # Pure-Python implementation of itertools.permutations(). def permutations(iterable, r=None): """permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)""" pool = tuple(iterable) n = len(pool) if r is None: r = n indices = list(range(n)) cycles = list(range(n-r+1, n+1))[::-1] yield tuple(pool[i] for i in indices[:r]) while n: for i in reversed(range(r)): cycles[i] -= 1 if cycles[i] == 0: indices[i:] = indices[i+1:] + indices[i:i+1] cycles[i] = n - i else: j = cycles[i] indices[i], indices[-j] = indices[-j], indices[i] yield tuple(pool[i] for i in indices[:r]) break else: return # From http://code.activestate.com/recipes/576647/ def n_queens(queen_count): """N-Queens solver. Args: queen_count: the number of queens to solve for. This is also the board size. Yields: Solutions to the problem. Each yielded value is looks like (3, 8, 2, 1, 4, ..., 6) where each number is the column position for the queen, and the index into the tuple indicates the row. 
""" out =list() cols = list(range(queen_count)) #for vec in permutations(cols): for vec in permutations(cols,None): if (queen_count == len(set(vec[i]+i for i in cols)) == len(set(vec[i]-i for i in cols))): #yield vec out.append(vec) return out pythran-0.10.0+ds2/pythran/tests/cases/pairwise.py000066400000000000000000000011631416264035500220720ustar00rootroot00000000000000#from http://jakevdp.github.com/blog/2012/08/24/numba-vs-cython/ #runas X = [ [i/100.+j for i in range(100) ] for j in range(30) ] ; pairwise(X) #bench X = [ [i/100.+j for i in range(800) ] for j in range(100) ] ; pairwise(X) #pythran export pairwise(float list list) import math def pairwise(X): M = len(X) N = len(X[0]) D = [ [0 for x in range(M) ] for y in range(M) ] "omp parallel for" for i in range(M): for j in range(M): d = 0.0 for k in range(N): tmp = X[i][k] - X[j][k] d += tmp * tmp D[i][j] = math.sqrt(d) return D pythran-0.10.0+ds2/pythran/tests/cases/pairwise_numpy.py000066400000000000000000000010731416264035500233220ustar00rootroot00000000000000#from http://jakevdp.github.com/blog/2012/08/24/numba-vs-cython/ #runas import numpy as np ; X = np.linspace(0,10,20000).reshape(200,100) ; pairwise(X) #bench import numpy as np ; X = np.linspace(0,10,10000).reshape(100,100) ; pairwise(X) #pythran export pairwise(float [][]) import numpy as np def pairwise(X): M, N = X.shape D = np.empty((M,M)) for i in range(M): for j in range(M): d = 0.0 for k in range(N): tmp = X[i,k] - X[j,k] d += tmp * tmp D[i,j] = np.sqrt(d) return D pythran-0.10.0+ds2/pythran/tests/cases/periodic_bc.py000066400000000000000000000003011416264035500225020ustar00rootroot00000000000000#pythran export periodic_bc(float [][][]) #runas import numpy; r = numpy.arange(0., 27.).reshape((3,3,3)); periodic_bc(r) def periodic_bc(f): f[:, 0, :] = f[:, -2, :] return f pythran-0.10.0+ds2/pythran/tests/cases/periodic_dist.py000066400000000000000000000023531416264035500230720ustar00rootroot00000000000000#pythran export dist(float [], float[], 
float[], int, bool, bool, bool) #runas import numpy as np ; N = 20 ; x = np.arange(0., N, 0.1) ; L = 4 ; periodic = True ; dist(x, x, x, L,periodic, periodic, periodic) #bench import numpy as np ; N = 300 ; x = np.arange(0., N, 0.1) ; L = 4 ; periodic = True ; dist(x, x, x, L,periodic, periodic, periodic) import numpy as np def dist(x, y, z, L, periodicX, periodicY, periodicZ): " ""Computes distances between all particles and places the result in a matrix such that the ij th matrix entry corresponds to the distance between particle i and j"" " N = len(x) xtemp = np.tile(x,(N,1)) dx = xtemp - xtemp.T ytemp = np.tile(y,(N,1)) dy = ytemp - ytemp.T ztemp = np.tile(z,(N,1)) dz = ztemp - ztemp.T # Particles 'feel' each other across the periodic boundaries if periodicX: dx[dx>L/2]=dx[dx > L/2]-L dx[dx<-L/2]=dx[dx < -L/2]+L if periodicY: dy[dy>L/2]=dy[dy>L/2]-L dy[dy<-L/2]=dy[dy<-L/2]+L if periodicZ: dz[dz>L/2]=dz[dz>L/2]-L dz[dz<-L/2]=dz[dz<-L/2]+L # Total Distances d = np.sqrt(dx**2+dy**2+dz**2) # Mark zero entries with negative 1 to avoid divergences d[d==0] = -1 return d, dx, dy, dz pythran-0.10.0+ds2/pythran/tests/cases/perm.py000066400000000000000000000015261416264035500212150ustar00rootroot00000000000000#pythran export permutations(int list) #runas permutations([1,4,5,6,12]) #bench in_ = range(9); permutations(in_) def permutations(iterable): """permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)""" out=[] pool = tuple(iterable) n = len(pool) r = n indices = list(range(n)) cycles = list(range(n-r+1, n+1))[::-1] out.append( tuple([pool[i] for i in indices[:r]])) while 1: for i in reversed(range(r)): cycles[i] -= 1 if cycles[i] == 0: indices[i:] = indices[i+1:] + indices[i:i+1] cycles[i] = n - i else: j = cycles[i] indices[i], indices[-j] = indices[-j], indices[i] out.append( tuple([pool[i] for i in indices[:r]])) break else: return out 
pythran-0.10.0+ds2/pythran/tests/cases/pi_buffon.py000066400000000000000000000011041416264035500222110ustar00rootroot00000000000000#pythran export pi_estimate(int,float list, int) #runas pi_estimate(4000,[x/100. for x in range(100)],100) #bench pi_estimate(2200000,[x/1000. for x in range(1000)],1000) from math import sqrt, pow from random import random def pi_estimate(DARTS,rand,randsize): hits = 0 "omp parallel for reduction(+:hits)" for i in range (0, DARTS): x = rand[i%randsize] y = rand[(randsize-i)%randsize] dist = sqrt(pow(x, 2) + pow(y, 2)) if dist <= 1.0: hits += 1.0 # hits / throws = 1/4 Pi pi = 4 * (hits / DARTS) return pi pythran-0.10.0+ds2/pythran/tests/cases/pivot.py000066400000000000000000000017011416264035500214060ustar00rootroot00000000000000#nopythran export pivot(int, int, int list list, int list) #nopythran export pivot(int, int, float list list, float list) #norunas pivot(5,1,[[1,2,4,-6,1],[12,3,8,1,6],[-3,7,13,-6,1],[7,4,-3,1,78],[4,1,8,5,3]],[43,-2,7,1,67]) #norunas pivot(5,1,[[1.4,2.2,4.3,-6.4,1.6],[12.2,3.4,8.4,1.1,6.2],[-3.6,7.8,13.2,-6.1,1.5],[7.2,4.4,-3.5,1.6,78.4],[4.4,1.4,8.2,5.6,3.]],[43.3,-2.3,7.2,1.5,67.6]) #pythran export pivot(int, int, complex list list, complex list) #runas pivot(2,1,[[complex(1.3,-3),complex(3,4)],[complex(10.2,2.3),complex(-3,4)]],[complex(1.2,12.3),complex(-4.3,2.4)]) def pivot(n,i,a,b): i0=i amp0=abs(a[i-1][i-1]) for j in range(i+1,n+1): amp=abs(a[i-1][j-1]) if amp>amp0: i0=j amp0=amp if i==i0: return temp=b[i-1] b[i-1]=b[i0-1]; b[i0-1]=temp; for j in range(i,n+1): temp=a[j-1][i-1] a[j-1][i-1]=a[j-1][i0-1] a[j-1][i0-1]=temp return a,b pythran-0.10.0+ds2/pythran/tests/cases/primes_sieve.py000066400000000000000000000011171416264035500227400ustar00rootroot00000000000000# from http://stackoverflow.com/questions/3939660/sieve-of-eratosthenes-finding-primes-python # using a list instead of generator as return values #pythran export primes_sieve(int) #runas primes_sieve(100) #bench primes_sieve(6000000) def 
primes_sieve(limit): a = [True] * limit # Initialize the primality list a[0] = a[1] = False primes=list() for (i, isprime) in enumerate(a): if isprime: primes.append(i) for n in range(i*i, limit, i): # Mark factors non-prime a[n] = False return primes pythran-0.10.0+ds2/pythran/tests/cases/primes_sieve2.py000066400000000000000000000012731416264035500230250ustar00rootroot00000000000000#pythran export get_primes7(int) #from http://blog.famzah.net/2010/07/01/cpp-vs-python-vs-perl-vs-php-performance-benchmark/ #runas get_primes7(100) #bench get_primes7(7000000) def get_primes7(n): """ standard optimized sieve algorithm to get a list of prime numbers --- this is the function to compare your functions against! --- """ if n < 2: return [] if n == 2: return [2] # do only odd numbers starting at 3 s = list(range(3, n+1, 2)) # n**0.5 simpler than math.sqr(n) mroot = n ** 0.5 half = len(s) i = 0 m = 3 while m <= mroot: if s[i]: j = (m*m-3)//2 # int div s[j] = 0 while j < half: s[j] = 0 j += m i = i+1 m = 2*i+3 return [2]+[x for x in s if x] pythran-0.10.0+ds2/pythran/tests/cases/projection_simplex.py000066400000000000000000000012361416264035500241650ustar00rootroot00000000000000#from https://gist.github.com/mblondel/c99e575a5207c76a99d714e8c6e08e89 #pythran export projection_simplex(float[], int) #runas import numpy as np; np.random.seed(0); x = np.random.rand(10); projection_simplex(x, 1) import numpy as np def projection_simplex(v, z=1): """ Old implementation for test and benchmark purposes. The arguments v and z should be a vector and a scalar, respectively. 
""" n_features = v.shape[0] u = np.sort(v)[::-1] cssv = np.cumsum(u) - z ind = np.arange(n_features) + 1 cond = u - cssv / ind > 0 rho = ind[cond][-1] theta = cssv[cond][-1] / float(rho) w = np.maximum(v - theta, 0) return w pythran-0.10.0+ds2/pythran/tests/cases/pselect.py000066400000000000000000000004171416264035500217070ustar00rootroot00000000000000#pythran export pselect(int) #runas pselect(0) #runas pselect(1) def pselect(n): l=list() for k in (n, not n): if k: a=sel0 else: a=sel1 a(l) return l def sel0(n): n.append(1) def sel1(n): n.append(2.) pythran-0.10.0+ds2/pythran/tests/cases/pythagorean_triples.py000066400000000000000000000011261416264035500243310ustar00rootroot00000000000000#pythran export next_pythagorean_triples(int64[:,:]) #runas import numpy as np; next_pythagorean_triples(np.array([[3, 4, 5]], dtype=np.int64)) import numpy as np def next_pythagorean_triples(previous): matrices = np.array( [[-1, 2, 2], [-2, 1, 2], [-2, 2, 3], [1, 2, 2], [2, 1, 2], [2, 2, 3], [1, -2, 2], [2, -1, 2], [2, -2, 3]]) next_triples = np.transpose(np.dot(matrices, np.transpose(previous))) next_triples = next_triples.reshape((3 * previous.shape[0], previous.shape[1])) return next_triples pythran-0.10.0+ds2/pythran/tests/cases/queens_numba.py000066400000000000000000000023361416264035500227340ustar00rootroot00000000000000#pythran export solve(int) #unittest.skip requires extensive typing, use enable_two_steps_typing from the cfg file def hits(x1, y1, x2, y2): "Check whether a queen positioned at (x1, y1) will hit a queen at position (x2, y2)" return x1 == x2 or y1 == y2 or abs(x1 - x2) == abs(y1 - y2) def hitsany(x, y, queens_x, queens_y): "Check whether a queen positioned at (x1, y1) will hit any other queen" for i in range(len(queens_x)): if hits(x, y, queens_x[i], queens_y[i]): return True return False def _solve(n, queens_x, queens_y): "Solve the queens puzzle" if n == 0: return True for x in range(1, 9): for y in range(1, 9): if not hitsany(x, y, queens_x, queens_y): 
queens_x.append(x) queens_y.append(y) if _solve(n - 1, queens_x, queens_y): return True queens_x.pop() queens_y.pop() return False def solve(n): queens_x = [] queens_y = [] if _solve(n, queens_x, queens_y): return queens_x, queens_y else: return None #print(solve(8)) # %timeit solve(8) # Comment out @jit/@autojit # print(solve(8)) # %timeit solve(8) pythran-0.10.0+ds2/pythran/tests/cases/quicksort.py000066400000000000000000000040531416264035500222740ustar00rootroot00000000000000#pythran export quicksort(int list, int, int) #runas quicksort(list(range(10)),0,9) def partition(list, start, end): pivot = list[end] # Partition around the last value bottom = start-1 # Start outside the area to be partitioned top = end # Ditto done = 0 while not done: # Until all elements are partitioned... while not done: # Until we find an out of place element... bottom = bottom+1 # ... move the bottom up. if bottom == top: # If we hit the top... done = 1 # ... we are done. break if list[bottom] > pivot: # Is the bottom out of place? list[top] = list[bottom] # Then put it at the top... break # ... and start searching from the top. while not done: # Until we find an out of place element... top = top-1 # ... move the top down. if top == bottom: # If we hit the bottom... done = 1 # ... we are done. break if list[top] < pivot: # Is the top out of place? list[bottom] = list[top] # Then put it at the bottom... break # ...and start searching from the bottom. list[top] = pivot # Put the pivot in its place. return top # Return the split point def do_quicksort(list, start, end): if start < end: # If there are two or more elements... split = partition(list, start, end) # ... partition the sublist... do_quicksort(list, start, split-1) # ... and sort both halves. 
do_quicksort(list, split+1, end) def quicksort(l,s,e): do_quicksort(l,s,e) pythran-0.10.0+ds2/pythran/tests/cases/ramsurf.py000066400000000000000000000206721416264035500217340ustar00rootroot00000000000000#pythran export deriv(int, float, float, complex list, complex list, complex list, float list list, float) import cmath #This subroutine finds a root of a polynomial of degree n > 2 # by Laguerre's method. def guerre(a,n,z,err,nter): az = [complex(0,0) for i in range(50)] azz = [complex(0,0) for i in range(50)] ci=complex(0.0,1.0) eps=1.0e-20 # The coefficients of p'[z] and p''[z]. for i in range(1,n+1): az[i-1]=float(i)*a[i] for i in range(1,n): azz[i-1]=float(i)*az[i] dz=err+1 itera=0 jter=0 while abs(dz)>err and iteraamp2: dz=float(-n)/(f+h) else: dz=float(-n)/(f-h) itera=itera+1 # Rotate by 90 degrees to avoid limit cycles. jter=jter+1 if jter==10: jter=1 dz=dz*ci z=z+dz if jter==100: raise RuntimeError("Laguerre method not converging") return z # The root-finding subroutine. def fndrt(a,n): z=[complex(0,0) for k in range(n) ] if n==1: z[0]=-a[0]/a[1] return z if n>2: for k in range(n,2,-1): # Obtain an approximate root. root=complex(0.0,0) err=1.0e-12 root = guerre(a,k,root,err,1000) # Refine the root by iterating five more times. err=0.0; root = guerre(a,k,root,err,5) z[k-1]=root # Divide out the factor [z-root]. for i in range(k,0,-1): a[i-1]=a[i-1]+root*a[i] for i in range(1,k+1): a[i-1]=a[i]; # Solve the quadratic equation. z[1]=0.5*(-a[1]+cmath.sqrt(a[1]*a[1]-4.0*a[0]*a[2]))/a[2] z[0]=0.5*(-a[1]-cmath.sqrt(a[1]*a[1]-4.0*a[0]*a[2]))/a[2] return z # Rows are interchanged for stability. def pivot(n,i,a,b): i0=i amp0=abs(a[i-1][i-1]) for j in range(i+1,n+1): amp=abs(a[i-1][j-1]) if amp>amp0: i0=j amp0=amp if i==i0: return temp=b[i-1] b[i-1]=b[i0-1]; b[i0-1]=temp; for j in range(i,n+1): temp=a[j-1][i-1] a[j-1][i-1]=a[j-1][i0-1] a[j-1][i0-1]=temp def gauss(n,a,b): # Downward elimination. 
for i in range(1,n+1): if i=1: z1=-3.0 b[n-1]=-1.0 for j in range(1,np+1): a[2*j-2][n-1]=z1 ** j a[2*j-1][n-1]=0.0 if ns>=2: z1=-1.5 b[n-2]=-1.0 for j in range(1,np+1): a[2*j-2][n-2]=z1 ** j a[2*j-1][n-2]=0.0 gauss(n,a,b) dh1[0]=1.0 for j in range(1,np+1): dh1[j]=b[2*j-2] dh2=fndrt(dh1,np) for j in range(0,np): pd1[j]=-1.0/dh2[j] dh1[0]=1.0 for j in range(1,np+1): dh1[j]=b[2*j-1] dh2=fndrt(dh1,np) for j in range(0,np): pd2[j]=-1.0/dh2[j] # The tridiagonal matrices. def matrc(nz,np,iz,dz,k0,rhob,alpw,alpb,ksq,ksqw,ksqb,f1,f2,f3,r1,r2,r3,s1,s2,s3,pd1,pd2,izsrf): a1=k0*k0/6.0 a2=2.0*k0*k0/3.0 a3=k0*k0/6.0 cfact=0.5/(dz*dz) dfact=1.0/12.0 for i in range(0,iz): f1[i]=1.0/alpw[i] f2[i]=1.0 f3[i]=alpw[i] ksq[i]=ksqw[i] ii=0 for i in range(iz,nz+2): f1[i]=rhob[ii]/alpb[ii] f2[i]=1.0/rhob[ii] f3[i]=alpb[ii] ksq[i]=ksqb[ii] ii+=1 for i in range(1,nz+1): # Discretization by Galerkin's method. c1=cfact*f1[i]*(f2[i-1]+f2[i])*f3[i-1] c2=-cfact*f1[i]*(f2[i-1]+2.0*f2[i]+f2[i+1])*f3[i] c3=cfact*f1[i]*(f2[i]+f2[i+1])*f3[i+1] d1=c1+dfact*(ksq[i-1]+ksq[i]) d2=c2+dfact*(ksq[i-1]+complex(6.0,0)*ksq[i]+ksq[i+1]) d3=c3+dfact*(ksq[i]+ksq[i+1]) for j in range(0,np): r1[j][i]=a1+pd2[j]*d1 r2[j][i]=a2+pd2[j]*d2 r3[j][i]=a3+pd2[j]*d3 s1[j][i]=a1+pd1[j]*d1 s2[j][i]=a2+pd1[j]*d2 s3[j][i]=a3+pd1[j]*d3 # The entries above the surface. for j in range(0,np): for i in range(0,izsrf): r1[j][i]=0.0 r2[j][i]=1.0 r3[j][i]=0.0 s1[j][i]=0.0 s2[j][i]=0.0 s3[j][i]=0.0 # The matrix decomposition. for j in range(0,np): for i in range(1,nz+1): rfact=complex(1.0,0)/(r2[j][i]-r1[j][i]*r3[j][i-1]) r1[j][i]=r1[j][i]*rfact r3[j][i]=r3[j][i]*rfact s1[j][i]=s1[j][i]*rfact s2[j][i]=s2[j][i]*rfact s3[j][i]=s3[j][i]*rfact ## Matrix updates. #def updat(fs1,nz,np,iz,ib,dr,dz,eta,omega,rmax,c0,k0,ci,r,rp,rs,rb,zb,cw,cb,rhob,attn, \ #alpw,alpb,ksq,ksqw,ksqb,f1,f2,f3,r1,r2,r3,s1,s2,s3,pd1,pd2,rsrf,zsrf,izsrf,isrf,attw): ## Varying bathymetry. 
# if r>=rb[ib]: # ib=ib+1 # if r>=rsrf[isrf]: # isrf=isrf+1 # jzsrf=izsrf # z=zsrf[isrf-1]+(r+0.5*dr-rsrf[isrf-1])*(zsrf[isrf]-zsrf[isrf-1])/(rsrf[isrf]-rsrf[isrf-1]) # izsrf=int(z/dz) # jz=iz # z=zb[ib-1]+(r+0.5*dr-rb[ib-1])*(zb[ib]-zb[ib-1])/(rb[ib]-rb[ib-1]) # iz=int(1.0+z/dz) # iz=max(2,iz) # iz=min(nz,iz) # if iz!=jz or izsrf != jzsrf: # matrc(nz,np,iz,dz,k0,rhob,alpw,alpb,ksq,ksqw,ksqb,f1,f2,f3,r1,r2,r3,s1,s2,s3,pd1,pd2,izsrf) ## Varying profiles. # if r>=rp: # rp = profl(fs1,nz,ci,dz,eta,omega,rmax,c0,k0,rp,cw,cb,rhob,attn,alpw,alpb,ksqw,ksqb,attw) # matrc(nz,np,iz,dz,k0,rhob,alpw,alpb,ksq,ksqw,ksqb,f1,f2,f3,r1,r2,r3,s1,s2,s3,pd1,pd2,izsrf) ## Turn off the stability constraints. # if r>=rs: # ns=0 # epade(np,ns,1,k0,dr,pd1,pd2) # matrc(nz,np,iz,dz,k0,rhob,alpw,alpb,ksq,ksqw,ksqb,f1,f2,f3,r1,r2,r3,s1,s2,s3,pd1,pd2,izsrf) # return ib,isrf,izsrf,iz,rp pythran-0.10.0+ds2/pythran/tests/cases/rand_mat_stat.py000066400000000000000000000014071416264035500230700ustar00rootroot00000000000000import numpy as np from numpy import trace, concatenate, dot from numpy.random import randn from numpy.linalg import matrix_power # pythran export rand_mat_stat(int) def rand_mat_stat(t): n = 5 v = np.empty(t) w = np.empty(t) for i in range(t): a = randn(n, n) b = randn(n, n) c = randn(n, n) d = randn(n, n) P = concatenate((a, b, c, d), axis=1) Q = concatenate((concatenate((a, b), axis=1), concatenate((c, d), axis=1)), axis=0) # v[i] = trace(matrix_power(P.T @ P, 4)) # w[i] = trace(matrix_power(Q.T @ Q, 4)) v[i] = trace(matrix_power(dot(P.T, P), 4)) w[i] = trace(matrix_power(dot(Q.T, Q), 4)) return np.std(v)/np.mean(v), np.std(w)/np.mean(w) pythran-0.10.0+ds2/pythran/tests/cases/rc4.py000066400000000000000000000013141416264035500207350ustar00rootroot00000000000000#from http://www.emoticode.net/python/python-implementation-of-rc4-algorithm.html #runas data = "e"*100 ; key = "f"*3 ; rc4_crypt(data, key) #bench data = "e"*2000000 ; key = "f"*3 ; rc4_crypt(data, key) #pythran export 
rc4_crypt(str, str) #unittest.python3.skip #RC4 Implementation def rc4_crypt( data , key ): S = range(256) j = 0 out = [] #KSA Phase for i in range(256): j = (j + S[i] + ord( key[i % len(key)] )) % 256 S[i] , S[j] = S[j] , S[i] #PRGA Phase for char in data: i = j = 0 i = (i + 1) % 256 j = (j + S[i]) % 256 S[i], S[j] = S[j], S[i] out.append(chr(ord(char) ^ S[(S[i] + S[j]) % 256])) return ''.join(out) pythran-0.10.0+ds2/pythran/tests/cases/repeating.py000066400000000000000000000007361416264035500222320ustar00rootroot00000000000000#from: http://stackoverflow.com/questions/14553331/how-to-improve-numpy-performance-in-this-short-code #pythran export repeating(float[], int) #runas import numpy as np ; a = np.arange(10, dtype=float); repeating(a, 2) #bench import numpy as np ; a = np.random.rand(10000); repeating(a, 20) import numpy as np def repeating(x, nvar_y): nvar_x = x.shape[0] y = np.empty(nvar_x*(1+nvar_y)) y[0:nvar_x] = x[0:nvar_x] y[nvar_x:] = np.repeat(x,nvar_y) return y pythran-0.10.0+ds2/pythran/tests/cases/reverse_cumsum.py000066400000000000000000000005761416264035500233220ustar00rootroot00000000000000#from: http://stackoverflow.com/questions/16541618/perform-a-reverse-cumulative-sum-on-a-numpy-array #pythran export reverse_cumsum(float[]) #runas import numpy as np ; r = np.arange(10, dtype=float) ; reverse_cumsum(r) #bench import numpy as np ; r = np.arange(1000000, dtype=float) ; reverse_cumsum(r) import numpy as np def reverse_cumsum(x): return np.cumsum(x[::-1])[::-1] pythran-0.10.0+ds2/pythran/tests/cases/roman_decode.py000066400000000000000000000007021416264035500226640ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Roman_numerals/Decode#Python #runas decode('MCMXC') #runas decode('MMVIII') #runas decode('MDCLXVI') #pythran export decode(str) def decode( roman ): s, t = 'MDCLXVI', (1000, 500, 100, 50, 10, 5, 1) _rdecode = dict(zip(s, t)) result = 0 for r, r1 in zip(roman, roman[1:]): rd, rd1 = _rdecode[r], _rdecode[r1] result += -rd if 
rd < rd1 else rd return result + _rdecode[roman[-1]] pythran-0.10.0+ds2/pythran/tests/cases/rosen.py000066400000000000000000000004741416264035500214010ustar00rootroot00000000000000import numpy as np #runas import numpy as np; r = np.arange(0.,10., .01); rosen(r) #bench import numpy as np; r = np.arange(50000000); rosen(r) #pythran export rosen(int[]) #pythran export rosen(float[]) def rosen(x): t0 = 100 * (x[1:] - x[:-1] ** 2) ** 2 t1 = (1 - x[:-1]) ** 2 return np.sum(t0 + t1) pythran-0.10.0+ds2/pythran/tests/cases/scrabble.py000066400000000000000000000014241416264035500220240ustar00rootroot00000000000000#from http://stackoverflow.com/questions/18345202/functional-vs-imperative-style-in-python #pythran export scrabble_fun_score(str, str: int dict) #pythran export scrabble_imp_score(str, str: int dict) #runas scrabble_fun_score('tralala', {'t': 1, 'r': 2, 'a': 3, 'l': 4}) #runas scrabble_fun_score('tralala', {'t': 1, 'r': 2, 'a': 3, 'l': 4}) #bench import string; import random; a = "".join([random.choice(string.letters) for i in range(12000000)]); v = dict(zip(string.letters, range(1000))); scrabble_fun_score(a, v) def scrabble_fun_score(word, scoretable): return sum([scoretable.get(x, 0) for x in word]) def scrabble_imp_score(word, scoretable): score = 0 for letter in word: if letter in scoretable: score += scoretable[letter] return score pythran-0.10.0+ds2/pythran/tests/cases/sexy_primes.py000066400000000000000000000005671416264035500226250ustar00rootroot00000000000000#from http://stackoverflow.com/questions/11641098/interpreting-a-benchmark-in-c-clojure-python-ruby-scala-and-others #pythran export primes_below(int) #runas primes_below(1000) #bench primes_below(15000) def is_prime(n): return all((n%j > 0) for j in range(2, n)) def primes_below(x): return [[j-6, j] for j in range(9, x+1) if is_prime(j) and is_prime(j-6)] pythran-0.10.0+ds2/pythran/tests/cases/shallow_water.py000066400000000000000000000051231416264035500231220ustar00rootroot00000000000000#pythran 
export run(int, int, int) #runas run(10,10,10) #from https://raw.githubusercontent.com/cphhpc/numpy/victim_cache/benchmark/Python/shallow_water.py import numpy as np def model(height, width, dtype): m = np.ones((height, width),dtype=dtype) m[height//4,width//4] = 6.0 return m def step(H, U, V, dt=0.02, dx=1.0, dy=1.0): g = 9.80665 # gravitational acceleration # Reflecting boundary conditions H[:,0] = H[:,1] ; U[:,0] = U[:,1] ; V[:,0] = -V[:,1] H[:,-1] = H[:,-2] ; U[:,-1] = U[:,-2] ; V[:,-1] = -V[:,-2] H[0,:] = H[1,:] ; U[0,:] = -U[1,:] ; V[0,:] = V[1,:] H[-1,:] = H[-2,:] ; U[-1,:] = -U[-2,:] ; V[-1,:] = V[-2,:] #First half step # height Hx = (H[1:,1:-1]+H[:-1,1:-1])//2 - dt//(2*dx)*(U[1:,1:-1]-U[:-1,1:-1]) # x momentum Ux = (U[1:,1:-1]+U[:-1,1:-1])//2 - \ dt/(2*dx) * ((U[1:,1:-1]**2//H[1:,1:-1] + g//2*H[1:,1:-1]**2) - (U[:-1,1:-1]**2//H[:-1,1:-1] + g//2*H[:-1,1:-1]**2)) # y momentum Vx = (V[1:,1:-1]+V[:-1,1:-1])//2 - \ dt//(2*dx) * ((U[1:,1:-1]*V[1:,1:-1]//H[1:,1:-1]) - (U[:-1,1:-1]*V[:-1,1:-1]//H[:-1,1:-1])) # height Hy = (H[1:-1,1:]+H[1:-1,:-1])//2 - dt//(2*dy)*(V[1:-1,1:]-V[1:-1,:-1]) #x momentum Uy = (U[1:-1,1:]+U[1:-1,:-1])//2 - \ dt//(2*dy)*((V[1:-1,1:]*U[1:-1,1:]//H[1:-1,1:]) - (V[1:-1,:-1]*U[1:-1,:-1]//H[1:-1,:-1])) #y momentum Vy = (V[1:-1,1:]+V[1:-1,:-1])//2 - \ dt//(2*dy)*((V[1:-1,1:]**2//H[1:-1,1:] + g//2*H[1:-1,1:]**2) - (V[1:-1,:-1]**2//H[1:-1,:-1] + g//2*H[1:-1,:-1]**2)) #Second half step # height H[1:-1,1:-1] -= (dt//dx)*(Ux[1:,:]-Ux[:-1,:]) + (dt//dy)*(Vy[:,1:]-Vy[:,:-1]) # x momentum U[1:-1,1:-1] -= (dt//dx)*((Ux[1:,:]**2//Hx[1:,:] + g//2*Hx[1:,:]**2) - (Ux[:-1,:]**2//Hx[:-1,:] + g//2*Hx[:-1,:]**2)) + \ (dt//dy)*((Vy[:,1:] * Uy[:,1:]//Hy[:,1:]) - (Vy[:,:-1] * Uy[:,:-1]//Hy[:,:-1])) # y momentum V[1:-1,1:-1] -= (dt//dx)*((Ux[1:,:] * Vx[1:,:]//Hx[1:,:]) - (Ux[:-1,:]*Vx[:-1,:]//Hx[:-1,:])) + \ (dt//dy)*((Vy[:,1:]**2//Hy[:,1:] + g//2*Hy[:,1:]**2) - (Vy[:,:-1]**2//Hy[:,:-1] + g//2*Hy[:,:-1]**2)) return (H, U, V) def simulate(H, timesteps): U = 
def smoothing(x, alpha):
    """Exponentially smooth the time series x into a fresh array.

    out[0] = x[0]; out[i] = alpha * x[i] + (1 - alpha) * out[i - 1].
    """
    out = x.copy()
    decay = 1 - alpha
    for idx in range(1, len(x)):
        out[idx] = alpha * x[idx] + decay * out[idx - 1]
    return out
def sobelFilter(original_image, cols, rows):
    """Sobel edge detector on a flat, row-major grayscale image.

    Returns a new flat list in which the one-pixel border is forced to
    white (255) and each interior pixel holds 255 minus the clamped
    gradient magnitude, so edges come out dark on a light background.
    """
    edges = list(range(len(original_image)))

    # Force the full one-pixel border to white.
    for r in range(rows):
        edges[r * cols] = 255
        edges[(r + 1) * cols - 1] = 255
    for c in range(1, cols - 1):
        edges[c] = 255
        edges[c + (rows - 1) * cols] = 255

    for y in range(1, rows - 1):
        for x in range(1, cols - 1):
            # Horizontal gradient (Sobel x kernel), clamped to [0, 255].
            gx = (-original_image[x - 1 + (y - 1) * cols]
                  - 2 * original_image[x + (y - 1) * cols]
                  - original_image[x + 1 + (y - 1) * cols]
                  + original_image[x - 1 + (y + 1) * cols]
                  + 2 * original_image[x + (y + 1) * cols]
                  + original_image[x + 1 + (y + 1) * cols])
            gx = min(255, max(0, gx))

            # Vertical gradient (Sobel y kernel), clamped to [0, 255].
            gy = (original_image[x - 1 + (y - 1) * cols]
                  - original_image[x + 1 + (y - 1) * cols]
                  + 2 * original_image[x - 1 + y * cols]
                  - 2 * original_image[x + 1 + y * cols]
                  + original_image[x - 1 + (y + 1) * cols]
                  - original_image[x + 1 + (y + 1) * cols])
            gy = min(255, max(0, gy))

            # Gradient magnitude approximation, inverted so that edges
            # are black and the background white.
            magnitude = abs(gx) + abs(gy)
            edges[x + y * cols] = 255 - (255 & magnitude)
    return edges
def cross1(c, a, b):
    """Write a component-wise product combination of a and b into c.

    NOTE(review): c[0] uses a[0]*b[2] - a[2]*b[1], which differs from
    the standard cross product (a[1]*b[2] - a[2]*b[1]).  Presumably
    intentional for this benchmark — confirm against upstream before
    changing.
    """
    a0, a1, a2 = a[0], a[1], a[2]
    b0, b1, b2 = b[0], b[1], b[2]
    c[0] = a0 * b2 - a2 * b1
    c[1] = a2 * b0 - a0 * b2
    c[2] = a0 * b1 - a1 * b0
    return c
* * The major FORTRAN comment blocks were retained to minimize * differences between versions. Modules N5 and N12, like in the * FORTRAN version, have been eliminated here. * * An optional command-line argument has been provided [-c] to * offer continuous repetition of the entire benchmark. * An optional argument for setting an alternate LOOP count is also * provided. Define PRINTOUT to cause the POUT() function to print * outputs at various stages. Final timing measurements should be * made with the PRINTOUT undefined. * * Questions and comments may be directed to the author at * r.painter@ieee.org */ """ from math import sin as DSIN, cos as DCOS, atan as DATAN, log as DLOG, exp as DEXP, sqrt as DSQRT def whetstone(loopstart): # The actual benchmark starts here. T = .499975; T1 = 0.50025; T2 = 2.0; # With loopcount LOOP=10, one million Whetstone instructions # will be executed in EACH MAJOR LOOP..A MAJOR LOOP IS EXECUTED # 'II' TIMES TO INCREASE WALL-CLOCK TIMING ACCURACY. LOOP = loopstart; II = 1; JJ = 1; while JJ <= II: N1 = 0; N2 = 12 * LOOP; N3 = 14 * LOOP; N4 = 345 * LOOP; N6 = 210 * LOOP; N7 = 32 * LOOP; N8 = 899 * LOOP; N9 = 616 * LOOP; N10 = 0; N11 = 93 * LOOP; # Module 1: Simple identifiers X1 = 1.0; X2 = -1.0; X3 = -1.0; X4 = -1.0; for I in range(1,N1+1): X1 = (X1 + X2 + X3 - X4) * T; X2 = (X1 + X2 - X3 + X4) * T; X3 = (X1 - X2 + X3 + X4) * T; X4 = (-X1+ X2 + X3 + X4) * T; # Module 2: Array elements E1 = [ 1.0, -1.0, -1.0, -1.0 ] for I in range(1,N2+1): E1[0] = ( E1[0] + E1[1] + E1[2] - E1[3]) * T; E1[1] = ( E1[0] + E1[1] - E1[2] + E1[3]) * T; E1[2] = ( E1[0] - E1[1] + E1[2] + E1[3]) * T; E1[3] = (-E1[0] + E1[1] + E1[2] + E1[3]) * T; # Module 3: Array as parameter for I in range(1,N3+1): PA(E1, T, T2); # Module 4: Conditional jumps J = 1; for I in range(1,N4+1): if J == 1: J = 2; else: J = 3; if J > 2: J = 0; else: J = 1; if J < 1: J = 1; else: J = 0; # Module 5: Omitted # Module 6: Integer arithmetic J = 1; K = 2; L = 3; for I in range(1,N6+1): J = J * 
def isprime(n):
    """Return True if n is prime and False otherwise (trial division)."""
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need checking; the original also
    # shadowed the builtin `max` with its loop bound.
    limit = int(math.ceil(math.sqrt(n)))
    for i in range(3, limit + 1, 2):
        if n % i == 0:
            return False
    return True


def sum_primes(n):
    """Calculate the sum of all primes strictly below the integer n."""
    return sum(x for x in range(2, n) if isprime(x))
def tax(d):
    """Average income-tax rate over the incomes in d (bracketed formula)."""
    top = d > 256303
    upper = (d > 54057) & ~top
    mid_band = d[(d > 13769) & (d <= 54057)] - 13769
    low_band = d[(d > 8820) & (d <= 13769)] - 8820

    # Per-bracket contributions; empty brackets sum to 0.0.
    total = np.sum(d[top] * 0.45 - 16164.53)
    total += np.sum(d[upper] * 0.42 - 8475.44)
    total += np.sum(mid_band * (mid_band * 0.0000022376 + 0.2397) + 939.57)
    total += np.sum(low_band * (low_band * 0.0000100727 + 0.14))
    return total / np.sum(d)
pythran-0.10.0+ds2/pythran/tests/cases/train_eq.py000066400000000000000000000050661416264035500220570ustar00rootroot00000000000000import numpy as np #runas import numpy as np; np.random.seed(0); s=np.array([1+1j, -1+1j, -1-1j, 1-1j]); e=np.random.choice(s, (1, 100)); ee=e/np.sqrt(np.mean(abs(e)**2)); eee=ee+0.02*(np.random.randn(*ee.shape)+1j*np.random.randn(*ee.shape)); r=(np.sqrt(2)/2+0j);wx=np.zeros((1,10), dtype=eee.dtype); wx[:,4]=1; train_eq(eee.astype(np.complex64), eee.shape[1]-15, 1, np.float32(1e-3), wx.astype(np.complex64), (np.complex64(r),s.astype(np.complex64)), False) #runas import numpy as np; np.random.seed(0); s=np.array([1+1j, -1+1j, -1-1j, 1-1j]); e=np.random.choice(s, (1, 100)); ee=e/np.sqrt(np.mean(abs(e)**2)); eee=ee+0.02*(np.random.randn(*ee.shape)+1j*np.random.randn(*ee.shape)); r=(np.sqrt(2)/2+0j);wx=np.zeros((1,10), dtype=eee.dtype); wx[:,4]=1; train_eq2(eee.astype(np.complex64), eee.shape[1]-15, 1, np.float32(1e-3), wx.astype(np.complex64), (np.complex64(r),s.astype(np.complex64)), False) #pythran export train_eq(complex64[][], int, int, float32, # complex64[][], # (complex64, complex64[]), bool) #pythran export train_eq2(complex64[][], int, int, float32, # complex64[][], # (complex64, complex64[]), bool) def train_eq(E, TrSyms, os, mu, wx, errfctprs, adapt): Ntaps = wx.shape[1] pols = wx.shape[0] R, symbs = errfctprs err = np.zeros(TrSyms, dtype=E.dtype) for i in range(TrSyms): X = E[:, i * os:i * os + Ntaps] Xest = np.sum(np.conj(wx) * X) err[i] = (R.real - abs(Xest)**2)*Xest wx += mu*np.conj(err[i])*X if adapt and i>0: if err[i].real*err[i-1].real > 0 and err[i].imag*err[i-1].imag > 0: mu = mu else: mu = np.float32(mu/(1+mu*(err[i].real*err[i].real + err[i].imag*err[i].imag))) return err, wx, mu def train_eq2(E, TrSyms, os, mu, wx, errfctprs, adapt): Ntaps = wx.shape[1] pols = wx.shape[0] R, symbs = errfctprs err = np.zeros(TrSyms, dtype=E.dtype) for i in range(TrSyms): X = E[:, i * os:i * os + Ntaps] Xest = np.sum(np.conj(wx) * X) 
def train_eq2(E, TrSyms, os, mu, wx, errfctprs, adapt):
    """CMA equaliser training loop (second exported variant).

    E         : (modes, samples) received field
    TrSyms    : number of training symbols to process
    os        : oversampling factor (input samples per symbol)
    mu        : step size, possibly adapted per symbol when `adapt`
    wx        : (modes, taps) filter taps, updated in place
    errfctprs : (R, symbols) pair; only R's real part is used here
    adapt     : when True, damp mu whenever the error changes sign

    Returns (err, wx, mu).
    """
    Ntaps = wx.shape[1]
    R, _symbs = errfctprs
    err = np.zeros(TrSyms, dtype=E.dtype)
    for i in range(TrSyms):
        X = E[:, i * os:i * os + Ntaps]
        Xest = np.sum(np.conj(wx) * X)
        # CMA error: drive |Xest|^2 towards the radius R.
        err[i] = (R.real - abs(Xest)**2) * Xest
        wx += mu * np.conj(err[i]) * X
        if adapt and i > 0:
            # Keep mu while the error keeps its sign in both
            # quadratures; a sign flip indicates overshoot, so damp.
            same_sign = (err[i].real * err[i - 1].real > 0
                         and err[i].imag * err[i - 1].imag > 0)
            if not same_sign:
                mu = np.float32(mu / (1 + mu * (err[i].real * err[i].real
                                                + err[i].imag * err[i].imag)))
    return err, wx, mu
def apply_filter(E, wx):
    """Inner product of the field block E with the conjugated taps wx.

    Returns sum over all (mode, tap) of E[mode, tap] * conj(wx[mode, tap]),
    accumulated as a scalar of E's dtype.
    """
    n_modes = E.shape[0]
    n_taps = wx.shape[1]
    acc = E.dtype.type(0)
    for mode in range(n_modes):
        for tap in range(n_taps):
            acc += E[mode, tap] * np.conj(wx[mode, tap])
    return acc
def calculate_vibr_energy(harmonic, anharmonic, i):
    """Vibrational-energy factor exp(-(harmonic*i + anharmonic*i**2)).

    Broadcasts over array or scalar `i`.
    """
    exponent = harmonic * i + anharmonic * i ** 2
    return numpy.exp(-exponent)
def slow_wdist(A, B, W):
    """Weighted Euclidean distance between columns of A and columns of B.

    D[i, j] = || (A[:, i] - B[:, j]) / W[:, i] ||_2
    """
    n_a = A.shape[1]
    n_b = B.shape[1]
    D = np.zeros((n_a, n_b))
    for i in range(n_a):
        for j in range(n_b):
            diff = (A[:, i] - B[:, j]) / W[:, i]
            D[i, j] = np.sqrt(np.sum(diff ** 2))
    return D
# cython: np_pythran=True
import numpy as np
cimport numpy as cnp


def test():
    """
    >>> test()
    """
    lx, ly = (2**7, 2**7)
    u = np.zeros([lx, ly], dtype=np.double)
    u[lx // 2, ly // 2] = 1000.0
    diffuse_numpy(u, 500)
    return u


def diffuse_numpy(cnp.ndarray[double, ndim=2] u, int N):
    """
    Apply Numpy matrix for the Forward-Euler Approximation
    """
    cdef cnp.ndarray[double, ndim=2] temp = np.zeros_like(u)
    mu = 0.1
    for n in range(N):
        # `2` replaces the Python-2-only long literal `2L`, which is a
        # syntax error under Python 3 / Cython language_level=3.
        temp[1:-1, 1:-1] = u[1:-1, 1:-1] + mu * (
            u[2:, 1:-1] - 2 * u[1:-1, 1:-1] + u[0:-2, 1:-1] +
            u[1:-1, 2:] - 2 * u[1:-1, 1:-1] + u[1:-1, 0:-2])
        u[:, :] = temp[:, :]
        temp[:, :] = 0.0
from distutils.core import setup
from Cython.Build import cythonize

# Build the `add` extension in place, then smoke-test it.
setup(
    name="add",
    ext_modules=cythonize('add.pyx'),
    script_name='setup.py',
    script_args=['build_ext', '--inplace'],
)

import add
import numpy as np

print(add.add(np.ones((3, 3)), np.ones((3, 3))))
8820) & (d <= 13769)] - 8820 prog_seg3 = seg3 * 0.0000022376 + 0.2397 prog_seg4 = seg4 * 0.0000100727 + 0.14 return ( np.sum(tax_seg1) + np.sum(tax_seg2) + np.sum(seg3 * prog_seg3 + 939.57) + np.sum(seg4 * prog_seg4) ) / np.sum(d) pythran-0.10.0+ds2/pythran/tests/euler/000077500000000000000000000000001416264035500177125ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/euler/README000066400000000000000000000014231416264035500205720ustar00rootroot00000000000000All this files are taken from http://www.s-anand.net/euler.html. The only sensible modification is that they have been turned into function with no parameters returning a value instead of printing something. Special cases are listed below: euler07: manual import prime expansion euler11: manual tuple to list conversion euler12: manual import prime expansion euler18: manual tuple to list conversion euler27: manual import prime expansion euler31: manual tuple to list conversion euler32: manual import combinatorics expansion euler34: manual tuple to list conversion euler38: turn polymorphic variable into monomorphic variable euler47: when importing factros from prime, rename it as prime_factor to prevent name clashing euler55: turn polymorphic variable into monomorphic variable pythran-0.10.0+ds2/pythran/tests/euler/euler01.py000066400000000000000000000005771416264035500215520ustar00rootroot00000000000000#pythran export solve(int) #runas solve(1000) def solve(max): ''' If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000. 
# pythran export solve(int, int)
# runas solve(1, 10)
def solve(start, end):
    """
    2520 is the smallest number that can be divided by each of the
    numbers from 1 to 10 without any remainder.

    What is the smallest number that is evenly divisible by all of the
    numbers from 1 to 20?

    Returns the least common multiple of start..end inclusive, as an
    exact integer.
    """
    def gcd(a, b):
        # Euclid's algorithm; `b and ... or a` yields a when b == 0.
        return b and gcd(b, a % b) or a

    def lcm(a, b):
        # Floor division keeps the result an exact int; the original
        # used `/`, which under Python 3 is true division and returns
        # an inexact float.
        return a * b // gcd(a, b)

    n = 1
    for i in range(start, end + 1):
        n = lcm(n, i)
    return n
up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def prime(x): ''' Returns the xth prime ''' lastn = prime_list[-1] while len(prime_list) <= x: # Keep working until we've got the xth prime lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access return prime_list[x] return prime(p - 1) pythran-0.10.0+ds2/pythran/tests/euler/euler08.py000066400000000000000000000045771416264035500215650ustar00rootroot00000000000000#runas solve(5) #pythran export solve(int) def solve(cons): ''' Find the greatest product of five consecutive digits in the 1000-digit number. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450 ''' s = 
'7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450' n = 0 for i in range(0, len(s)-4): p = 1 for j in range(i,i+cons): p = p * int(s[j]) if p > n: n = p return n pythran-0.10.0+ds2/pythran/tests/euler/euler09.py000066400000000000000000000007741416264035500215610ustar00rootroot00000000000000#pythran export solve(int) #runas solve(1000) def solve(v): ''' A Pythagorean triplet is a set of three natural numbers, a b c, for which, a^2 + b^2 = c^2 For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2. There exists exactly one Pythagorean triplet for which a + b + c = 1000. Find the product abc. 
''' for a in range(1, v): for b in range(a, v): c = v - a - b if c > 0: if c*c == a*a + b*b: return a*b*c pythran-0.10.0+ds2/pythran/tests/euler/euler11.py000066400000000000000000000044751416264035500215540ustar00rootroot00000000000000#runas solve(4) #pythran export solve(int) def solve(adj): nums = [ [ 8, 2,22,97,38,15, 0,40, 0,75, 4, 5, 7,78,52,12,50,77,91, 8,], [49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48, 4,56,62, 0,], [81,49,31,73,55,79,14,29,93,71,40,67,53,88,30, 3,49,13,36,65,], [52,70,95,23, 4,60,11,42,69,24,68,56, 1,32,56,71,37, 2,36,91,], [22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80,], [24,47,32,60,99, 3,45, 2,44,75,33,53,78,36,84,20,35,17,12,50,], [32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70,], [67,26,20,68, 2,62,12,20,95,63,94,39,63, 8,40,91,66,49,94,21,], [24,55,58, 5,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72,], [21,36,23, 9,75, 0,76,44,20,45,35,14, 0,61,33,97,34,31,33,95,], [78,17,53,28,22,75,31,67,15,94, 3,80, 4,62,16,14, 9,53,56,92,], [16,39, 5,42,96,35,31,47,55,58,88,24, 0,17,54,24,36,29,85,57,], [86,56, 0,48,35,71,89, 7, 5,44,44,37,44,60,21,58,51,54,17,58,], [19,80,81,68, 5,94,47,69,28,73,92,13,86,52,17,77, 4,89,55,40,], [ 4,52, 8,83,97,35,99,16, 7,97,57,32,16,26,26,79,33,27,98,66,], [88,36,68,87,57,62,20,72, 3,46,33,67,46,55,12,32,63,93,53,69,], [ 4,42,16,73,38,25,39,11,24,94,72,18, 8,46,29,32,40,62,76,36,], [20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74, 4,36,16,], [20,73,35,29,78,31,90, 1,74,31,49,71,48,86,81,16,23,57, 5,54,], [ 1,70,54,71,83,51,54,69,16,92,33,48,61,43,52, 1,89,19,67,48,], ] def seqs(nums, row, col): if row + adj <= len(nums): yield list(nums[i][col] for i in range(row, row+adj)) if col + adj <= len(nums[row]): yield list(nums[row][i] for i in range(col, col+adj)) if row + adj <= len(nums) and col + adj <= len(nums[row]):yield list(nums[row+i][col+i] for i in range(0,adj)) if row + adj <= len(nums) and col >= adj - 1: yield list(nums[row+i][col-i] for i in range(0,adj)) def 
product(seq): n = 1 for x in seq: n = n * x return n def list_seqs(nums): for row in range(0, len(nums)): for col in range(0, len(nums[row])): for seq in seqs(nums, row, col): yield seq return max(product(seq) for seq in list_seqs(nums)) pythran-0.10.0+ds2/pythran/tests/euler/euler12.py000066400000000000000000000031031416264035500215400ustar00rootroot00000000000000#runas solve(500) #pythran export solve(int) def solve(nfact): prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def prime(x): ''' Returns the xth prime ''' lastn = prime_list[-1] while len(prime_list) <= x: # Keep working until we've got the xth prime lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access return prime_list[x] def num_factors(n): ''' Returns the number of factors of n, including 1 and n ''' div = 1 x = 0 while n > 1: c = 1 while not n % prime(x): c = c + 1 n = n // prime(x) x = x + 1 div = div * c return div for i in range(1, 1000000000): n = i * (i+1) // 2 if num_factors(n) > nfact: return n break pythran-0.10.0+ds2/pythran/tests/euler/euler17.py000066400000000000000000000035531416264035500215560ustar00rootroot00000000000000#runas solve(1, 1000) #pythran export solve(int, int) #FIXME unittest.skip conflicting name for end def solve(start, end): ''' How many letters would be needed to write all the numbers in words from 1 to 1000? 
''' words = [ ( 1, 'one' , '' ), ( 2, 'two' , '' ), ( 3, 'three' , '' ), ( 4, 'four' , '' ), ( 5, 'five' , '' ), ( 6, 'six' , '' ), ( 7, 'seven' , '' ), ( 8, 'eight' , '' ), ( 9, 'nine' , '' ), ( 10, 'ten' , '' ), ( 11, 'eleven' , '' ), ( 12, 'twelve' , '' ), ( 13, 'thirteen' , '' ), ( 14, 'fourteen' , '' ), ( 15, 'fifteen' , '' ), ( 16, 'sixteen' , '' ), ( 17, 'seventeen', '' ), ( 18, 'eighteen' , '' ), ( 19, 'nineteen' , '' ), ( 20, 'twenty' , '' ), ( 30, 'thirty' , '' ), ( 40, 'forty' , '' ), ( 50, 'fifty' , '' ), ( 60, 'sixty' , '' ), ( 70, 'seventy' , '' ), ( 80, 'eighty' , '' ), ( 90, 'ninety' , '' ), ( 100, 'hundred' , 'and' ), (1000, 'thousand' , 'and' ), ] words.reverse() def spell(n, words): word = [] while n > 0: for num in words: if num[0] <= n: div = n / num[0] n = n % num[0] if num[2]: word.append(' '.join(spell(div, words))) word.append(num[1]) if num[2] and n: word.append(num[2]) break return word return sum(len(word) for n in range(start, end + 1) for word in spell(n, words)) pythran-0.10.0+ds2/pythran/tests/euler/euler18.py000066400000000000000000000046271416264035500215620ustar00rootroot00000000000000#runas solve(16384) #pythran export solve(int) def solve(max_route): ''' By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23. 3 7 5 2 4 6 8 5 9 3 That is, 3 + 7 + 4 + 9 = 23. Find the maximum total from top to bottom of the triangle below: 75 95 64 17 47 82 18 35 87 10 20 04 82 47 65 19 01 23 75 03 34 88 02 77 73 07 63 67 99 65 04 28 06 16 70 92 41 41 26 56 83 40 80 70 33 41 48 72 33 47 32 37 16 94 29 53 71 44 65 25 43 91 52 97 51 14 70 11 33 28 77 73 17 78 39 68 17 57 91 71 52 38 17 14 91 43 58 50 27 29 48 63 66 04 68 89 53 67 30 73 16 69 87 40 31 04 62 98 27 23 09 70 98 73 93 38 53 60 04 23 NOTE: As there are only 16384 routes, it is possible to solve this problem by trying every route. 
However, Problem 67, is the same challenge with a triangle containing one-hundred rows; it cannot be solved by brute force, and requires a clever method! ;o) ''' triangle = [ [75, ], [95, 64, ], [17, 47, 82, ], [18, 35, 87, 10, ], [20, 4, 82, 47, 65, ], [19, 1, 23, 75, 3, 34, ], [88, 2, 77, 73, 7, 63, 67, ], [99, 65, 4, 28, 6, 16, 70, 92, ], [41, 41, 26, 56, 83, 40, 80, 70, 33, ], [41, 48, 72, 33, 47, 32, 37, 16, 94, 29, ], [53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14, ], [70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57, ], [91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48, ], [63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31, ], [ 4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23, ], ] def path(triangle, num): s = triangle[0][0] col = 0 for row in range(1, len(triangle)): if num % 2: col = col + 1 num = num / 2 s = s + triangle[row][col] return s return max(path(triangle, n) for n in range(0, max_route)) pythran-0.10.0+ds2/pythran/tests/euler/euler19.py000066400000000000000000000016061416264035500215550ustar00rootroot00000000000000#runas solve() #unittest.skip date time not supported #pythran export solve() def solve(): ''' You are given the following information, but you may prefer to do some research for yourself. 1 Jan 1900 was a Monday. Thirty days has September, April, June and November. All the rest have thirty-one, Saving February alone, Which has twenty-eight, rain or shine. And on leap years, twenty-nine. A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400. How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? 
''' import datetime sundays = 0 for year in range(1901, 2001): for month in range(1, 13): d = datetime.date(year, month, 1) if d.weekday() == 6: sundays = sundays + 1 return sundays pythran-0.10.0+ds2/pythran/tests/euler/euler21.py000066400000000000000000000013641416264035500215470ustar00rootroot00000000000000#runas solve(10000) #pythran export solve(int) ''' Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n). If d(a) = b and d(b) = a, where a b, then a and b are an amicable pair and each of a and b are called amicable numbers. For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220. Evaluate the sum of all the amicable numbers under 10000. ''' def divisors(n): return list(i for i in range(1, n//2+1) if n % i == 0) def solve(m): pair = dict( ((n, sum(divisors(n))) for n in range(1, m)) ) return sum(n for n in range(1, m) if pair.get(pair[n], 0) == n and pair[n] != n) pythran-0.10.0+ds2/pythran/tests/euler/euler22.py000066400000000000000000000015361416264035500215510ustar00rootroot00000000000000#runas solve() #pythran export solve() ''' Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score. For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 x 53 = 49714. What is the total of all the name scores in the file? 
''' def worth(name): return sum(ord(letter) - ord('A') + 1 for letter in name) def solve(): names = open('pythran/tests/euler/names22.txt').read().replace('"', '').split(',') names.sort() return sum((i+1) * worth(names[i]) for i in range(0, len(names))) pythran-0.10.0+ds2/pythran/tests/euler/euler23.py000066400000000000000000000067651416264035500215630ustar00rootroot00000000000000#runas solve() #unittest.skip recursive generators #pythran export solve() ''' A perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number. A number whose proper divisors are less than the number is called deficient and a number whose proper divisors exceed the number is called abundant. As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit. Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers. ''' def solve(): prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) lastn = prime_list[-1] def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... 
up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access def factors(n): ''' Returns a prime factors of n as a list ''' _refresh(n) x, xp, f = 0, prime_list[0], [] while xp <= n: if not n % xp: f.append(xp) n = n / xp else: x = x + 1 xp = prime_list[x] return f def all_factors(n): ''' Returns all factors of n, including 1 and n ''' f = factors(n) elts = sorted(set(f)) numelts = len(elts) def gen_inner(i): if i >= numelts: yield 1 return thiselt = elts[i] thismax = f.count(thiselt) powers = [1] for j in range(thismax): powers.append(powers[-1] * thiselt) for d in gen_inner(i+1): for prime_power in powers: yield prime_power * d for d in gen_inner(0): yield d MAX = 28124 _refresh(MAX/2) abundants = [n for n in range(1, MAX) if sum(all_factors(n)) > n+n] abundants_dict = dict.fromkeys(abundants, 1) total = 0 for n in range(1, MAX): sum_of_abundants = 0 for a in abundants: if a > n: break if abundants_dict.get(n - a): sum_of_abundants = 1 break if not sum_of_abundants: total = total + n return total pythran-0.10.0+ds2/pythran/tests/euler/euler24.py000066400000000000000000000016161416264035500215520ustar00rootroot00000000000000#runas solve(1000000) #pythran export solve(int) ''' A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are: 012 021 102 120 201 210 What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9? 
''' def fact(n): f = 1 for x in range(1, n+1): f = f * x return f def permutation(orig_nums, n): nums = list(orig_nums) perm = [] while len(nums): divider = fact(len(nums)-1) pos = n // divider n = n % divider perm.append(nums[pos]) nums = nums[0:pos] + nums[pos+1:] return perm def solve(perm): return ''.join(str(x) for x in permutation(range(0,10), perm - 1)) pythran-0.10.0+ds2/pythran/tests/euler/euler25.py000066400000000000000000000005241416264035500215500ustar00rootroot00000000000000#runas solve(1000) #pythran export solve(int) ''' What is the first term in the Fibonacci sequence to contain 1000 digits ''' import math def solve(digit): phi = (1 + pow(5, 0.5)) / 2 c = math.log10(5) / 2 logphi = math.log10(phi) n = 1 while True: if n * logphi - c >= digit - 1: return n break n = n + 1 pythran-0.10.0+ds2/pythran/tests/euler/euler27.py000066400000000000000000000052211416264035500215510ustar00rootroot00000000000000#runas solve(1000) #pythran export solve(int) ''' Euler published the remarkable quadratic formula: n^2 + n + 41 It turns out that the formula will produce 40 primes for the consecutive values n = 0 to 39. However, when n = 40, 402 + 40 + 41 = 40(40 + 1) + 41 is divisible by 41, and certainly when n = 41, 41^2 + 41 + 41 is clearly divisible by 41. Using computers, the incredible formula n^2 - 79n + 1601 was discovered, which produces 80 primes for the consecutive values n = 0 to 79. The product of the coefficients, -79 and 1601, is -126479. Considering quadratics of the form: n^2 + an + b, where |a| <= 1000 and |b| <= 1000 where |n| is the modulus/absolute value of n e.g. |11| = 11 and |4| = 4 Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n = 0. 
''' def solve(edge): prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access def prime(x): ''' Returns the xth prime ''' lastn = prime_list[-1] while len(prime_list) <= x: # Keep working until we've got the xth prime lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access return prime_list[x] max_pair = (0,0,0) for a in range(-1 * edge + 1, edge): for b in range(max(2, 1-a), edge): # b >= 2, a + b + 1 >= 2 n, count = 0, 0 while True: v = n*n + a*n + b _refresh(v) if _isprime(v): count = count + 1 else: break n = n + 1 if count > max_pair[2]: max_pair = (a,b,count) return max_pair[0] * max_pair[1] pythran-0.10.0+ds2/pythran/tests/euler/euler30.py000066400000000000000000000012341416264035500215430ustar00rootroot00000000000000#runas solve(5) #pythran export solve(int) ''' Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits: 1634 = 1^4 + 6^4 + 3^4 + 4^4 8208 = 8^4 + 2^4 + 0^4 + 8^4 9474 = 9^4 + 4^4 + 7^4 + 4^4 As 1 = 1^4 is not a sum it is not included. The sum of these numbers is 1634 + 8208 + 9474 = 19316. Find the sum of all the numbers that can be written as the sum of fifth powers of their digits. 
''' def power_of_digits(n, p): s = 0 while n > 0: d = n % 10 n = n // 10 s = s + pow(d, p) return s def solve(p): return sum(n for n in range(2, 200000) if power_of_digits(n, p) == n) pythran-0.10.0+ds2/pythran/tests/euler/euler31.py000066400000000000000000000017731416264035500215540ustar00rootroot00000000000000#runas solve() #unittest.skip recursive generator #pythran export solve() def solve(): ''' In England the currency is made up of pound, P, and pence, p, and there are eight coins in general circulation: 1p, 2p, 5p, 10p, 20p, 50p, P1 (100p) and P2 (200p). It is possible to make P2 in the following way: 1 P1 + 1 50p + 2 20p + 1 5p + 1 2p + 3 1p How many different ways can P2 be made using any number of coins? ''' coins = [1, 2, 5, 10, 20, 50, 100, 200] def balance(pattern): return sum(coins[x]*pattern[x] for x in range(0, len(pattern))) def gen(pattern, coinnum, num): coin = coins[coinnum] for p in range(0, num/coin + 1): newpat = pattern[:coinnum] + (p,) bal = balance(newpat) if bal > num: return elif bal == num: yield newpat elif coinnum < len(coins)-1: for pat in gen(newpat, coinnum+1, num): yield pat return sum(1 for pat in gen((), 0, 200)) pythran-0.10.0+ds2/pythran/tests/euler/euler32.py000066400000000000000000000037401416264035500215510ustar00rootroot00000000000000#runas solve() #unittest.skip recursive generator #pythran export solve() ''' From O'Reilly's Python Cookbook ''' def _combinators(_handle, items, n): if n==0: yield [] return for i, item in enumerate(items): this_one = [ item ] for cc in _combinators(_handle, _handle(items, i), n-1): yield this_one + cc def combinations(items, n): ''' take n distinct items, order matters ''' def skipIthItem(items, i): return items[:i] + items[i+1:] return _combinators(skipIthItem, items, n) def uniqueCombinations(items, n): ''' take n distinct items, order is irrelevant ''' def afterIthItem(items, i): return items[i+1:] return _combinators(afterIthItem, items, n) def selections(items, n): ''' take n (not 
necessarily distinct) items, order matters ''' def keepAllItems(items, i): return items return _combinators(keepAllItems, items, n) def permutations(items): ''' take all items, order matters ''' return combinations(items, len(items)) def solve(): ''' The product 7254 is unusual, as the identity, 39 x 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital. Find the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 pandigital. HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum. ''' ''' From O'Reilly's Python Cookbook ''' def num(l): s = 0 for n in l: s = s * 10 + n return s product = {} for perm in permutations(range(1,10)): for cross in range(1,4): # Number can't be more than 4 digits for eq in range(cross+1, 6): # Result can't be less than 4 digits a = num(perm[0:cross]) b = num(perm[cross:eq]) c = num(perm[eq:9]) if a * b == c: product[c] = 1 return sum(p for p in product) pythran-0.10.0+ds2/pythran/tests/euler/euler33.py000066400000000000000000000035541416264035500215550ustar00rootroot00000000000000#runas solve(2) #pythran export solve(int) def solve(digit): ''' The fraction 49/98 is a curious fraction, as an inexperienced mathematician in attempting to simplify it may incorrectly believe that 49/98 = 4/8, which is correct, is obtained by cancelling the 9s. We shall consider fractions like, 30/50 = 3/5, to be trivial examples. There are exactly four non-trivial examples of this type of fraction, less than one in value, and containing two digits in the numerator and denominator. If the product of these four fractions is given in its lowest common terms, find the value of the denominator. 
''' def fractions(): for numerator in map(str, range(10 ** (digit - 1), 10 ** digit)): for denominator in map(str, range(int(numerator)+1, 10 ** digit)): if numerator == denominator: continue if numerator[1] == denominator[1] and numerator[1] == '0': continue if numerator[0] == denominator[0] and int(numerator) * int(denominator[1]) == int(denominator) * int(numerator[1]): yield(int(numerator), int(denominator)) if numerator[0] == denominator[1] and int(numerator) * int(denominator[0]) == int(denominator) * int(numerator[1]): yield(int(numerator), int(denominator)) if numerator[1] == denominator[1] and int(numerator) * int(denominator[0]) == int(denominator) * int(numerator[0]): yield(int(numerator), int(denominator)) if numerator[1] == denominator[0] and int(numerator) * int(denominator[1]) == int(denominator) * int(numerator[0]): yield(int(numerator), int(denominator)) def gcd(a,b): return b and gcd(b, a % b) or a numerator = 1 denominator = 1 for frac in fractions(): numerator = numerator * frac[0] denominator = denominator * frac[1] g = gcd(numerator, denominator) return denominator / g pythran-0.10.0+ds2/pythran/tests/euler/euler34.py000066400000000000000000000011361416264035500215500ustar00rootroot00000000000000#runas solve() # pythran export solve() def solve(): """ 145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145. Find the sum of all numbers which are equal to the sum of the factorial of their digits. Note: as 1! = 1 and 2! = 2 are not sums they are not included. 
""" fact = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880] def sum_of_digits_factorial(n): s = 0 while n > 0: d = n % 10 s = s + fact[d] n = n // 10 return s return sum(n for n in range(10, 100000) if n == sum_of_digits_factorial(n)) pythran-0.10.0+ds2/pythran/tests/euler/euler35.py000066400000000000000000000021621416264035500215510ustar00rootroot00000000000000#runas solve(1000000) # pythran export solve(int) def solve(a): """ The number, 197, is called a circular prime because all rotations of the digits: 197, 971, and 719, are themselves prime. There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97. How many circular primes are there below one million? """ sieve = [True] * a sieve[0] = sieve[1] = False def mark(sieve, x): for i in range(x+x, len(sieve), x): sieve[i] = False for x in range(2, int(len(sieve) ** 0.5) + 1): mark(sieve, x) def circular(n): digits = [] while n > 0: digits.insert(0, str(n % 10)) n = n // 10 for d in range(1, len(digits)): yield int(''.join(digits[d:] + digits[0:d])) count = 0 for n, p in enumerate(sieve): if p: iscircularprime = 1 for m in circular(n): if not sieve[m]: iscircularprime = 0 break if iscircularprime: count = count + 1 return count pythran-0.10.0+ds2/pythran/tests/euler/euler36.py000066400000000000000000000012741416264035500215550ustar00rootroot00000000000000#runas solve() # pythran export solve() def solve(): ''' The decimal number, 585 = 10010010012 (binary), is palindromic in both bases. Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2. (Please note that the palindromic number, in either base, may not include leading zeros.) 
''' def ispalindrome(n, base): digits = [] reverse = [] while n > 0: d = str(n % base) digits.append(d) reverse.insert(0, d) n = n // base return digits == reverse return sum(n for n in range(1, 1000000) if ispalindrome(n, 10) and ispalindrome(n, 2)) pythran-0.10.0+ds2/pythran/tests/euler/euler37.py000066400000000000000000000061051416264035500215540ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3. Find the sum of the only eleven primes that are both truncatable from left to right and right to left. NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes. ''' import math prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... 
up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access def prime(x): ''' Returns the xth prime ''' lastn = prime_list[-1] while len(prime_list) <= x: # Keep working until we've got the xth prime lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access return prime_list[x] digits = range(0, 10) prime_digits = (2, 3, 5, 7) def num(l): s = 0 for n in l: s = s * 10 + n return s def is_left_truncatable(l): is_truncatable = 1 for size in range(1, len(l)+1): n = num(l[:size]) _refresh(int(math.sqrt(n))) if not _isprime(n): is_truncatable = 0 break return is_truncatable def is_right_truncatable(l): is_truncatable = 1 for size in range(0, len(l)): n = num(l[size:]) _refresh(int(math.sqrt(n))) if not _isprime(n): is_truncatable = 0 break return is_truncatable def gen(result, number): if len(number) > 6: return number = list(number) number.append(0) for digit in digits: number[-1] = digit if is_left_truncatable(number): if is_right_truncatable(number) and len(number) > 1: result.append(num(number)) gen(result, number) result = [] gen(result, []) return sum(result) pythran-0.10.0+ds2/pythran/tests/euler/euler38.py000066400000000000000000000021051416264035500215510ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' Take the number 192 and multiply it by each of 1, 2, and 3: 192 x 1 = 192 192 x 2 = 384 192 x 3 = 576 By concatenating each product we get the 1 to 9 pandigital, 192384576. 
We will call 192384576 the concatenated product of 192 and (1,2,3) The same can be achieved by starting with 9 and multiplying by 1, 2, 3, 4, and 5, giving the pandigital, 918273645, which is the concatenated product of 9 and (1,2,3,4,5). What is the largest 1 to 9 pandigital 9-digit number that can be formed as the concatenated product of an integer with (1,2, ... , n) where n > 1? ''' def get_pandigital(n): pandigital = '' for x in range(1, 10): pandigital += str(x * n) if len(pandigital) >= 9: break if len(pandigital) == 9 and sorted(dict.fromkeys(list(pandigital)).keys()) == list("123456789"): return pandigital else: return '' max = '' for n in range(1, 10000): p = get_pandigital(n) if p and p > max: max = p return max pythran-0.10.0+ds2/pythran/tests/euler/euler39.py000066400000000000000000000014131416264035500215530ustar00rootroot00000000000000#runas solve(1000) # pythran export solve(int) def solve(n): """ If p is the perimeter of a right angle triangle with integral length sides, {a,b,c}, there are exactly three solutions for p = 120. {20,48,52}, {24,45,51}, {30,40,50} For which value of p < 1000, is the number of solutions maximised? """ maxp, maxsol = 0, 0 for p in range(12, n + 1, 2): solutions = 0 # a < b < c. So a is at most 1/3 of p. 
b is between a and (p-a)/2 for a in range(1, p//3): a2 = a*a for b in range(a, (p-a)//2): c = p - a - b if a2 + b*b == c*c: solutions = solutions + 1 if solutions > maxsol: maxp, maxsol = p, solutions return maxp pythran-0.10.0+ds2/pythran/tests/euler/euler41.py000066400000000000000000000051301416264035500215440ustar00rootroot00000000000000#runas solve() #unittest.skip recursive generator #pythran export solve() ''' From O'Reilly's Python Cookbook ''' def _combinators(_handle, items, n): if n==0: yield [] return for i, item in enumerate(items): this_one = [ item ] for cc in _combinators(_handle, _handle(items, i), n-1): yield this_one + cc def combinations(items, n): ''' take n distinct items, order matters ''' def skipIthItem(items, i): return items[:i] + items[i+1:] return _combinators(skipIthItem, items, n) def uniqueCombinations(items, n): ''' take n distinct items, order is irrelevant ''' def afterIthItem(items, i): return items[i+1:] return _combinators(afterIthItem, items, n) def selections(items, n): ''' take n (not necessarily distinct) items, order matters ''' def keepAllItems(items, i): return items return _combinators(keepAllItems, items, n) def permutations(items): ''' take all items, order matters ''' return combinations(items, len(items)) def solve(): ''' We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime. What is the largest n-digit pandigital prime that exists? ''' prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... 
up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access # Pan-digital primes are 4 or 7 digits. Others divisible by 3 _refresh(2766) # sqrt(7654321) for perm in permutations(range(7, 0, -1)): num = 0 for n in perm: num = num * 10 + n if _isprime(num): return num break pythran-0.10.0+ds2/pythran/tests/euler/euler42.py000066400000000000000000000017661416264035500215600ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' The nth term of the sequence of triangle numbers is given by, t_n = 1/2 x n(n+1); so the first ten triangle numbers are: 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t_10. If the word value is a triangle number then we shall call the word a triangle word. Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words? 
''' def worth(word): return sum(ord(letter) - ord('A') + 1 for letter in word) words = open('pythran/tests/euler/words42.txt').read().replace('"', '').split(',') triangle_numbers = dict.fromkeys(list(n*(n+1)/2 for n in range(1, 100)), 1) return sum(1 for word in words if worth(word) in triangle_numbers) pythran-0.10.0+ds2/pythran/tests/euler/euler43.py000066400000000000000000000036141416264035500215530ustar00rootroot00000000000000#runas solve() #unittest.skip recursive generator #pythran export solve() def solve(): ''' The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits 0 to 9 in some order, but it also has a rather interesting sub-string divisibility property. Let d1 be the 1st digit, d2 be the 2nd digit, and so on. In this way, we note the following: d2 d3 d4 = 406 is divisible by 2 d3 d4 d5 = 063 is divisible by 3 d4 d5 d6 = 635 is divisible by 5 d5 d6 d7 = 357 is divisible by 7 d6 d7 d8 = 572 is divisible by 11 d7 d8 d9 = 728 is divisible by 13 d8 d9 d10= 289 is divisible by 17 Find the sum of all 0 to 9 pandigital numbers with this property. 
''' def _combinators(_handle, items, n): if n==0: yield [] return for i, item in enumerate(items): this_one = [ item ] for cc in _combinators(_handle, _handle(items, i), n-1): yield this_one + cc def combinations(items, n): ''' take n distinct items, order matters ''' def skipIthItem(items, i): return items[:i] + items[i+1:] return _combinators(skipIthItem, items, n) def permutations(items): ''' take all items, order matters ''' return combinations(items, len(items)) def num(l): s = 0 for n in l: s = s * 10 + n return s def subdiv(l, n): return num(l) % n == 0 total = 0 for perm in permutations((0,1,2,3,4,6,7,8,9)): perm.insert(5, 5) # d6 must be 5 if (subdiv(perm[7:10], 17) and subdiv(perm[6:9], 13) and subdiv(perm[5:8], 11) and subdiv(perm[4:7], 7) and subdiv(perm[3:6], 5) and subdiv(perm[2:5], 3) and subdiv(perm[1:4], 2)): total += num(perm) return total pythran-0.10.0+ds2/pythran/tests/euler/euler44.py000066400000000000000000000016641416264035500215570ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' Pentagonal numbers are generated by the formula, P_n=n(3n-1)/2. The first ten pentagonal numbers are: 1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ... It can be seen that P_4 + P_7 = 22 + 70 = 92 = P_8. However, their difference, 70 - 22 = 48, is not pentagonal. Find the pair of pentagonal numbers, P_j and P_k, for which their sum and difference is pentagonal and D = |P_k - P_j| is minimised; what is the value of D? 
''' MAX = 2000 pent = [ n * (3*n - 1) / 2 for n in range(1, 2*MAX) ] pdic = dict.fromkeys(pent) def main2(): for j in range(0, MAX): for k in range(j+1, 2*MAX-1): p_j = pent[j] p_k = pent[k] p_sum = p_j + p_k p_diff = p_k - p_j if p_sum in pdic and p_diff in pdic: return p_diff return main2() pythran-0.10.0+ds2/pythran/tests/euler/euler46.py000066400000000000000000000045311416264035500215550ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. 9 = 7 + 2 x 1^2 15 = 7 + 2 x 2^2 21 = 3 + 2 x 3^2 25 = 7 + 2 x 3^2 27 = 19 + 2 x 2^2 33 = 31 + 2 x 1^2 It turns out that the conjecture was false. What is the smallest odd composite that cannot be written as the sum of a prime and twice a square? ''' prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... 
up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access def prime(x): ''' Returns the xth prime ''' lastn = prime_list[-1] while len(prime_list) <= x: # Keep working until we've got the xth prime lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access return prime_list[x] MAX = 10000 squares = dict.fromkeys((x*x for x in range(1, MAX)), 1) _refresh(MAX) for x in range(35, MAX, 2): if not _isprime(x): is_goldbach = 0 for p in prime_list[1:]: if p >= x: break if ((x - p)/2) in squares: is_goldbach = 1 break if not is_goldbach: return x break pythran-0.10.0+ds2/pythran/tests/euler/euler47.py000066400000000000000000000034461416264035500215620ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' Find the first four consecutive integers to have four distinct prime factors ''' prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... 
up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access def primes_factors(n): ''' Returns a prime factors of n as a list ''' _refresh(n) x, xp, f = 0, prime_list[0], [] while xp <= n: if not n % xp: f.append(xp) n = n / xp else: x = x + 1 xp = prime_list[x] return f def distinct_factors(n): return len(dict.fromkeys(primes_factors(n)).keys()) factors = [0, 1, distinct_factors(2), distinct_factors(3)] while True: if factors[-4::] == [4,4,4,4]: break else: factors.append(distinct_factors(len(factors))) return len(factors)-4 pythran-0.10.0+ds2/pythran/tests/euler/euler49.py000066400000000000000000000062231416264035500215600ustar00rootroot00000000000000#runas solve() #unittest.skip recursive generator #pythran export solve() def solve(): ''' The arithmetic sequence, 1487, 4817, 8147, in which each of the terms increases by 3330, is unusual in two ways: (i) each of the three terms are prime, and, (ii) each of the 4-digit numbers are permutations of one another. There are no arithmetic sequences made up of three 1-, 2-, or 3-digit primes, exhibiting this property, but there is one other 4-digit increasing sequence. What 12-digit number do you form by concatenating the three terms in this sequence? ''' prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) lastn = prime_list[-1] def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... 
up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) def isprime(x): ''' Returns 1 if x is prime, 0 if not. Uses a pre-computed dictionary ''' _refresh(x) # Compute primes up to x (which is a bit wasteful) return prime_dict.get(x, 0) def _combinators(_handle, items, n): if n==0: yield [] return for i, item in enumerate(items): this_one = [ item ] for cc in _combinators(_handle, _handle(items, i), n-1): yield this_one + cc def combinations(items, n): ''' take n distinct items, order matters ''' def skipIthItem(items, i): return items[:i] + items[i+1:] return _combinators(skipIthItem, items, n) def permutations(items): ''' take all items, order matters ''' return combinations(items, len(items)) _refresh(10000) for num in range(1000, 10000): if str(num).find('0') >= 0: continue if isprime(num): prime_permutations = { num: 1 } for x in permutations(list(str(num))): next_num = int(''.join(x)) if isprime(next_num): prime_permutations[next_num] = 1 primes = sorted(prime_permutations.keys()) for a in range(0, len(primes)): if primes[a] == 1487: continue for b in range(a+1, len(primes)): c = (primes[a] + primes[b]) / 2 if c in prime_permutations: return str(primes[a]) + str(c) + str(primes[b]) exit() pythran-0.10.0+ds2/pythran/tests/euler/euler51.py000066400000000000000000000100031416264035500215400ustar00rootroot00000000000000#runas solve() #unittest.skip recursive generator #pythran export solve() def solve(): ''' By replacing the 1st digit of *57, it turns out that six of the possible values: 157, 257, 457, 557, 757, and 857, are all prime. 
By replacing the 3rd and 4th digits of 56**3 with the same digit, this 5-digit number is the first example having seven primes, yielding the family: 56003, 56113, 56333, 56443, 56663, 56773, and 56993. Consequently 56003, being the first member of this family, is the smallest prime with this property. Find the smallest prime which, by replacing part of the number (not necessarily adjacent digits) with the same digit, is part of an eight prime value family. ''' prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) lastn = prime_list[-1] def _isprime(n): ''' Raw check to see if n is prime. Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access def prime(x): ''' Returns the xth prime ''' while len(prime_list) <= x: # Keep working until we've got the xth prime lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access return prime_list[x] def isprime(x): ''' Returns 1 if x is prime, 0 if not. 
Uses a pre-computed dictionary ''' _refresh(x) # Compute primes up to x (which is a bit wasteful) return prime_dict.get(x, 0) def _combinators(_handle, items, n): if n==0: yield [] return for i, item in enumerate(items): this_one = [ item ] for cc in _combinators(_handle, _handle(items, i), n-1): yield this_one + cc def uniqueCombinations(items, n): ''' take n distinct items, order is irrelevant ''' def afterIthItem(items, i): return items[i+1:] return _combinators(afterIthItem, items, n) cache = {} def prime_family_length(n, digits): if (n, digits) in cache: return cache[n, digits] num, nums, count = list(str(n)), [], 0 if len(dict.fromkeys(num[d] for d in digits).keys()) > 1: return cache.setdefault((n, digits), 0) # The digits must have the same number for d in range(0 in digits and 1 or 0, 10): # Ensure 0 is not the first digit for x in digits: num[x] = str(d) n = int(''.join(num)) if prime.isprime(n): count += 1 nums.append(n) for n in nums: cache[n, digits] = count return count prime._refresh(100000) n, max, max_count, combos = 10, 0, 0, {} while max_count < 8: p = prime.prime(n) digits = range(0, len(str(p))) for size in range(1, len(digits)): patterns = combos.setdefault((len(digits), size), tuple(tuple(sorted(p)) for p in uniqueCombinations(digits, size))) for pat in patterns: count = prime_family_length(p, pat) if count > max_count: max, max_count = p, count n += 1 return p pythran-0.10.0+ds2/pythran/tests/euler/euler52.py000066400000000000000000000013351416264035500215510ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order. Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits. 
''' def multiples_have_same_digits(n): digit_keys = dict.fromkeys(list(str(n))) for x in range(2, 4): for d in list(str(x * n)): if d not in digit_keys: return False return True n = 0 while True: n = n + 9 # n must be a multiple of 9 for this to happen if multiples_have_same_digits(n): return n break pythran-0.10.0+ds2/pythran/tests/euler/euler54.py000066400000000000000000000117441416264035500215600ustar00rootroot00000000000000#runas solve() #unittest.skip type can't be deducte #pythran export solve() def solve(): ''' In the card game poker, a hand consists of five cards and are ranked, from lowest to highest, in the following way: High Card: Highest value card. One Pair: Two cards of the same value. Two Pairs: Two different pairs. Three of a Kind: Three cards of the same value. Straight: All cards are consecutive values. Flush: All cards of the same suit. Full House: Three of a kind and a pair. Four of a Kind: Four cards of the same value. Straight Flush: All cards are consecutive values of same suit. Royal Flush: Ten, Jack, Queen, King, Ace, in same suit. The cards are valued in the order: 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, King, Ace. If two players have the same ranked hands then the rank made up of the highest value wins; for example, a pair of eights beats a pair of fives (see example 1 below). But if two ranks tie, for example, both players have a pair of queens, then highest cards in each hand are compared (see example 4 below); if the highest cards tie then the next highest cards are compared, and so on. 
Consider the following five hands dealt to two players: Hand Player 1 Player 2 Winner 1 5H 5C 6S 7S KD 2C 3S 8S 8D TD Player 2 Pair of Fives Pair of Eights 2 5D 8C 9S JS AC 2C 5C 7D 8S QH Player 1 Highest card Ace Highest card Queen 3 2D 9C AS AH AC 3D 6D 7D TD QD Player 2 Three Aces Flush with Diamonds 4 4D 6S 9H QH QC 3D 6D 7H QD QS Player 1 Pair of Queens Pair of Queens Highest card Nine Highest card Seven 5 2H 2D 4C 4D 4S 3C 3D 3S 9S 9D Player 1 Full House Full House With Three Fours with Three Threes The file, poker.txt, contains one-thousand random hands dealt to two players. Each line of the file contains ten cards (separated by a single space): the first five are Player 1's cards and the last five are Player 2's cards. You can assume that all hands are valid (no invalid characters or repeated cards), each player's hand is in no specific order, and in each hand there is a clear winner. How many hands does Player 1 win? ''' value = { '2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'T':10,'J':11,'Q':12,'K':13,'A':14 } all_kinds = tuple(reversed(sorted(value.values()))) all_suits = list('DCSH') def make_hand(cards): hand = {} for card in cards: hand.setdefault(value[card[0]], {})[card[1]] = 1 hand.setdefault(card[1], {})[value[card[0]]] = 1 return hand def get(hash, arr): return ((i, hash.get(i, {})) for i in arr) def has(hash, arr): return not sum(1 for i in arr if i not in hash) def rank(hand): # Royal flush for suit, kinds in get(hand, all_suits): if has(kinds, tuple('TJQKA')): return (9,0,0) # Straight flush for suit, kinds in get(hand, all_suits): kinds = sorted(kind for kind in kinds.keys()) if len(kinds) == 5 and kinds[4] - kinds[0] == 4: return (8, kinds[0],0) # Four of a kind for kind, suits in get(hand, all_kinds): if len(suits.keys()) == 4: return (7, kind,0) # Full house for kind, suits in get(hand, all_kinds): if len(suits.keys()) == 3: for kind2, suits2 in get(hand, all_kinds): if len(suits2.keys()) == 2: return (6, kind, kind2) # Flush for suit, 
kinds in get(hand, all_suits): if len(kinds.keys()) == 5: return (5,0,0) # Straight kinds = sorted(kind for kind in all_kinds if kind in hand) if len(kinds) == 5 and kinds[4] - kinds[0] == 4: return (4, kinds[0],0) # Three of a kind for kind, suits in get(hand, all_kinds): if len(suits.keys()) == 3: return (3, kind,0) # Two pairs for kind, suits in get(hand, all_kinds): if len(suits.keys()) == 2: for kind2, suits2 in get(hand, all_kinds): if kind != kind2 and len(suits2.keys()) == 2: return (2, kind, kind2) # One pair for kind, suits in get(hand, all_kinds): if len(suits.keys()) == 2: return (1, kind,0) for kind in all_kinds: if kind in hand: return (0, kind,0) return (0,0,0) count = 0 for hand in open('poker.txt'): hands = hand.split(' ') p1, p2 = make_hand(hands[0:5]), make_hand(hands[5:10]) v1, v2 = rank(p1), rank(p2) if v1 > v2: count += 1 return count pythran-0.10.0+ds2/pythran/tests/euler/euler58.py000066400000000000000000000043511416264035500215600ustar00rootroot00000000000000#runas solve() #pythran export solve() def solve(): ''' Starting with 1 and spiralling anticlockwise in the following way, a square spiral with side length 7 is formed. 37 36 35 34 33 32 31 38 17 16 15 14 13 30 39 18 5 4 3 12 29 40 19 6 1 2 11 28 41 20 7 8 9 10 27 42 21 22 23 24 25 26 43 44 45 46 47 48 49 It is interesting to note that the odd squares lie along the bottom right diagonal, but what is more interesting is that 8 out of the 13 numbers lying along both diagonals are prime; that is, a ratio of 8/13 ~ 62%. If one complete new layer is wrapped around the spiral above, a square spiral with side length 9 will be formed. If this process is continued, what is the side length of the square spiral for which the ratio of primes along both diagonals first falls below 10%? ''' prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime prime_dict = dict.fromkeys(prime_list, 1) def _isprime(n): ''' Raw check to see if n is prime. 
Assumes that prime_list is already populated ''' isprime = n >= 2 and 1 or 0 for prime in prime_list: # Check for factors with all primes if prime * prime > n: break # ... up to sqrt(n) if not n % prime: isprime = 0 break if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup return isprime def _refresh(x): ''' Refreshes primes upto x ''' lastn = prime_list[-1] while lastn <= x: # Keep working until we've got up to x lastn = lastn + 1 # Check the next number if _isprime(lastn): prime_list.append(lastn) # Maintain a list for sequential access _refresh(50000) width, diagonal, base, primes = 1, 1, 1, 0 while True: width = width + 2 increment = width - 1 for i in range(0, 4): diagonal = diagonal + increment if i < 3 and _isprime(diagonal): primes += 1 base = base + 4 if primes * 10 < base: return width break pythran-0.10.0+ds2/pythran/tests/euler/names22.txt000066400000000000000000001325571416264035500217370ustar00rootroot00000000000000"MARY","PATRICIA","LINDA","BARBARA","ELIZABETH","JENNIFER","MARIA","SUSAN","MARGARET","DOROTHY","LISA","NANCY","KAREN","BETTY","HELEN","SANDRA","DONNA","CAROL","RUTH","SHARON","MICHELLE","LAURA","SARAH","KIMBERLY","DEBORAH","JESSICA","SHIRLEY","CYNTHIA","ANGELA","MELISSA","BRENDA","AMY","ANNA","REBECCA","VIRGINIA","KATHLEEN","PAMELA","MARTHA","DEBRA","AMANDA","STEPHANIE","CAROLYN","CHRISTINE","MARIE","JANET","CATHERINE","FRANCES","ANN","JOYCE","DIANE","ALICE","JULIE","HEATHER","TERESA","DORIS","GLORIA","EVELYN","JEAN","CHERYL","MILDRED","KATHERINE","JOAN","ASHLEY","JUDITH","ROSE","JANICE","KELLY","NICOLE","JUDY","CHRISTINA","KATHY","THERESA","BEVERLY","DENISE","TAMMY","IRENE","JANE","LORI","RACHEL","MARILYN","ANDREA","KATHRYN","LOUISE","SARA","ANNE","JACQUELINE","WANDA","BONNIE","JULIA","RUBY","LOIS","TINA","PHYLLIS","NORMA","PAULA","DIANA","ANNIE","LILLIAN","EMILY","ROBIN","PEGGY","CRYSTAL","GLADYS","RITA","DAWN","CONNIE","FLORENCE","TRACY","EDNA","TIFFANY","CARMEN","ROSA","CINDY","GRACE","WENDY","VICTORIA","EDITH","KIM","
SHERRY","SYLVIA","JOSEPHINE","THELMA","SHANNON","SHEILA","ETHEL","ELLEN","ELAINE","MARJORIE","CARRIE","CHARLOTTE","MONICA","ESTHER","PAULINE","EMMA","JUANITA","ANITA","RHONDA","HAZEL","AMBER","EVA","DEBBIE","APRIL","LESLIE","CLARA","LUCILLE","JAMIE","JOANNE","ELEANOR","VALERIE","DANIELLE","MEGAN","ALICIA","SUZANNE","MICHELE","GAIL","BERTHA","DARLENE","VERONICA","JILL","ERIN","GERALDINE","LAUREN","CATHY","JOANN","LORRAINE","LYNN","SALLY","REGINA","ERICA","BEATRICE","DOLORES","BERNICE","AUDREY","YVONNE","ANNETTE","JUNE","SAMANTHA","MARION","DANA","STACY","ANA","RENEE","IDA","VIVIAN","ROBERTA","HOLLY","BRITTANY","MELANIE","LORETTA","YOLANDA","JEANETTE","LAURIE","KATIE","KRISTEN","VANESSA","ALMA","SUE","ELSIE","BETH","JEANNE","VICKI","CARLA","TARA","ROSEMARY","EILEEN","TERRI","GERTRUDE","LUCY","TONYA","ELLA","STACEY","WILMA","GINA","KRISTIN","JESSIE","NATALIE","AGNES","VERA","WILLIE","CHARLENE","BESSIE","DELORES","MELINDA","PEARL","ARLENE","MAUREEN","COLLEEN","ALLISON","TAMARA","JOY","GEORGIA","CONSTANCE","LILLIE","CLAUDIA","JACKIE","MARCIA","TANYA","NELLIE","MINNIE","MARLENE","HEIDI","GLENDA","LYDIA","VIOLA","COURTNEY","MARIAN","STELLA","CAROLINE","DORA","JO","VICKIE","MATTIE","TERRY","MAXINE","IRMA","MABEL","MARSHA","MYRTLE","LENA","CHRISTY","DEANNA","PATSY","HILDA","GWENDOLYN","JENNIE","NORA","MARGIE","NINA","CASSANDRA","LEAH","PENNY","KAY","PRISCILLA","NAOMI","CAROLE","BRANDY","OLGA","BILLIE","DIANNE","TRACEY","LEONA","JENNY","FELICIA","SONIA","MIRIAM","VELMA","BECKY","BOBBIE","VIOLET","KRISTINA","TONI","MISTY","MAE","SHELLY","DAISY","RAMONA","SHERRI","ERIKA","KATRINA","CLAIRE","LINDSEY","LINDSAY","GENEVA","GUADALUPE","BELINDA","MARGARITA","SHERYL","CORA","FAYE","ADA","NATASHA","SABRINA","ISABEL","MARGUERITE","HATTIE","HARRIET","MOLLY","CECILIA","KRISTI","BRANDI","BLANCHE","SANDY","ROSIE","JOANNA","IRIS","EUNICE","ANGIE","INEZ","LYNDA","MADELINE","AMELIA","ALBERTA","GENEVIEVE","MONIQUE","JODI","JANIE","MAGGIE","KAYLA","SONYA","JAN","LEE","KRISTINE","CANDACE","FANNIE
","MARYANN","OPAL","ALISON","YVETTE","MELODY","LUZ","SUSIE","OLIVIA","FLORA","SHELLEY","KRISTY","MAMIE","LULA","LOLA","VERNA","BEULAH","ANTOINETTE","CANDICE","JUANA","JEANNETTE","PAM","KELLI","HANNAH","WHITNEY","BRIDGET","KARLA","CELIA","LATOYA","PATTY","SHELIA","GAYLE","DELLA","VICKY","LYNNE","SHERI","MARIANNE","KARA","JACQUELYN","ERMA","BLANCA","MYRA","LETICIA","PAT","KRISTA","ROXANNE","ANGELICA","JOHNNIE","ROBYN","FRANCIS","ADRIENNE","ROSALIE","ALEXANDRA","BROOKE","BETHANY","SADIE","BERNADETTE","TRACI","JODY","KENDRA","JASMINE","NICHOLE","RACHAEL","CHELSEA","MABLE","ERNESTINE","MURIEL","MARCELLA","ELENA","KRYSTAL","ANGELINA","NADINE","KARI","ESTELLE","DIANNA","PAULETTE","LORA","MONA","DOREEN","ROSEMARIE","ANGEL","DESIREE","ANTONIA","HOPE","GINGER","JANIS","BETSY","CHRISTIE","FREDA","MERCEDES","MEREDITH","LYNETTE","TERI","CRISTINA","EULA","LEIGH","MEGHAN","SOPHIA","ELOISE","ROCHELLE","GRETCHEN","CECELIA","RAQUEL","HENRIETTA","ALYSSA","JANA","KELLEY","GWEN","KERRY","JENNA","TRICIA","LAVERNE","OLIVE","ALEXIS","TASHA","SILVIA","ELVIRA","CASEY","DELIA","SOPHIE","KATE","PATTI","LORENA","KELLIE","SONJA","LILA","LANA","DARLA","MAY","MINDY","ESSIE","MANDY","LORENE","ELSA","JOSEFINA","JEANNIE","MIRANDA","DIXIE","LUCIA","MARTA","FAITH","LELA","JOHANNA","SHARI","CAMILLE","TAMI","SHAWNA","ELISA","EBONY","MELBA","ORA","NETTIE","TABITHA","OLLIE","JAIME","WINIFRED","KRISTIE","MARINA","ALISHA","AIMEE","RENA","MYRNA","MARLA","TAMMIE","LATASHA","BONITA","PATRICE","RONDA","SHERRIE","ADDIE","FRANCINE","DELORIS","STACIE","ADRIANA","CHERI","SHELBY","ABIGAIL","CELESTE","JEWEL","CARA","ADELE","REBEKAH","LUCINDA","DORTHY","CHRIS","EFFIE","TRINA","REBA","SHAWN","SALLIE","AURORA","LENORA","ETTA","LOTTIE","KERRI","TRISHA","NIKKI","ESTELLA","FRANCISCA","JOSIE","TRACIE","MARISSA","KARIN","BRITTNEY","JANELLE","LOURDES","LAUREL","HELENE","FERN","ELVA","CORINNE","KELSEY","INA","BETTIE","ELISABETH","AIDA","CAITLIN","INGRID","IVA","EUGENIA","CHRISTA","GOLDIE","CASSIE","MAUDE","JENIFER","THERESE","F
RANKIE","DENA","LORNA","JANETTE","LATONYA","CANDY","MORGAN","CONSUELO","TAMIKA","ROSETTA","DEBORA","CHERIE","POLLY","DINA","JEWELL","FAY","JILLIAN","DOROTHEA","NELL","TRUDY","ESPERANZA","PATRICA","KIMBERLEY","SHANNA","HELENA","CAROLINA","CLEO","STEFANIE","ROSARIO","OLA","JANINE","MOLLIE","LUPE","ALISA","LOU","MARIBEL","SUSANNE","BETTE","SUSANA","ELISE","CECILE","ISABELLE","LESLEY","JOCELYN","PAIGE","JONI","RACHELLE","LEOLA","DAPHNE","ALTA","ESTER","PETRA","GRACIELA","IMOGENE","JOLENE","KEISHA","LACEY","GLENNA","GABRIELA","KERI","URSULA","LIZZIE","KIRSTEN","SHANA","ADELINE","MAYRA","JAYNE","JACLYN","GRACIE","SONDRA","CARMELA","MARISA","ROSALIND","CHARITY","TONIA","BEATRIZ","MARISOL","CLARICE","JEANINE","SHEENA","ANGELINE","FRIEDA","LILY","ROBBIE","SHAUNA","MILLIE","CLAUDETTE","CATHLEEN","ANGELIA","GABRIELLE","AUTUMN","KATHARINE","SUMMER","JODIE","STACI","LEA","CHRISTI","JIMMIE","JUSTINE","ELMA","LUELLA","MARGRET","DOMINIQUE","SOCORRO","RENE","MARTINA","MARGO","MAVIS","CALLIE","BOBBI","MARITZA","LUCILE","LEANNE","JEANNINE","DEANA","AILEEN","LORIE","LADONNA","WILLA","MANUELA","GALE","SELMA","DOLLY","SYBIL","ABBY","LARA","DALE","IVY","DEE","WINNIE","MARCY","LUISA","JERI","MAGDALENA","OFELIA","MEAGAN","AUDRA","MATILDA","LEILA","CORNELIA","BIANCA","SIMONE","BETTYE","RANDI","VIRGIE","LATISHA","BARBRA","GEORGINA","ELIZA","LEANN","BRIDGETTE","RHODA","HALEY","ADELA","NOLA","BERNADINE","FLOSSIE","ILA","GRETA","RUTHIE","NELDA","MINERVA","LILLY","TERRIE","LETHA","HILARY","ESTELA","VALARIE","BRIANNA","ROSALYN","EARLINE","CATALINA","AVA","MIA","CLARISSA","LIDIA","CORRINE","ALEXANDRIA","CONCEPCION","TIA","SHARRON","RAE","DONA","ERICKA","JAMI","ELNORA","CHANDRA","LENORE","NEVA","MARYLOU","MELISA","TABATHA","SERENA","AVIS","ALLIE","SOFIA","JEANIE","ODESSA","NANNIE","HARRIETT","LORAINE","PENELOPE","MILAGROS","EMILIA","BENITA","ALLYSON","ASHLEE","TANIA","TOMMIE","ESMERALDA","KARINA","EVE","PEARLIE","ZELMA","MALINDA","NOREEN","TAMEKA","SAUNDRA","HILLARY","AMIE","ALTHEA","ROSALINDA","JOR
DAN","LILIA","ALANA","GAY","CLARE","ALEJANDRA","ELINOR","MICHAEL","LORRIE","JERRI","DARCY","EARNESTINE","CARMELLA","TAYLOR","NOEMI","MARCIE","LIZA","ANNABELLE","LOUISA","EARLENE","MALLORY","CARLENE","NITA","SELENA","TANISHA","KATY","JULIANNE","JOHN","LAKISHA","EDWINA","MARICELA","MARGERY","KENYA","DOLLIE","ROXIE","ROSLYN","KATHRINE","NANETTE","CHARMAINE","LAVONNE","ILENE","KRIS","TAMMI","SUZETTE","CORINE","KAYE","JERRY","MERLE","CHRYSTAL","LINA","DEANNE","LILIAN","JULIANA","ALINE","LUANN","KASEY","MARYANNE","EVANGELINE","COLETTE","MELVA","LAWANDA","YESENIA","NADIA","MADGE","KATHIE","EDDIE","OPHELIA","VALERIA","NONA","MITZI","MARI","GEORGETTE","CLAUDINE","FRAN","ALISSA","ROSEANN","LAKEISHA","SUSANNA","REVA","DEIDRE","CHASITY","SHEREE","CARLY","JAMES","ELVIA","ALYCE","DEIRDRE","GENA","BRIANA","ARACELI","KATELYN","ROSANNE","WENDI","TESSA","BERTA","MARVA","IMELDA","MARIETTA","MARCI","LEONOR","ARLINE","SASHA","MADELYN","JANNA","JULIETTE","DEENA","AURELIA","JOSEFA","AUGUSTA","LILIANA","YOUNG","CHRISTIAN","LESSIE","AMALIA","SAVANNAH","ANASTASIA","VILMA","NATALIA","ROSELLA","LYNNETTE","CORINA","ALFREDA","LEANNA","CAREY","AMPARO","COLEEN","TAMRA","AISHA","WILDA","KARYN","CHERRY","QUEEN","MAURA","MAI","EVANGELINA","ROSANNA","HALLIE","ERNA","ENID","MARIANA","LACY","JULIET","JACKLYN","FREIDA","MADELEINE","MARA","HESTER","CATHRYN","LELIA","CASANDRA","BRIDGETT","ANGELITA","JANNIE","DIONNE","ANNMARIE","KATINA","BERYL","PHOEBE","MILLICENT","KATHERYN","DIANN","CARISSA","MARYELLEN","LIZ","LAURI","HELGA","GILDA","ADRIAN","RHEA","MARQUITA","HOLLIE","TISHA","TAMERA","ANGELIQUE","FRANCESCA","BRITNEY","KAITLIN","LOLITA","FLORINE","ROWENA","REYNA","TWILA","FANNY","JANELL","INES","CONCETTA","BERTIE","ALBA","BRIGITTE","ALYSON","VONDA","PANSY","ELBA","NOELLE","LETITIA","KITTY","DEANN","BRANDIE","LOUELLA","LETA","FELECIA","SHARLENE","LESA","BEVERLEY","ROBERT","ISABELLA","HERMINIA","TERRA","CELINA","TORI","OCTAVIA","JADE","DENICE","GERMAINE","SIERRA","MICHELL","CORTNEY","NELLY","DORETHA","SYDNE
Y","DEIDRA","MONIKA","LASHONDA","JUDI","CHELSEY","ANTIONETTE","MARGOT","BOBBY","ADELAIDE","NAN","LEEANN","ELISHA","DESSIE","LIBBY","KATHI","GAYLA","LATANYA","MINA","MELLISA","KIMBERLEE","JASMIN","RENAE","ZELDA","ELDA","MA","JUSTINA","GUSSIE","EMILIE","CAMILLA","ABBIE","ROCIO","KAITLYN","JESSE","EDYTHE","ASHLEIGH","SELINA","LAKESHA","GERI","ALLENE","PAMALA","MICHAELA","DAYNA","CARYN","ROSALIA","SUN","JACQULINE","REBECA","MARYBETH","KRYSTLE","IOLA","DOTTIE","BENNIE","BELLE","AUBREY","GRISELDA","ERNESTINA","ELIDA","ADRIANNE","DEMETRIA","DELMA","CHONG","JAQUELINE","DESTINY","ARLEEN","VIRGINA","RETHA","FATIMA","TILLIE","ELEANORE","CARI","TREVA","BIRDIE","WILHELMINA","ROSALEE","MAURINE","LATRICE","YONG","JENA","TARYN","ELIA","DEBBY","MAUDIE","JEANNA","DELILAH","CATRINA","SHONDA","HORTENCIA","THEODORA","TERESITA","ROBBIN","DANETTE","MARYJANE","FREDDIE","DELPHINE","BRIANNE","NILDA","DANNA","CINDI","BESS","IONA","HANNA","ARIEL","WINONA","VIDA","ROSITA","MARIANNA","WILLIAM","RACHEAL","GUILLERMINA","ELOISA","CELESTINE","CAREN","MALISSA","LONA","CHANTEL","SHELLIE","MARISELA","LEORA","AGATHA","SOLEDAD","MIGDALIA","IVETTE","CHRISTEN","ATHENA","JANEL","CHLOE","VEDA","PATTIE","TESSIE","TERA","MARILYNN","LUCRETIA","KARRIE","DINAH","DANIELA","ALECIA","ADELINA","VERNICE","SHIELA","PORTIA","MERRY","LASHAWN","DEVON","DARA","TAWANA","OMA","VERDA","CHRISTIN","ALENE","ZELLA","SANDI","RAFAELA","MAYA","KIRA","CANDIDA","ALVINA","SUZAN","SHAYLA","LYN","LETTIE","ALVA","SAMATHA","ORALIA","MATILDE","MADONNA","LARISSA","VESTA","RENITA","INDIA","DELOIS","SHANDA","PHILLIS","LORRI","ERLINDA","CRUZ","CATHRINE","BARB","ZOE","ISABELL","IONE","GISELA","CHARLIE","VALENCIA","ROXANNA","MAYME","KISHA","ELLIE","MELLISSA","DORRIS","DALIA","BELLA","ANNETTA","ZOILA","RETA","REINA","LAURETTA","KYLIE","CHRISTAL","PILAR","CHARLA","ELISSA","TIFFANI","TANA","PAULINA","LEOTA","BREANNA","JAYME","CARMEL","VERNELL","TOMASA","MANDI","DOMINGA","SANTA","MELODIE","LURA","ALEXA","TAMELA","RYAN","MIRNA","KERRIE","VENUS","NOEL"
,"FELICITA","CRISTY","CARMELITA","BERNIECE","ANNEMARIE","TIARA","ROSEANNE","MISSY","CORI","ROXANA","PRICILLA","KRISTAL","JUNG","ELYSE","HAYDEE","ALETHA","BETTINA","MARGE","GILLIAN","FILOMENA","CHARLES","ZENAIDA","HARRIETTE","CARIDAD","VADA","UNA","ARETHA","PEARLINE","MARJORY","MARCELA","FLOR","EVETTE","ELOUISE","ALINA","TRINIDAD","DAVID","DAMARIS","CATHARINE","CARROLL","BELVA","NAKIA","MARLENA","LUANNE","LORINE","KARON","DORENE","DANITA","BRENNA","TATIANA","SAMMIE","LOUANN","LOREN","JULIANNA","ANDRIA","PHILOMENA","LUCILA","LEONORA","DOVIE","ROMONA","MIMI","JACQUELIN","GAYE","TONJA","MISTI","JOE","GENE","CHASTITY","STACIA","ROXANN","MICAELA","NIKITA","MEI","VELDA","MARLYS","JOHNNA","AURA","LAVERN","IVONNE","HAYLEY","NICKI","MAJORIE","HERLINDA","GEORGE","ALPHA","YADIRA","PERLA","GREGORIA","DANIEL","ANTONETTE","SHELLI","MOZELLE","MARIAH","JOELLE","CORDELIA","JOSETTE","CHIQUITA","TRISTA","LOUIS","LAQUITA","GEORGIANA","CANDI","SHANON","LONNIE","HILDEGARD","CECIL","VALENTINA","STEPHANY","MAGDA","KAROL","GERRY","GABRIELLA","TIANA","ROMA","RICHELLE","RAY","PRINCESS","OLETA","JACQUE","IDELLA","ALAINA","SUZANNA","JOVITA","BLAIR","TOSHA","RAVEN","NEREIDA","MARLYN","KYLA","JOSEPH","DELFINA","TENA","STEPHENIE","SABINA","NATHALIE","MARCELLE","GERTIE","DARLEEN","THEA","SHARONDA","SHANTEL","BELEN","VENESSA","ROSALINA","ONA","GENOVEVA","COREY","CLEMENTINE","ROSALBA","RENATE","RENATA","MI","IVORY","GEORGIANNA","FLOY","DORCAS","ARIANA","TYRA","THEDA","MARIAM","JULI","JESICA","DONNIE","VIKKI","VERLA","ROSELYN","MELVINA","JANNETTE","GINNY","DEBRAH","CORRIE","ASIA","VIOLETA","MYRTIS","LATRICIA","COLLETTE","CHARLEEN","ANISSA","VIVIANA","TWYLA","PRECIOUS","NEDRA","LATONIA","LAN","HELLEN","FABIOLA","ANNAMARIE","ADELL","SHARYN","CHANTAL","NIKI","MAUD","LIZETTE","LINDY","KIA","KESHA","JEANA","DANELLE","CHARLINE","CHANEL","CARROL","VALORIE","LIA","DORTHA","CRISTAL","SUNNY","LEONE","LEILANI","GERRI","DEBI","ANDRA","KESHIA","IMA","EULALIA","EASTER","DULCE","NATIVIDAD","LINNIE","KAMI","GEORGIE","
CATINA","BROOK","ALDA","WINNIFRED","SHARLA","RUTHANN","MEAGHAN","MAGDALENE","LISSETTE","ADELAIDA","VENITA","TRENA","SHIRLENE","SHAMEKA","ELIZEBETH","DIAN","SHANTA","MICKEY","LATOSHA","CARLOTTA","WINDY","SOON","ROSINA","MARIANN","LEISA","JONNIE","DAWNA","CATHIE","BILLY","ASTRID","SIDNEY","LAUREEN","JANEEN","HOLLI","FAWN","VICKEY","TERESSA","SHANTE","RUBYE","MARCELINA","CHANDA","CARY","TERESE","SCARLETT","MARTY","MARNIE","LULU","LISETTE","JENIFFER","ELENOR","DORINDA","DONITA","CARMAN","BERNITA","ALTAGRACIA","ALETA","ADRIANNA","ZORAIDA","RONNIE","NICOLA","LYNDSEY","KENDALL","JANINA","CHRISSY","AMI","STARLA","PHYLIS","PHUONG","KYRA","CHARISSE","BLANCH","SANJUANITA","RONA","NANCI","MARILEE","MARANDA","CORY","BRIGETTE","SANJUANA","MARITA","KASSANDRA","JOYCELYN","IRA","FELIPA","CHELSIE","BONNY","MIREYA","LORENZA","KYONG","ILEANA","CANDELARIA","TONY","TOBY","SHERIE","OK","MARK","LUCIE","LEATRICE","LAKESHIA","GERDA","EDIE","BAMBI","MARYLIN","LAVON","HORTENSE","GARNET","EVIE","TRESSA","SHAYNA","LAVINA","KYUNG","JEANETTA","SHERRILL","SHARA","PHYLISS","MITTIE","ANABEL","ALESIA","THUY","TAWANDA","RICHARD","JOANIE","TIFFANIE","LASHANDA","KARISSA","ENRIQUETA","DARIA","DANIELLA","CORINNA","ALANNA","ABBEY","ROXANE","ROSEANNA","MAGNOLIA","LIDA","KYLE","JOELLEN","ERA","CORAL","CARLEEN","TRESA","PEGGIE","NOVELLA","NILA","MAYBELLE","JENELLE","CARINA","NOVA","MELINA","MARQUERITE","MARGARETTE","JOSEPHINA","EVONNE","DEVIN","CINTHIA","ALBINA","TOYA","TAWNYA","SHERITA","SANTOS","MYRIAM","LIZABETH","LISE","KEELY","JENNI","GISELLE","CHERYLE","ARDITH","ARDIS","ALESHA","ADRIANE","SHAINA","LINNEA","KAROLYN","HONG","FLORIDA","FELISHA","DORI","DARCI","ARTIE","ARMIDA","ZOLA","XIOMARA","VERGIE","SHAMIKA","NENA","NANNETTE","MAXIE","LOVIE","JEANE","JAIMIE","INGE","FARRAH","ELAINA","CAITLYN","STARR","FELICITAS","CHERLY","CARYL","YOLONDA","YASMIN","TEENA","PRUDENCE","PENNIE","NYDIA","MACKENZIE","ORPHA","MARVEL","LIZBETH","LAURETTE","JERRIE","HERMELINDA","CAROLEE","TIERRA","MIRIAN","META","MELONY","KORI",
"JENNETTE","JAMILA","ENA","ANH","YOSHIKO","SUSANNAH","SALINA","RHIANNON","JOLEEN","CRISTINE","ASHTON","ARACELY","TOMEKA","SHALONDA","MARTI","LACIE","KALA","JADA","ILSE","HAILEY","BRITTANI","ZONA","SYBLE","SHERRYL","RANDY","NIDIA","MARLO","KANDICE","KANDI","DEB","DEAN","AMERICA","ALYCIA","TOMMY","RONNA","NORENE","MERCY","JOSE","INGEBORG","GIOVANNA","GEMMA","CHRISTEL","AUDRY","ZORA","VITA","VAN","TRISH","STEPHAINE","SHIRLEE","SHANIKA","MELONIE","MAZIE","JAZMIN","INGA","HOA","HETTIE","GERALYN","FONDA","ESTRELLA","ADELLA","SU","SARITA","RINA","MILISSA","MARIBETH","GOLDA","EVON","ETHELYN","ENEDINA","CHERISE","CHANA","VELVA","TAWANNA","SADE","MIRTA","LI","KARIE","JACINTA","ELNA","DAVINA","CIERRA","ASHLIE","ALBERTHA","TANESHA","STEPHANI","NELLE","MINDI","LU","LORINDA","LARUE","FLORENE","DEMETRA","DEDRA","CIARA","CHANTELLE","ASHLY","SUZY","ROSALVA","NOELIA","LYDA","LEATHA","KRYSTYNA","KRISTAN","KARRI","DARLINE","DARCIE","CINDA","CHEYENNE","CHERRIE","AWILDA","ALMEDA","ROLANDA","LANETTE","JERILYN","GISELE","EVALYN","CYNDI","CLETA","CARIN","ZINA","ZENA","VELIA","TANIKA","PAUL","CHARISSA","THOMAS","TALIA","MARGARETE","LAVONDA","KAYLEE","KATHLENE","JONNA","IRENA","ILONA","IDALIA","CANDIS","CANDANCE","BRANDEE","ANITRA","ALIDA","SIGRID","NICOLETTE","MARYJO","LINETTE","HEDWIG","CHRISTIANA","CASSIDY","ALEXIA","TRESSIE","MODESTA","LUPITA","LITA","GLADIS","EVELIA","DAVIDA","CHERRI","CECILY","ASHELY","ANNABEL","AGUSTINA","WANITA","SHIRLY","ROSAURA","HULDA","EUN","BAILEY","YETTA","VERONA","THOMASINA","SIBYL","SHANNAN","MECHELLE","LUE","LEANDRA","LANI","KYLEE","KANDY","JOLYNN","FERNE","EBONI","CORENE","ALYSIA","ZULA","NADA","MOIRA","LYNDSAY","LORRETTA","JUAN","JAMMIE","HORTENSIA","GAYNELL","CAMERON","ADRIA","VINA","VICENTA","TANGELA","STEPHINE","NORINE","NELLA","LIANA","LESLEE","KIMBERELY","ILIANA","GLORY","FELICA","EMOGENE","ELFRIEDE","EDEN","EARTHA","CARMA","BEA","OCIE","MARRY","LENNIE","KIARA","JACALYN","CARLOTA","ARIELLE","YU","STAR","OTILIA","KIRSTIN","KACEY","JOHNETTA","JOEY","JOET
TA","JERALDINE","JAUNITA","ELANA","DORTHEA","CAMI","AMADA","ADELIA","VERNITA","TAMAR","SIOBHAN","RENEA","RASHIDA","OUIDA","ODELL","NILSA","MERYL","KRISTYN","JULIETA","DANICA","BREANNE","AUREA","ANGLEA","SHERRON","ODETTE","MALIA","LORELEI","LIN","LEESA","KENNA","KATHLYN","FIONA","CHARLETTE","SUZIE","SHANTELL","SABRA","RACQUEL","MYONG","MIRA","MARTINE","LUCIENNE","LAVADA","JULIANN","JOHNIE","ELVERA","DELPHIA","CLAIR","CHRISTIANE","CHAROLETTE","CARRI","AUGUSTINE","ASHA","ANGELLA","PAOLA","NINFA","LEDA","LAI","EDA","SUNSHINE","STEFANI","SHANELL","PALMA","MACHELLE","LISSA","KECIA","KATHRYNE","KARLENE","JULISSA","JETTIE","JENNIFFER","HUI","CORRINA","CHRISTOPHER","CAROLANN","ALENA","TESS","ROSARIA","MYRTICE","MARYLEE","LIANE","KENYATTA","JUDIE","JANEY","IN","ELMIRA","ELDORA","DENNA","CRISTI","CATHI","ZAIDA","VONNIE","VIVA","VERNIE","ROSALINE","MARIELA","LUCIANA","LESLI","KARAN","FELICE","DENEEN","ADINA","WYNONA","TARSHA","SHERON","SHASTA","SHANITA","SHANI","SHANDRA","RANDA","PINKIE","PARIS","NELIDA","MARILOU","LYLA","LAURENE","LACI","JOI","JANENE","DOROTHA","DANIELE","DANI","CAROLYNN","CARLYN","BERENICE","AYESHA","ANNELIESE","ALETHEA","THERSA","TAMIKO","RUFINA","OLIVA","MOZELL","MARYLYN","MADISON","KRISTIAN","KATHYRN","KASANDRA","KANDACE","JANAE","GABRIEL","DOMENICA","DEBBRA","DANNIELLE","CHUN","BUFFY","BARBIE","ARCELIA","AJA","ZENOBIA","SHAREN","SHAREE","PATRICK","PAGE","MY","LAVINIA","KUM","KACIE","JACKELINE","HUONG","FELISA","EMELIA","ELEANORA","CYTHIA","CRISTIN","CLYDE","CLARIBEL","CARON","ANASTACIA","ZULMA","ZANDRA","YOKO","TENISHA","SUSANN","SHERILYN","SHAY","SHAWANDA","SABINE","ROMANA","MATHILDA","LINSEY","KEIKO","JOANA","ISELA","GRETTA","GEORGETTA","EUGENIE","DUSTY","DESIRAE","DELORA","CORAZON","ANTONINA","ANIKA","WILLENE","TRACEE","TAMATHA","REGAN","NICHELLE","MICKIE","MAEGAN","LUANA","LANITA","KELSIE","EDELMIRA","BREE","AFTON","TEODORA","TAMIE","SHENA","MEG","LINH","KELI","KACI","DANYELLE","BRITT","ARLETTE","ALBERTINE","ADELLE","TIFFINY","STORMY","SIMONA","NUMBER
S","NICOLASA","NICHOL","NIA","NAKISHA","MEE","MAIRA","LOREEN","KIZZY","JOHNNY","JAY","FALLON","CHRISTENE","BOBBYE","ANTHONY","YING","VINCENZA","TANJA","RUBIE","RONI","QUEENIE","MARGARETT","KIMBERLI","IRMGARD","IDELL","HILMA","EVELINA","ESTA","EMILEE","DENNISE","DANIA","CARL","CARIE","ANTONIO","WAI","SANG","RISA","RIKKI","PARTICIA","MUI","MASAKO","MARIO","LUVENIA","LOREE","LONI","LIEN","KEVIN","GIGI","FLORENCIA","DORIAN","DENITA","DALLAS","CHI","BILLYE","ALEXANDER","TOMIKA","SHARITA","RANA","NIKOLE","NEOMA","MARGARITE","MADALYN","LUCINA","LAILA","KALI","JENETTE","GABRIELE","EVELYNE","ELENORA","CLEMENTINA","ALEJANDRINA","ZULEMA","VIOLETTE","VANNESSA","THRESA","RETTA","PIA","PATIENCE","NOELLA","NICKIE","JONELL","DELTA","CHUNG","CHAYA","CAMELIA","BETHEL","ANYA","ANDREW","THANH","SUZANN","SPRING","SHU","MILA","LILLA","LAVERNA","KEESHA","KATTIE","GIA","GEORGENE","EVELINE","ESTELL","ELIZBETH","VIVIENNE","VALLIE","TRUDIE","STEPHANE","MICHEL","MAGALY","MADIE","KENYETTA","KARREN","JANETTA","HERMINE","HARMONY","DRUCILLA","DEBBI","CELESTINA","CANDIE","BRITNI","BECKIE","AMINA","ZITA","YUN","YOLANDE","VIVIEN","VERNETTA","TRUDI","SOMMER","PEARLE","PATRINA","OSSIE","NICOLLE","LOYCE","LETTY","LARISA","KATHARINA","JOSELYN","JONELLE","JENELL","IESHA","HEIDE","FLORINDA","FLORENTINA","FLO","ELODIA","DORINE","BRUNILDA","BRIGID","ASHLI","ARDELLA","TWANA","THU","TARAH","SUNG","SHEA","SHAVON","SHANE","SERINA","RAYNA","RAMONITA","NGA","MARGURITE","LUCRECIA","KOURTNEY","KATI","JESUS","JESENIA","DIAMOND","CRISTA","AYANA","ALICA","ALIA","VINNIE","SUELLEN","ROMELIA","RACHELL","PIPER","OLYMPIA","MICHIKO","KATHALEEN","JOLIE","JESSI","JANESSA","HANA","HA","ELEASE","CARLETTA","BRITANY","SHONA","SALOME","ROSAMOND","REGENA","RAINA","NGOC","NELIA","LOUVENIA","LESIA","LATRINA","LATICIA","LARHONDA","JINA","JACKI","HOLLIS","HOLLEY","EMMY","DEEANN","CORETTA","ARNETTA","VELVET","THALIA","SHANICE","NETA","MIKKI","MICKI","LONNA","LEANA","LASHUNDA","KILEY","JOYE","JACQULYN","IGNACIA","HYUN","HIROKO","HENRY","H
ENRIETTE","ELAYNE","DELINDA","DARNELL","DAHLIA","COREEN","CONSUELA","CONCHITA","CELINE","BABETTE","AYANNA","ANETTE","ALBERTINA","SKYE","SHAWNEE","SHANEKA","QUIANA","PAMELIA","MIN","MERRI","MERLENE","MARGIT","KIESHA","KIERA","KAYLENE","JODEE","JENISE","ERLENE","EMMIE","ELSE","DARYL","DALILA","DAISEY","CODY","CASIE","BELIA","BABARA","VERSIE","VANESA","SHELBA","SHAWNDA","SAM","NORMAN","NIKIA","NAOMA","MARNA","MARGERET","MADALINE","LAWANA","KINDRA","JUTTA","JAZMINE","JANETT","HANNELORE","GLENDORA","GERTRUD","GARNETT","FREEDA","FREDERICA","FLORANCE","FLAVIA","DENNIS","CARLINE","BEVERLEE","ANJANETTE","VALDA","TRINITY","TAMALA","STEVIE","SHONNA","SHA","SARINA","ONEIDA","MICAH","MERILYN","MARLEEN","LURLINE","LENNA","KATHERIN","JIN","JENI","HAE","GRACIA","GLADY","FARAH","ERIC","ENOLA","EMA","DOMINQUE","DEVONA","DELANA","CECILA","CAPRICE","ALYSHA","ALI","ALETHIA","VENA","THERESIA","TAWNY","SONG","SHAKIRA","SAMARA","SACHIKO","RACHELE","PAMELLA","NICKY","MARNI","MARIEL","MAREN","MALISA","LIGIA","LERA","LATORIA","LARAE","KIMBER","KATHERN","KAREY","JENNEFER","JANETH","HALINA","FREDIA","DELISA","DEBROAH","CIERA","CHIN","ANGELIKA","ANDREE","ALTHA","YEN","VIVAN","TERRESA","TANNA","SUK","SUDIE","SOO","SIGNE","SALENA","RONNI","REBBECCA","MYRTIE","MCKENZIE","MALIKA","MAIDA","LOAN","LEONARDA","KAYLEIGH","FRANCE","ETHYL","ELLYN","DAYLE","CAMMIE","BRITTNI","BIRGIT","AVELINA","ASUNCION","ARIANNA","AKIKO","VENICE","TYESHA","TONIE","TIESHA","TAKISHA","STEFFANIE","SINDY","SANTANA","MEGHANN","MANDA","MACIE","LADY","KELLYE","KELLEE","JOSLYN","JASON","INGER","INDIRA","GLINDA","GLENNIS","FERNANDA","FAUSTINA","ENEIDA","ELICIA","DOT","DIGNA","DELL","ARLETTA","ANDRE","WILLIA","TAMMARA","TABETHA","SHERRELL","SARI","REFUGIO","REBBECA","PAULETTA","NIEVES","NATOSHA","NAKITA","MAMMIE","KENISHA","KAZUKO","KASSIE","GARY","EARLEAN","DAPHINE","CORLISS","CLOTILDE","CAROLYNE","BERNETTA","AUGUSTINA","AUDREA","ANNIS","ANNABELL","YAN","TENNILLE","TAMICA","SELENE","SEAN","ROSANA","REGENIA","QIANA","MARKITA","MACY"
,"LEEANNE","LAURINE","KYM","JESSENIA","JANITA","GEORGINE","GENIE","EMIKO","ELVIE","DEANDRA","DAGMAR","CORIE","COLLEN","CHERISH","ROMAINE","PORSHA","PEARLENE","MICHELINE","MERNA","MARGORIE","MARGARETTA","LORE","KENNETH","JENINE","HERMINA","FREDERICKA","ELKE","DRUSILLA","DORATHY","DIONE","DESIRE","CELENA","BRIGIDA","ANGELES","ALLEGRA","THEO","TAMEKIA","SYNTHIA","STEPHEN","SOOK","SLYVIA","ROSANN","REATHA","RAYE","MARQUETTA","MARGART","LING","LAYLA","KYMBERLY","KIANA","KAYLEEN","KATLYN","KARMEN","JOELLA","IRINA","EMELDA","ELENI","DETRA","CLEMMIE","CHERYLL","CHANTELL","CATHEY","ARNITA","ARLA","ANGLE","ANGELIC","ALYSE","ZOFIA","THOMASINE","TENNIE","SON","SHERLY","SHERLEY","SHARYL","REMEDIOS","PETRINA","NICKOLE","MYUNG","MYRLE","MOZELLA","LOUANNE","LISHA","LATIA","LANE","KRYSTA","JULIENNE","JOEL","JEANENE","JACQUALINE","ISAURA","GWENDA","EARLEEN","DONALD","CLEOPATRA","CARLIE","AUDIE","ANTONIETTA","ALISE","ALEX","VERDELL","VAL","TYLER","TOMOKO","THAO","TALISHA","STEVEN","SO","SHEMIKA","SHAUN","SCARLET","SAVANNA","SANTINA","ROSIA","RAEANN","ODILIA","NANA","MINNA","MAGAN","LYNELLE","LE","KARMA","JOEANN","IVANA","INELL","ILANA","HYE","HONEY","HEE","GUDRUN","FRANK","DREAMA","CRISSY","CHANTE","CARMELINA","ARVILLA","ARTHUR","ANNAMAE","ALVERA","ALEIDA","AARON","YEE","YANIRA","VANDA","TIANNA","TAM","STEFANIA","SHIRA","PERRY","NICOL","NANCIE","MONSERRATE","MINH","MELYNDA","MELANY","MATTHEW","LOVELLA","LAURE","KIRBY","KACY","JACQUELYNN","HYON","GERTHA","FRANCISCO","ELIANA","CHRISTENA","CHRISTEEN","CHARISE","CATERINA","CARLEY","CANDYCE","ARLENA","AMMIE","YANG","WILLETTE","VANITA","TUYET","TINY","SYREETA","SILVA","SCOTT","RONALD","PENNEY","NYLA","MICHAL","MAURICE","MARYAM","MARYA","MAGEN","LUDIE","LOMA","LIVIA","LANELL","KIMBERLIE","JULEE","DONETTA","DIEDRA","DENISHA","DEANE","DAWNE","CLARINE","CHERRYL","BRONWYN","BRANDON","ALLA","VALERY","TONDA","SUEANN","SORAYA","SHOSHANA","SHELA","SHARLEEN","SHANELLE","NERISSA","MICHEAL","MERIDITH","MELLIE","MAYE","MAPLE","MAGARET","LUIS","LILI","LE
ONILA","LEONIE","LEEANNA","LAVONIA","LAVERA","KRISTEL","KATHEY","KATHE","JUSTIN","JULIAN","JIMMY","JANN","ILDA","HILDRED","HILDEGARDE","GENIA","FUMIKO","EVELIN","ERMELINDA","ELLY","DUNG","DOLORIS","DIONNA","DANAE","BERNEICE","ANNICE","ALIX","VERENA","VERDIE","TRISTAN","SHAWNNA","SHAWANA","SHAUNNA","ROZELLA","RANDEE","RANAE","MILAGRO","LYNELL","LUISE","LOUIE","LOIDA","LISBETH","KARLEEN","JUNITA","JONA","ISIS","HYACINTH","HEDY","GWENN","ETHELENE","ERLINE","EDWARD","DONYA","DOMONIQUE","DELICIA","DANNETTE","CICELY","BRANDA","BLYTHE","BETHANN","ASHLYN","ANNALEE","ALLINE","YUKO","VELLA","TRANG","TOWANDA","TESHA","SHERLYN","NARCISA","MIGUELINA","MERI","MAYBELL","MARLANA","MARGUERITA","MADLYN","LUNA","LORY","LORIANN","LIBERTY","LEONORE","LEIGHANN","LAURICE","LATESHA","LARONDA","KATRICE","KASIE","KARL","KALEY","JADWIGA","GLENNIE","GEARLDINE","FRANCINA","EPIFANIA","DYAN","DORIE","DIEDRE","DENESE","DEMETRICE","DELENA","DARBY","CRISTIE","CLEORA","CATARINA","CARISA","BERNIE","BARBERA","ALMETA","TRULA","TEREASA","SOLANGE","SHEILAH","SHAVONNE","SANORA","ROCHELL","MATHILDE","MARGARETA","MAIA","LYNSEY","LAWANNA","LAUNA","KENA","KEENA","KATIA","JAMEY","GLYNDA","GAYLENE","ELVINA","ELANOR","DANUTA","DANIKA","CRISTEN","CORDIE","COLETTA","CLARITA","CARMON","BRYNN","AZUCENA","AUNDREA","ANGELE","YI","WALTER","VERLIE","VERLENE","TAMESHA","SILVANA","SEBRINA","SAMIRA","REDA","RAYLENE","PENNI","PANDORA","NORAH","NOMA","MIREILLE","MELISSIA","MARYALICE","LARAINE","KIMBERY","KARYL","KARINE","KAM","JOLANDA","JOHANA","JESUSA","JALEESA","JAE","JACQUELYNE","IRISH","ILUMINADA","HILARIA","HANH","GENNIE","FRANCIE","FLORETTA","EXIE","EDDA","DREMA","DELPHA","BEV","BARBAR","ASSUNTA","ARDELL","ANNALISA","ALISIA","YUKIKO","YOLANDO","WONDA","WEI","WALTRAUD","VETA","TEQUILA","TEMEKA","TAMEIKA","SHIRLEEN","SHENITA","PIEDAD","OZELLA","MIRTHA","MARILU","KIMIKO","JULIANE","JENICE","JEN","JANAY","JACQUILINE","HILDE","FE","FAE","EVAN","EUGENE","ELOIS","ECHO","DEVORAH","CHAU","BRINDA","BETSEY","ARMINDA","ARACELIS","A
PRYL","ANNETT","ALISHIA","VEOLA","USHA","TOSHIKO","THEOLA","TASHIA","TALITHA","SHERY","RUDY","RENETTA","REIKO","RASHEEDA","OMEGA","OBDULIA","MIKA","MELAINE","MEGGAN","MARTIN","MARLEN","MARGET","MARCELINE","MANA","MAGDALEN","LIBRADA","LEZLIE","LEXIE","LATASHIA","LASANDRA","KELLE","ISIDRA","ISA","INOCENCIA","GWYN","FRANCOISE","ERMINIA","ERINN","DIMPLE","DEVORA","CRISELDA","ARMANDA","ARIE","ARIANE","ANGELO","ANGELENA","ALLEN","ALIZA","ADRIENE","ADALINE","XOCHITL","TWANNA","TRAN","TOMIKO","TAMISHA","TAISHA","SUSY","SIU","RUTHA","ROXY","RHONA","RAYMOND","OTHA","NORIKO","NATASHIA","MERRIE","MELVIN","MARINDA","MARIKO","MARGERT","LORIS","LIZZETTE","LEISHA","KAILA","KA","JOANNIE","JERRICA","JENE","JANNET","JANEE","JACINDA","HERTA","ELENORE","DORETTA","DELAINE","DANIELL","CLAUDIE","CHINA","BRITTA","APOLONIA","AMBERLY","ALEASE","YURI","YUK","WEN","WANETA","UTE","TOMI","SHARRI","SANDIE","ROSELLE","REYNALDA","RAGUEL","PHYLICIA","PATRIA","OLIMPIA","ODELIA","MITZIE","MITCHELL","MISS","MINDA","MIGNON","MICA","MENDY","MARIVEL","MAILE","LYNETTA","LAVETTE","LAURYN","LATRISHA","LAKIESHA","KIERSTEN","KARY","JOSPHINE","JOLYN","JETTA","JANISE","JACQUIE","IVELISSE","GLYNIS","GIANNA","GAYNELLE","EMERALD","DEMETRIUS","DANYELL","DANILLE","DACIA","CORALEE","CHER","CEOLA","BRETT","BELL","ARIANNE","ALESHIA","YUNG","WILLIEMAE","TROY","TRINH","THORA","TAI","SVETLANA","SHERIKA","SHEMEKA","SHAUNDA","ROSELINE","RICKI","MELDA","MALLIE","LAVONNA","LATINA","LARRY","LAQUANDA","LALA","LACHELLE","KLARA","KANDIS","JOHNA","JEANMARIE","JAYE","HANG","GRAYCE","GERTUDE","EMERITA","EBONIE","CLORINDA","CHING","CHERY","CAROLA","BREANN","BLOSSOM","BERNARDINE","BECKI","ARLETHA","ARGELIA","ARA","ALITA","YULANDA","YON","YESSENIA","TOBI","TASIA","SYLVIE","SHIRL","SHIRELY","SHERIDAN","SHELLA","SHANTELLE","SACHA","ROYCE","REBECKA","REAGAN","PROVIDENCIA","PAULENE","MISHA","MIKI","MARLINE","MARICA","LORITA","LATOYIA","LASONYA","KERSTIN","KENDA","KEITHA","KATHRIN","JAYMIE","JACK","GRICELDA","GINETTE","ERYN","ELINA","ELFRIEDA
","DANYEL","CHEREE","CHANELLE","BARRIE","AVERY","AURORE","ANNAMARIA","ALLEEN","AILENE","AIDE","YASMINE","VASHTI","VALENTINE","TREASA","TORY","TIFFANEY","SHERYLL","SHARIE","SHANAE","SAU","RAISA","PA","NEDA","MITSUKO","MIRELLA","MILDA","MARYANNA","MARAGRET","MABELLE","LUETTA","LORINA","LETISHA","LATARSHA","LANELLE","LAJUANA","KRISSY","KARLY","KARENA","JON","JESSIKA","JERICA","JEANELLE","JANUARY","JALISA","JACELYN","IZOLA","IVEY","GREGORY","EUNA","ETHA","DREW","DOMITILA","DOMINICA","DAINA","CREOLA","CARLI","CAMIE","BUNNY","BRITTNY","ASHANTI","ANISHA","ALEEN","ADAH","YASUKO","WINTER","VIKI","VALRIE","TONA","TINISHA","THI","TERISA","TATUM","TANEKA","SIMONNE","SHALANDA","SERITA","RESSIE","REFUGIA","PAZ","OLENE","NA","MERRILL","MARGHERITA","MANDIE","MAN","MAIRE","LYNDIA","LUCI","LORRIANE","LORETA","LEONIA","LAVONA","LASHAWNDA","LAKIA","KYOKO","KRYSTINA","KRYSTEN","KENIA","KELSI","JUDE","JEANICE","ISOBEL","GEORGIANN","GENNY","FELICIDAD","EILENE","DEON","DELOISE","DEEDEE","DANNIE","CONCEPTION","CLORA","CHERILYN","CHANG","CALANDRA","BERRY","ARMANDINA","ANISA","ULA","TIMOTHY","TIERA","THERESSA","STEPHANIA","SIMA","SHYLA","SHONTA","SHERA","SHAQUITA","SHALA","SAMMY","ROSSANA","NOHEMI","NERY","MORIAH","MELITA","MELIDA","MELANI","MARYLYNN","MARISHA","MARIETTE","MALORIE","MADELENE","LUDIVINA","LORIA","LORETTE","LORALEE","LIANNE","LEON","LAVENIA","LAURINDA","LASHON","KIT","KIMI","KEILA","KATELYNN","KAI","JONE","JOANE","JI","JAYNA","JANELLA","JA","HUE","HERTHA","FRANCENE","ELINORE","DESPINA","DELSIE","DEEDRA","CLEMENCIA","CARRY","CAROLIN","CARLOS","BULAH","BRITTANIE","BOK","BLONDELL","BIBI","BEAULAH","BEATA","ANNITA","AGRIPINA","VIRGEN","VALENE","UN","TWANDA","TOMMYE","TOI","TARRA","TARI","TAMMERA","SHAKIA","SADYE","RUTHANNE","ROCHEL","RIVKA","PURA","NENITA","NATISHA","MING","MERRILEE","MELODEE","MARVIS","LUCILLA","LEENA","LAVETA","LARITA","LANIE","KEREN","ILEEN","GEORGEANN","GENNA","GENESIS","FRIDA","EWA","EUFEMIA","EMELY","ELA","EDYTH","DEONNA","DEADRA","DARLENA","CHANELL","CHAN","
CATHERN","CASSONDRA","CASSAUNDRA","BERNARDA","BERNA","ARLINDA","ANAMARIA","ALBERT","WESLEY","VERTIE","VALERI","TORRI","TATYANA","STASIA","SHERISE","SHERILL","SEASON","SCOTTIE","SANDA","RUTHE","ROSY","ROBERTO","ROBBI","RANEE","QUYEN","PEARLY","PALMIRA","ONITA","NISHA","NIESHA","NIDA","NEVADA","NAM","MERLYN","MAYOLA","MARYLOUISE","MARYLAND","MARX","MARTH","MARGENE","MADELAINE","LONDA","LEONTINE","LEOMA","LEIA","LAWRENCE","LAURALEE","LANORA","LAKITA","KIYOKO","KETURAH","KATELIN","KAREEN","JONIE","JOHNETTE","JENEE","JEANETT","IZETTA","HIEDI","HEIKE","HASSIE","HAROLD","GIUSEPPINA","GEORGANN","FIDELA","FERNANDE","ELWANDA","ELLAMAE","ELIZ","DUSTI","DOTTY","CYNDY","CORALIE","CELESTA","ARGENTINA","ALVERTA","XENIA","WAVA","VANETTA","TORRIE","TASHINA","TANDY","TAMBRA","TAMA","STEPANIE","SHILA","SHAUNTA","SHARAN","SHANIQUA","SHAE","SETSUKO","SERAFINA","SANDEE","ROSAMARIA","PRISCILA","OLINDA","NADENE","MUOI","MICHELINA","MERCEDEZ","MARYROSE","MARIN","MARCENE","MAO","MAGALI","MAFALDA","LOGAN","LINN","LANNIE","KAYCE","KAROLINE","KAMILAH","KAMALA","JUSTA","JOLINE","JENNINE","JACQUETTA","IRAIDA","GERALD","GEORGEANNA","FRANCHESCA","FAIRY","EMELINE","ELANE","EHTEL","EARLIE","DULCIE","DALENE","CRIS","CLASSIE","CHERE","CHARIS","CAROYLN","CARMINA","CARITA","BRIAN","BETHANIE","AYAKO","ARICA","AN","ALYSA","ALESSANDRA","AKILAH","ADRIEN","ZETTA","YOULANDA","YELENA","YAHAIRA","XUAN","WENDOLYN","VICTOR","TIJUANA","TERRELL","TERINA","TERESIA","SUZI","SUNDAY","SHERELL","SHAVONDA","SHAUNTE","SHARDA","SHAKITA","SENA","RYANN","RUBI","RIVA","REGINIA","REA","RACHAL","PARTHENIA","PAMULA","MONNIE","MONET","MICHAELE","MELIA","MARINE","MALKA","MAISHA","LISANDRA","LEO","LEKISHA","LEAN","LAURENCE","LAKENDRA","KRYSTIN","KORTNEY","KIZZIE","KITTIE","KERA","KENDAL","KEMBERLY","KANISHA","JULENE","JULE","JOSHUA","JOHANNE","JEFFREY","JAMEE","HAN","HALLEY","GIDGET","GALINA","FREDRICKA","FLETA","FATIMAH","EUSEBIA","ELZA","ELEONORE","DORTHEY","DORIA","DONELLA","DINORAH","DELORSE","CLARETHA","CHRISTINIA","CHARLYN","B
ONG","BELKIS","AZZIE","ANDERA","AIKO","ADENA","YER","YAJAIRA","WAN","VANIA","ULRIKE","TOSHIA","TIFANY","STEFANY","SHIZUE","SHENIKA","SHAWANNA","SHAROLYN","SHARILYN","SHAQUANA","SHANTAY","SEE","ROZANNE","ROSELEE","RICKIE","REMONA","REANNA","RAELENE","QUINN","PHUNG","PETRONILA","NATACHA","NANCEY","MYRL","MIYOKO","MIESHA","MERIDETH","MARVELLA","MARQUITTA","MARHTA","MARCHELLE","LIZETH","LIBBIE","LAHOMA","LADAWN","KINA","KATHELEEN","KATHARYN","KARISA","KALEIGH","JUNIE","JULIEANN","JOHNSIE","JANEAN","JAIMEE","JACKQUELINE","HISAKO","HERMA","HELAINE","GWYNETH","GLENN","GITA","EUSTOLIA","EMELINA","ELIN","EDRIS","DONNETTE","DONNETTA","DIERDRE","DENAE","DARCEL","CLAUDE","CLARISA","CINDERELLA","CHIA","CHARLESETTA","CHARITA","CELSA","CASSY","CASSI","CARLEE","BRUNA","BRITTANEY","BRANDE","BILLI","BAO","ANTONETTA","ANGLA","ANGELYN","ANALISA","ALANE","WENONA","WENDIE","VERONIQUE","VANNESA","TOBIE","TEMPIE","SUMIKO","SULEMA","SPARKLE","SOMER","SHEBA","SHAYNE","SHARICE","SHANEL","SHALON","SAGE","ROY","ROSIO","ROSELIA","RENAY","REMA","REENA","PORSCHE","PING","PEG","OZIE","ORETHA","ORALEE","ODA","NU","NGAN","NAKESHA","MILLY","MARYBELLE","MARLIN","MARIS","MARGRETT","MARAGARET","MANIE","LURLENE","LILLIA","LIESELOTTE","LAVELLE","LASHAUNDA","LAKEESHA","KEITH","KAYCEE","KALYN","JOYA","JOETTE","JENAE","JANIECE","ILLA","GRISEL","GLAYDS","GENEVIE","GALA","FREDDA","FRED","ELMER","ELEONOR","DEBERA","DEANDREA","DAN","CORRINNE","CORDIA","CONTESSA","COLENE","CLEOTILDE","CHARLOTT","CHANTAY","CECILLE","BEATRIS","AZALEE","ARLEAN","ARDATH","ANJELICA","ANJA","ALFREDIA","ALEISHA","ADAM","ZADA","YUONNE","XIAO","WILLODEAN","WHITLEY","VENNIE","VANNA","TYISHA","TOVA","TORIE","TONISHA","TILDA","TIEN","TEMPLE","SIRENA","SHERRIL","SHANTI","SHAN","SENAIDA","SAMELLA","ROBBYN","RENDA","REITA","PHEBE","PAULITA","NOBUKO","NGUYET","NEOMI","MOON","MIKAELA","MELANIA","MAXIMINA","MARG","MAISIE","LYNNA","LILLI","LAYNE","LASHAUN","LAKENYA","LAEL","KIRSTIE","KATHLINE","KASHA","KARLYN","KARIMA","JOVAN","JOSEFINE","JENNELL","
JACQUI","JACKELYN","HYO","HIEN","GRAZYNA","FLORRIE","FLORIA","ELEONORA","DWANA","DORLA","DONG","DELMY","DEJA","DEDE","DANN","CRYSTA","CLELIA","CLARIS","CLARENCE","CHIEKO","CHERLYN","CHERELLE","CHARMAIN","CHARA","CAMMY","BEE","ARNETTE","ARDELLE","ANNIKA","AMIEE","AMEE","ALLENA","YVONE","YUKI","YOSHIE","YEVETTE","YAEL","WILLETTA","VONCILE","VENETTA","TULA","TONETTE","TIMIKA","TEMIKA","TELMA","TEISHA","TAREN","TA","STACEE","SHIN","SHAWNTA","SATURNINA","RICARDA","POK","PASTY","ONIE","NUBIA","MORA","MIKE","MARIELLE","MARIELLA","MARIANELA","MARDELL","MANY","LUANNA","LOISE","LISABETH","LINDSY","LILLIANA","LILLIAM","LELAH","LEIGHA","LEANORA","LANG","KRISTEEN","KHALILAH","KEELEY","KANDRA","JUNKO","JOAQUINA","JERLENE","JANI","JAMIKA","JAME","HSIU","HERMILA","GOLDEN","GENEVIVE","EVIA","EUGENA","EMMALINE","ELFREDA","ELENE","DONETTE","DELCIE","DEEANNA","DARCEY","CUC","CLARINDA","CIRA","CHAE","CELINDA","CATHERYN","CATHERIN","CASIMIRA","CARMELIA","CAMELLIA","BREANA","BOBETTE","BERNARDINA","BEBE","BASILIA","ARLYNE","AMAL","ALAYNA","ZONIA","ZENIA","YURIKO","YAEKO","WYNELL","WILLOW","WILLENA","VERNIA","TU","TRAVIS","TORA","TERRILYN","TERICA","TENESHA","TAWNA","TAJUANA","TAINA","STEPHNIE","SONA","SOL","SINA","SHONDRA","SHIZUKO","SHERLENE","SHERICE","SHARIKA","ROSSIE","ROSENA","RORY","RIMA","RIA","RHEBA","RENNA","PETER","NATALYA","NANCEE","MELODI","MEDA","MAXIMA","MATHA","MARKETTA","MARICRUZ","MARCELENE","MALVINA","LUBA","LOUETTA","LEIDA","LECIA","LAURAN","LASHAWNA","LAINE","KHADIJAH","KATERINE","KASI","KALLIE","JULIETTA","JESUSITA","JESTINE","JESSIA","JEREMY","JEFFIE","JANYCE","ISADORA","GEORGIANNE","FIDELIA","EVITA","EURA","EULAH","ESTEFANA","ELSY","ELIZABET","ELADIA","DODIE","DION","DIA","DENISSE","DELORAS","DELILA","DAYSI","DAKOTA","CURTIS","CRYSTLE","CONCHA","COLBY","CLARETTA","CHU","CHRISTIA","CHARLSIE","CHARLENA","CARYLON","BETTYANN","ASLEY","ASHLEA","AMIRA","AI","AGUEDA","AGNUS","YUETTE","VINITA","VICTORINA","TYNISHA","TREENA","TOCCARA","TISH","THOMASENA","TEGAN","SOILA","SHILO
H","SHENNA","SHARMAINE","SHANTAE","SHANDI","SEPTEMBER","SARAN","SARAI","SANA","SAMUEL","SALLEY","ROSETTE","ROLANDE","REGINE","OTELIA","OSCAR","OLEVIA","NICHOLLE","NECOLE","NAIDA","MYRTA","MYESHA","MITSUE","MINTA","MERTIE","MARGY","MAHALIA","MADALENE","LOVE","LOURA","LOREAN","LEWIS","LESHA","LEONIDA","LENITA","LAVONE","LASHELL","LASHANDRA","LAMONICA","KIMBRA","KATHERINA","KARRY","KANESHA","JULIO","JONG","JENEVA","JAQUELYN","HWA","GILMA","GHISLAINE","GERTRUDIS","FRANSISCA","FERMINA","ETTIE","ETSUKO","ELLIS","ELLAN","ELIDIA","EDRA","DORETHEA","DOREATHA","DENYSE","DENNY","DEETTA","DAINE","CYRSTAL","CORRIN","CAYLA","CARLITA","CAMILA","BURMA","BULA","BUENA","BLAKE","BARABARA","AVRIL","AUSTIN","ALAINE","ZANA","WILHEMINA","WANETTA","VIRGIL","VI","VERONIKA","VERNON","VERLINE","VASILIKI","TONITA","TISA","TEOFILA","TAYNA","TAUNYA","TANDRA","TAKAKO","SUNNI","SUANNE","SIXTA","SHARELL","SEEMA","RUSSELL","ROSENDA","ROBENA","RAYMONDE","PEI","PAMILA","OZELL","NEIDA","NEELY","MISTIE","MICHA","MERISSA","MAURITA","MARYLN","MARYETTA","MARSHALL","MARCELL","MALENA","MAKEDA","MADDIE","LOVETTA","LOURIE","LORRINE","LORILEE","LESTER","LAURENA","LASHAY","LARRAINE","LAREE","LACRESHA","KRISTLE","KRISHNA","KEVA","KEIRA","KAROLE","JOIE","JINNY","JEANNETTA","JAMA","HEIDY","GILBERTE","GEMA","FAVIOLA","EVELYNN","ENDA","ELLI","ELLENA","DIVINA","DAGNY","COLLENE","CODI","CINDIE","CHASSIDY","CHASIDY","CATRICE","CATHERINA","CASSEY","CAROLL","CARLENA","CANDRA","CALISTA","BRYANNA","BRITTENY","BEULA","BARI","AUDRIE","AUDRIA","ARDELIA","ANNELLE","ANGILA","ALONA","ALLYN","DOUGLAS","ROGER","JONATHAN","RALPH","NICHOLAS","BENJAMIN","BRUCE","HARRY","WAYNE","STEVE","HOWARD","ERNEST","PHILLIP","TODD","CRAIG","ALAN","PHILIP","EARL","DANNY","BRYAN","STANLEY","LEONARD","NATHAN","MANUEL","RODNEY","MARVIN","VINCENT","JEFFERY","JEFF","CHAD","JACOB","ALFRED","BRADLEY","HERBERT","FREDERICK","EDWIN","DON","RICKY","RANDALL","BARRY","BERNARD","LEROY","MARCUS","THEODORE","CLIFFORD","MIGUEL","JIM","TOM","CALVIN","BILL","LLOYD","
DEREK","WARREN","DARRELL","JEROME","FLOYD","ALVIN","TIM","GORDON","GREG","JORGE","DUSTIN","PEDRO","DERRICK","ZACHARY","HERMAN","GLEN","HECTOR","RICARDO","RICK","BRENT","RAMON","GILBERT","MARC","REGINALD","RUBEN","NATHANIEL","RAFAEL","EDGAR","MILTON","RAUL","BEN","CHESTER","DUANE","FRANKLIN","BRAD","RON","ROLAND","ARNOLD","HARVEY","JARED","ERIK","DARRYL","NEIL","JAVIER","FERNANDO","CLINTON","TED","MATHEW","TYRONE","DARREN","LANCE","KURT","ALLAN","NELSON","GUY","CLAYTON","HUGH","MAX","DWAYNE","DWIGHT","ARMANDO","FELIX","EVERETT","IAN","WALLACE","KEN","BOB","ALFREDO","ALBERTO","DAVE","IVAN","BYRON","ISAAC","MORRIS","CLIFTON","WILLARD","ROSS","ANDY","SALVADOR","KIRK","SERGIO","SETH","KENT","TERRANCE","EDUARDO","TERRENCE","ENRIQUE","WADE","STUART","FREDRICK","ARTURO","ALEJANDRO","NICK","LUTHER","WENDELL","JEREMIAH","JULIUS","OTIS","TREVOR","OLIVER","LUKE","HOMER","GERARD","DOUG","KENNY","HUBERT","LYLE","MATT","ALFONSO","ORLANDO","REX","CARLTON","ERNESTO","NEAL","PABLO","LORENZO","OMAR","WILBUR","GRANT","HORACE","RODERICK","ABRAHAM","WILLIS","RICKEY","ANDRES","CESAR","JOHNATHAN","MALCOLM","RUDOLPH","DAMON","KELVIN","PRESTON","ALTON","ARCHIE","MARCO","WM","PETE","RANDOLPH","GARRY","GEOFFREY","JONATHON","FELIPE","GERARDO","ED","DOMINIC","DELBERT","COLIN","GUILLERMO","EARNEST","LUCAS","BENNY","SPENCER","RODOLFO","MYRON","EDMUND","GARRETT","SALVATORE","CEDRIC","LOWELL","GREGG","SHERMAN","WILSON","SYLVESTER","ROOSEVELT","ISRAEL","JERMAINE","FORREST","WILBERT","LELAND","SIMON","CLARK","IRVING","BRYANT","OWEN","RUFUS","WOODROW","KRISTOPHER","MACK","LEVI","MARCOS","GUSTAVO","JAKE","LIONEL","GILBERTO","CLINT","NICOLAS","ISMAEL","ORVILLE","ERVIN","DEWEY","AL","WILFRED","JOSH","HUGO","IGNACIO","CALEB","TOMAS","SHELDON","ERICK","STEWART","DOYLE","DARREL","ROGELIO","TERENCE","SANTIAGO","ALONZO","ELIAS","BERT","ELBERT","RAMIRO","CONRAD","NOAH","GRADY","PHIL","CORNELIUS","LAMAR","ROLANDO","CLAY","PERCY","DEXTER","BRADFORD","DARIN","AMOS","MOSES","IRVIN","SAUL","ROMAN","RANDAL","TIMMY","
DARRIN","WINSTON","BRENDAN","ABEL","DOMINICK","BOYD","EMILIO","ELIJAH","DOMINGO","EMMETT","MARLON","EMANUEL","JERALD","EDMOND","EMIL","DEWAYNE","WILL","OTTO","TEDDY","REYNALDO","BRET","JESS","TRENT","HUMBERTO","EMMANUEL","STEPHAN","VICENTE","LAMONT","GARLAND","MILES","EFRAIN","HEATH","RODGER","HARLEY","ETHAN","ELDON","ROCKY","PIERRE","JUNIOR","FREDDY","ELI","BRYCE","ANTOINE","STERLING","CHASE","GROVER","ELTON","CLEVELAND","DYLAN","CHUCK","DAMIAN","REUBEN","STAN","AUGUST","LEONARDO","JASPER","RUSSEL","ERWIN","BENITO","HANS","MONTE","BLAINE","ERNIE","CURT","QUENTIN","AGUSTIN","MURRAY","JAMAL","ADOLFO","HARRISON","TYSON","BURTON","BRADY","ELLIOTT","WILFREDO","BART","JARROD","VANCE","DENIS","DAMIEN","JOAQUIN","HARLAN","DESMOND","ELLIOT","DARWIN","GREGORIO","BUDDY","XAVIER","KERMIT","ROSCOE","ESTEBAN","ANTON","SOLOMON","SCOTTY","NORBERT","ELVIN","WILLIAMS","NOLAN","ROD","QUINTON","HAL","BRAIN","ROB","ELWOOD","KENDRICK","DARIUS","MOISES","FIDEL","THADDEUS","CLIFF","MARCEL","JACKSON","RAPHAEL","BRYON","ARMAND","ALVARO","JEFFRY","DANE","JOESPH","THURMAN","NED","RUSTY","MONTY","FABIAN","REGGIE","MASON","GRAHAM","ISAIAH","VAUGHN","GUS","LOYD","DIEGO","ADOLPH","NORRIS","MILLARD","ROCCO","GONZALO","DERICK","RODRIGO","WILEY","RIGOBERTO","ALPHONSO","TY","NOE","VERN","REED","JEFFERSON","ELVIS","BERNARDO","MAURICIO","HIRAM","DONOVAN","BASIL","RILEY","NICKOLAS","MAYNARD","SCOT","VINCE","QUINCY","EDDY","SEBASTIAN","FEDERICO","ULYSSES","HERIBERTO","DONNELL","COLE","DAVIS","GAVIN","EMERY","WARD","ROMEO","JAYSON","DANTE","CLEMENT","COY","MAXWELL","JARVIS","BRUNO","ISSAC","DUDLEY","BROCK","SANFORD","CARMELO","BARNEY","NESTOR","STEFAN","DONNY","ART","LINWOOD","BEAU","WELDON","GALEN","ISIDRO","TRUMAN","DELMAR","JOHNATHON","SILAS","FREDERIC","DICK","IRWIN","MERLIN","CHARLEY","MARCELINO","HARRIS","CARLO","TRENTON","KURTIS","HUNTER","AURELIO","WINFRED","VITO","COLLIN","DENVER","CARTER","LEONEL","EMORY","PASQUALE","MOHAMMAD","MARIANO","DANIAL","LANDON","DIRK","BRANDEN","ADAN","BUFORD","GERMAN"
,"WILMER","EMERSON","ZACHERY","FLETCHER","JACQUES","ERROL","DALTON","MONROE","JOSUE","EDWARDO","BOOKER","WILFORD","SONNY","SHELTON","CARSON","THERON","RAYMUNDO","DAREN","HOUSTON","ROBBY","LINCOLN","GENARO","BENNETT","OCTAVIO","CORNELL","HUNG","ARRON","ANTONY","HERSCHEL","GIOVANNI","GARTH","CYRUS","CYRIL","RONNY","LON","FREEMAN","DUNCAN","KENNITH","CARMINE","ERICH","CHADWICK","WILBURN","RUSS","REID","MYLES","ANDERSON","MORTON","JONAS","FOREST","MITCHEL","MERVIN","ZANE","RICH","JAMEL","LAZARO","ALPHONSE","RANDELL","MAJOR","JARRETT","BROOKS","ABDUL","LUCIANO","SEYMOUR","EUGENIO","MOHAMMED","VALENTIN","CHANCE","ARNULFO","LUCIEN","FERDINAND","THAD","EZRA","ALDO","RUBIN","ROYAL","MITCH","EARLE","ABE","WYATT","MARQUIS","LANNY","KAREEM","JAMAR","BORIS","ISIAH","EMILE","ELMO","ARON","LEOPOLDO","EVERETTE","JOSEF","ELOY","RODRICK","REINALDO","LUCIO","JERROD","WESTON","HERSHEL","BARTON","PARKER","LEMUEL","BURT","JULES","GIL","ELISEO","AHMAD","NIGEL","EFREN","ANTWAN","ALDEN","MARGARITO","COLEMAN","DINO","OSVALDO","LES","DEANDRE","NORMAND","KIETH","TREY","NORBERTO","NAPOLEON","JEROLD","FRITZ","ROSENDO","MILFORD","CHRISTOPER","ALFONZO","LYMAN","JOSIAH","BRANT","WILTON","RICO","JAMAAL","DEWITT","BRENTON","OLIN","FOSTER","FAUSTINO","CLAUDIO","JUDSON","GINO","EDGARDO","ALEC","TANNER","JARRED","DONN","TAD","PRINCE","PORFIRIO","ODIS","LENARD","CHAUNCEY","TOD","MEL","MARCELO","KORY","AUGUSTUS","KEVEN","HILARIO","BUD","SAL","ORVAL","MAURO","ZACHARIAH","OLEN","ANIBAL","MILO","JED","DILLON","AMADO","NEWTON","LENNY","RICHIE","HORACIO","BRICE","MOHAMED","DELMER","DARIO","REYES","MAC","JONAH","JERROLD","ROBT","HANK","RUPERT","ROLLAND","KENTON","DAMION","ANTONE","WALDO","FREDRIC","BRADLY","KIP","BURL","WALKER","TYREE","JEFFEREY","AHMED","WILLY","STANFORD","OREN","NOBLE","MOSHE","MIKEL","ENOCH","BRENDON","QUINTIN","JAMISON","FLORENCIO","DARRICK","TOBIAS","HASSAN","GIUSEPPE","DEMARCUS","CLETUS","TYRELL","LYNDON","KEENAN","WERNER","GERALDO","COLUMBUS","CHET","BERTRAM","MARKUS","HUEY","HILTON","DW
AIN","DONTE","TYRON","OMER","ISAIAS","HIPOLITO","FERMIN","ADALBERTO","BO","BARRETT","TEODORO","MCKINLEY","MAXIMO","GARFIELD","RALEIGH","LAWERENCE","ABRAM","RASHAD","KING","EMMITT","DARON","SAMUAL","MIQUEL","EUSEBIO","DOMENIC","DARRON","BUSTER","WILBER","RENATO","JC","HOYT","HAYWOOD","EZEKIEL","CHAS","FLORENTINO","ELROY","CLEMENTE","ARDEN","NEVILLE","EDISON","DESHAWN","NATHANIAL","JORDON","DANILO","CLAUD","SHERWOOD","RAYMON","RAYFORD","CRISTOBAL","AMBROSE","TITUS","HYMAN","FELTON","EZEQUIEL","ERASMO","STANTON","LONNY","LEN","IKE","MILAN","LINO","JAROD","HERB","ANDREAS","WALTON","RHETT","PALMER","DOUGLASS","CORDELL","OSWALDO","ELLSWORTH","VIRGILIO","TONEY","NATHANAEL","DEL","BENEDICT","MOSE","JOHNSON","ISREAL","GARRET","FAUSTO","ASA","ARLEN","ZACK","WARNER","MODESTO","FRANCESCO","MANUAL","GAYLORD","GASTON","FILIBERTO","DEANGELO","MICHALE","GRANVILLE","WES","MALIK","ZACKARY","TUAN","ELDRIDGE","CRISTOPHER","CORTEZ","ANTIONE","MALCOM","LONG","KOREY","JOSPEH","COLTON","WAYLON","VON","HOSEA","SHAD","SANTO","RUDOLF","ROLF","REY","RENALDO","MARCELLUS","LUCIUS","KRISTOFER","BOYCE","BENTON","HAYDEN","HARLAND","ARNOLDO","RUEBEN","LEANDRO","KRAIG","JERRELL","JEROMY","HOBERT","CEDRICK","ARLIE","WINFORD","WALLY","LUIGI","KENETH","JACINTO","GRAIG","FRANKLYN","EDMUNDO","SID","PORTER","LEIF","JERAMY","BUCK","WILLIAN","VINCENZO","SHON","LYNWOOD","JERE","HAI","ELDEN","DORSEY","DARELL","BRODERICK","ALONSO"pythran-0.10.0+ds2/pythran/tests/euler/poker.txt000066400000000000000000000724601416264035500216040ustar00rootroot000000000000008C TS KC 9H 4S 7D 2S 5D 3S AC 5C AD 5D AC 9C 7C 5H 8D TD KS 3H 7H 6S KC JS QH TD JC 2D 8S TH 8H 5C QS TC 9H 4D JC KS JS 7C 5H KC QH JD AS KH 4C AD 4S 5H KS 9C 7D 9H 8D 3S 5D 5C AH 6H 4H 5C 3H 2H 3S QH 5S 6S AS TD 8C 4H 7C TC KC 4C 3H 7S KS 7C 9C 6D KD 3H 4C QS QC AC KH JC 6S 5H 2H 2D KD 9D 7C AS JS AD QH TH 9D 8H TS 6D 3S AS AC 2H 4S 5C 5S TC KC JD 6C TS 3C QD AS 6H JS 2C 3D 9H KC 4H 8S KD 8S 9S 7C 2S 3S 6D 6S 4H KC 3C 8C 2D 7D 4D 9S 4S QH 4H JD 8C KC 7S TC 
2D TS 8H QD AC 5C 3D KH QD 6C 6S AD AS 8H 2H QS 6S 8D 4C 8S 6C QH TC 6D 7D 9D 2S 8D 8C 4C TS 9S 9D 9C AC 3D 3C QS 2S 4H JH 3D 2D TD 8S 9H 5H QS 8S 6D 3C 8C JD AS 7H 7D 6H TD 9D AS JH 6C QC 9S KD JC AH 8S QS 4D TH AC TS 3C 3D 5C 5S 4D JS 3D 8H 6C TS 3S AD 8C 6D 7C 5D 5H 3S 5C JC 2H 5S 3D 5H 6H 2S KS 3D 5D JD 7H JS 8H KH 4H AS JS QS QC TC 6D 7C KS 3D QS TS 2H JS 4D AS 9S JC KD QD 5H 4D 5D KH 7H 3D JS KD 4H 2C 9H 6H 5C 9D 6C JC 2D TH 9S 7D 6D AS QD JH 4D JS 7C QS 5C 3H KH QD AD 8C 8H 3S TH 9D 5S AH 9S 4D 9D 8S 4H JS 3C TC 8D 2C KS 5H QD 3S TS 9H AH AD 8S 5C 7H 5D KD 9H 4D 3D 2D KS AD KS KC 9S 6D 2C QH 9D 9H TS TC 9C 6H 5D QH 4D AD 6D QC JS KH 9S 3H 9D JD 5C 4D 9H AS TC QH 2C 6D JC 9C 3C AD 9S KH 9D 7D KC 9C 7C JC JS KD 3H AS 3C 7D QD KH QS 2C 3S 8S 8H 9H 9C JC QH 8D 3C KC 4C 4H 6D AD 9H 9D 3S KS QS 7H KH 7D 5H 5D JD AD 2H 2C 6H TH TC 7D 8D 4H 8C AS 4S 2H AC QC 3S 6D TH 4D 4C KH 4D TC KS AS 7C 3C 6D 2D 9H 6C 8C TD 5D QS 2C 7H 4C 9C 3H 9H 5H JH TS 7S TD 6H AD QD 8H 8S 5S AD 9C 8C 7C 8D 5H 9D 8S 2S 4H KH KS 9S 2S KC 5S AD 4S 7D QS 9C QD 6H JS 5D AC 8D 2S AS KH AC JC 3S 9D 9S 3C 9C 5S JS AD 3C 3D KS 3S 5C 9C 8C TS 4S JH 8D 5D 6H KD QS QD 3D 6C KC 8S JD 6C 3S 8C TC QC 3C QH JS KC JC 8H 2S 9H 9C JH 8S 8C 9S 8S 2H QH 4D QC 9D KC AS TH 3C 8S 6H TH 7C 2H 6S 3C 3H AS 7S QH 5S JS 4H 5H TS 8H AH AC JC 9D 8H 2S 4S TC JC 3C 7H 3H 5C 3D AD 3C 3S 4C QC AS 5D TH 8C 6S 9D 4C JS KH AH TS JD 8H AD 4C 6S 9D 7S AC 4D 3D 3S TC JD AD 7H 6H 4H JH KC TD TS 7D 6S 8H JH TC 3S 8D 8C 9S 2C 5C 4D 2C 9D KC QH TH QS JC 9C 4H TS QS 3C QD 8H KH 4H 8D TD 8S AC 7C 3C TH 5S 8H 8C 9C JD TC KD QC TC JD TS 8C 3H 6H KD 7C TD JH QS KS 9C 6D 6S AS 9H KH 6H 2H 4D AH 2D JH 6H TD 5D 4H JD KD 8C 9S JH QD JS 2C QS 5C 7C 4S TC 7H 8D 2S 6H 7S 9C 7C KC 8C 5D 7H 4S TD QC 8S JS 4H KS AD 8S JH 6D TD KD 7C 6C 2D 7D JC 6H 6S JS 4H QH 9H AH 4C 3C 6H 5H AS 7C 7S 3D KH KC 5D 5C JC 3D TD AS 4D 6D 6S QH JD KS 8C 7S 8S QH 2S JD 5C 7H AH QD 8S 3C 6H 6C 2C 8D TD 7D 4C 4D 5D QH KH 7C 2S 7H JS 6D QC QD AD 6C 6S 7D TH 6H 2H 8H KH 4H 
KS JS KD 5D 2D KH 7D 9C 8C 3D 9C 6D QD 3C KS 3S 7S AH JD 2D AH QH AS JC 8S 8H 4C KC TH 7D JC 5H TD 7C 5D KD 4C AD 8H JS KC 2H AC AH 7D JH KH 5D 7S 6D 9S 5S 9C 6H 8S TD JD 9H 6C AC 7D 8S 6D TS KD 7H AC 5S 7C 5D AH QC JC 4C TC 8C 2H TS 2C 7D KD KC 6S 3D 7D 2S 8S 3H 5S 5C 8S 5D 8H 4C 6H KC 3H 7C 5S KD JH 8C 3D 3C 6C KC TD 7H 7C 4C JC KC 6H TS QS TD KS 8H 8C 9S 6C 5S 9C QH 7D AH KS KC 9S 2C 4D 4S 8H TD 9C 3S 7D 9D AS TH 6S 7D 3C 6H 5D KD 2C 5C 9D 9C 2H KC 3D AD 3H QD QS 8D JC 4S 8C 3H 9C 7C AD 5D JC 9D JS AS 5D 9H 5C 7H 6S 6C QC JC QD 9S JC QS JH 2C 6S 9C QC 3D 4S TC 4H 5S 8D 3D 4D 2S KC 2H JS 2C TD 3S TH KD 4D 7H JH JS KS AC 7S 8C 9S 2D 8S 7D 5C AD 9D AS 8C 7H 2S 6C TH 3H 4C 3S 8H AC KD 5H JC 8H JD 2D 4H TD JH 5C 3D AS QH KS 7H JD 8S 5S 6D 5H 9S 6S TC QS JC 5C 5D 9C TH 8C 5H 3S JH 9H 2S 2C 6S 7S AS KS 8C QD JC QS TC QC 4H AC KH 6C TC 5H 7D JH 4H 2H 8D JC KS 4D 5S 9C KH KD 9H 5C TS 3D 7D 2D 5H AS TC 4D 8C 2C TS 9D 3H 8D 6H 8D 2D 9H JD 6C 4S 5H 5S 6D AD 9C JC 7D 6H 9S 6D JS 9H 3C AD JH TC QS 4C 5D 9S 7C 9C AH KD 6H 2H TH 8S QD KS 9D 9H AS 4H 8H 8D 5H 6C AH 5S AS AD 8S QS 5D 4S 2H TD KS 5H AC 3H JC 9C 7D QD KD AC 6D 5H QH 6H 5S KC AH QH 2H 7D QS 3H KS 7S JD 6C 8S 3H 6D KS QD 5D 5C 8H TC 9H 4D 4S 6S 9D KH QC 4H 6C JD TD 2D QH 4S 6H JH KD 3C QD 8C 4S 6H 7C QD 9D AS AH 6S AD 3C 2C KC TH 6H 8D AH 5C 6D 8S 5D TD TS 7C AD JC QD 9H 3C KC 7H 5D 4D 5S 8H 4H 7D 3H JD KD 2D JH TD 6H QS 4S KD 5C 8S 7D 8H AC 3D AS 8C TD 7H KH 5D 6C JD 9D KS 7C 6D QH TC JD KD AS KC JH 8S 5S 7S 7D AS 2D 3D AD 2H 2H 5D AS 3C QD KC 6H 9H 9S 2C 9D 5D TH 4C JH 3H 8D TC 8H 9H 6H KD 2C TD 2H 6C 9D 2D JS 8C KD 7S 3C 7C AS QH TS AD 8C 2S QS 8H 6C JS 4C 9S QC AD TD TS 2H 7C TS TC 8C 3C 9H 2D 6D JC TC 2H 8D JH KS 6D 3H TD TH 8H 9D TD 9H QC 5D 6C 8H 8C KC TS 2H 8C 3D AH 4D TH TC 7D 8H KC TS 5C 2D 8C 6S KH AH 5H 6H KC 5S 5D AH TC 4C JD 8D 6H 8C 6C KC QD 3D 8H 2D JC 9H 4H AD 2S TD 6S 7D JS KD 4H QS 2S 3S 8C 4C 9H JH TS 3S 4H QC 5S 9S 9C 2C KD 9H JS 9S 3H JC TS 5D AC AS 2H 5D AD 5H JC 7S TD JS 4C 2D 4S 8H 3D 7D 2C 
AD KD 9C TS 7H QD JH 5H JS AC 3D TH 4C 8H 6D KH KC QD 5C AD 7C 2D 4H AC 3D 9D TC 8S QD 2C JC 4H JD AH 6C TD 5S TC 8S AH 2C 5D AS AC TH 7S 3D AS 6C 4C 7H 7D 4H AH 5C 2H KS 6H 7S 4H 5H 3D 3C 7H 3C 9S AC 7S QH 2H 3D 6S 3S 3H 2D 3H AS 2C 6H TC JS 6S 9C 6C QH KD QD 6D AC 6H KH 2C TS 8C 8H 7D 3S 9H 5D 3H 4S QC 9S 5H 2D 9D 7H 6H 3C 8S 5H 4D 3S 4S KD 9S 4S TC 7S QC 3S 8S 2H 7H TC 3D 8C 3H 6C 2H 6H KS KD 4D KC 3D 9S 3H JS 4S 8H 2D 6C 8S 6H QS 6C TC QD 9H 7D 7C 5H 4D TD 9D 8D 6S 6C TC 5D TS JS 8H 4H KC JD 9H TC 2C 6S 5H 8H AS JS 9C 5C 6S 9D JD 8H KC 4C 6D 4D 8D 8S 6C 7C 6H 7H 8H 5C KC TC 3D JC 6D KS 9S 6H 7S 9C 2C 6C 3S KD 5H TS 7D 9H 9S 6H KH 3D QD 4C 6H TS AC 3S 5C 2H KD 4C AS JS 9S 7C TS 7H 9H JC KS 4H 8C JD 3H 6H AD 9S 4S 5S KS 4C 2C 7D 3D AS 9C 2S QS KC 6C 8S 5H 3D 2S AC 9D 6S 3S 4D TD QD TH 7S TS 3D AC 7H 6C 5D QC TC QD AD 9C QS 5C 8D KD 3D 3C 9D 8H AS 3S 7C 8S JD 2D 8D KC 4C TH AC QH JS 8D 7D 7S 9C KH 9D 8D 4C JH 2C 2S QD KD TS 4H 4D 6D 5D 2D JH 3S 8S 3H TC KH AD 4D 2C QS 8C KD JH JD AH 5C 5C 6C 5H 2H JH 4H KS 7C TC 3H 3C 4C QC 5D JH 9C QD KH 8D TC 3H 9C JS 7H QH AS 7C 9H 5H JC 2D 5S QD 4S 3C KC 6S 6C 5C 4C 5D KH 2D TS 8S 9C AS 9S 7C 4C 7C AH 8C 8D 5S KD QH QS JH 2C 8C 9D AH 2H AC QC 5S 8H 7H 2C QD 9H 5S QS QC 9C 5H JC TH 4H 6C 6S 3H 5H 3S 6H KS 8D AC 7S AC QH 7H 8C 4S KC 6C 3D 3S TC 9D 3D JS TH AC 5H 3H 8S 3S TC QD KH JS KS 9S QC 8D AH 3C AC 5H 6C KH 3S 9S JH 2D QD AS 8C 6C 4D 7S 7H 5S JC 6S 9H 4H JH AH 5S 6H 9S AD 3S TH 2H 9D 8C 4C 8D 9H 7C QC AD 4S 9C KC 5S 9D 6H 4D TC 4C JH 2S 5D 3S AS 2H 6C 7C KH 5C AD QS TH JD 8S 3S 4S 7S AH AS KC JS 2S AD TH JS KC 2S 7D 8C 5C 9C TS 5H 9D 7S 9S 4D TD JH JS KH 6H 5D 2C JD JS JC TH 2D 3D QD 8C AC 5H 7S KH 5S 9D 5D TD 4S 6H 3C 2D 4S 5D AC 8D 4D 7C AD AS AH 9C 6S TH TS KS 2C QC AH AS 3C 4S 2H 8C 3S JC 5C 7C 3H 3C KH JH 7S 3H JC 5S 6H 4C 2S 4D KC 7H 4D 7C 4H 9S 8S 6S AD TC 6C JC KH QS 3S TC 4C 8H 8S AC 3C TS QD QS TH 3C TS 7H 7D AH TD JC TD JD QC 4D 9S 7S TS AD 7D AC AH 7H 4S 6D 7C 2H 9D KS JC TD 7C AH JD 4H 6D QS TS 2H 2C 5C TC KC 
8C 9S 4C JS 3C JC 6S AH AS 7D QC 3D 5S JC JD 9D TD KH TH 3C 2S 6H AH AC 5H 5C 7S 8H QC 2D AC QD 2S 3S JD QS 6S 8H KC 4H 3C 9D JS 6H 3S 8S AS 8C 7H KC 7D JD 2H JC QH 5S 3H QS 9H TD 3S 8H 7S AC 5C 6C AH 7C 8D 9H AH JD TD QS 7D 3S 9C 8S AH QH 3C JD KC 4S 5S 5D TD KS 9H 7H 6S JH TH 4C 7C AD 5C 2D 7C KD 5S TC 9D 6S 6C 5D 2S TH KC 9H 8D 5H 7H 4H QC 3D 7C AS 6S 8S QC TD 4S 5C TH QS QD 2S 8S 5H TH QC 9H 6S KC 7D 7C 5C 7H KD AH 4D KH 5C 4S 2D KC QH 6S 2C TD JC AS 4D 6C 8C 4H 5S JC TC JD 5S 6S 8D AS 9D AD 3S 6D 6H 5D 5S TC 3D 7D QS 9D QD 4S 6C 8S 3S 7S AD KS 2D 7D 7C KC QH JC AC QD 5D 8D QS 7H 7D JS AH 8S 5H 3D TD 3H 4S 6C JH 4S QS 7D AS 9H JS KS 6D TC 5C 2D 5C 6H TC 4D QH 3D 9H 8S 6C 6D 7H TC TH 5S JD 5C 9C KS KD 8D TD QH 6S 4S 6C 8S KC 5C TC 5S 3D KS AC 4S 7D QD 4C TH 2S TS 8H 9S 6S 7S QH 3C AH 7H 8C 4C 8C TS JS QC 3D 7D 5D 7S JH 8S 7S 9D QC AC 7C 6D 2H JH KC JS KD 3C 6S 4S 7C AH QC KS 5H KS 6S 4H JD QS TC 8H KC 6H AS KH 7C TC 6S TD JC 5C 7D AH 3S 3H 4C 4H TC TH 6S 7H 6D 9C QH 7D 5H 4S 8C JS 4D 3D 8S QH KC 3H 6S AD 7H 3S QC 8S 4S 7S JS 3S JD KH TH 6H QS 9C 6C 2D QD 4S QH 4D 5H KC 7D 6D 8D TH 5S TD AD 6S 7H KD KH 9H 5S KC JC 3H QC AS TS 4S QD KS 9C 7S KC TS 6S QC 6C TH TC 9D 5C 5D KD JS 3S 4H KD 4C QD 6D 9S JC 9D 8S JS 6D 4H JH 6H 6S 6C KS KH AC 7D 5D TC 9S KH 6S QD 6H AS AS 7H 6D QH 8D TH 2S KH 5C 5H 4C 7C 3D QC TC 4S KH 8C 2D JS 6H 5D 7S 5H 9C 9H JH 8S TH 7H AS JS 2S QD KH 8H 4S AC 8D 8S 3H 4C TD KD 8C JC 5C QS 2D JD TS 7D 5D 6C 2C QS 2H 3C AH KS 4S 7C 9C 7D JH 6C 5C 8H 9D QD 2S TD 7S 6D 9C 9S QS KH QH 5C JC 6S 9C QH JH 8D 7S JS KH 2H 8D 5H TH KC 4D 4S 3S 6S 3D QS 2D JD 4C TD 7C 6D TH 7S JC AH QS 7S 4C TH 9D TS AD 4D 3H 6H 2D 3H 7D JD 3D AS 2S 9C QC 8S 4H 9H 9C 2C 7S JH KD 5C 5D 6H TC 9H 8H JC 3C 9S 8D KS AD KC TS 5H JD QS QH QC 8D 5D KH AH 5D AS 8S 6S 4C AH QC QD TH 7H 3H 4H 7D 6S 4S 9H AS 8H JS 9D JD 8C 2C 9D 7D 5H 5S 9S JC KD KD 9C 4S QD AH 7C AD 9D AC TD 6S 4H 4S 9C 8D KS TC 9D JH 7C 5S JC 5H 4S QH AC 2C JS 2S 9S 8C 5H AS QD AD 5C 7D 8S QC TD JC 4C 8D 5C KH QS 4D 6H 
2H 2C TH 4S 2D KC 3H QD AC 7H AD 9D KH QD AS 8H TH KC 8D 7S QH 8C JC 6C 7D 8C KH AD QS 2H 6S 2D JC KH 2D 7D JS QC 5H 4C 5D AD TS 3S AD 4S TD 2D TH 6S 9H JH 9H 2D QS 2C 4S 3D KH AS AC 9D KH 6S 8H 4S KD 7D 9D TS QD QC JH 5H AH KS AS AD JC QC 5S KH 5D 7D 6D KS KD 3D 7C 4D JD 3S AC JS 8D 5H 9C 3H 4H 4D TS 2C 6H KS KH 9D 7C 2S 6S 8S 2H 3D 6H AC JS 7S 3S TD 8H 3H 4H TH 9H TC QC KC 5C KS 6H 4H AC 8S TC 7D QH 4S JC TS 6D 6C AC KH QH 7D 7C JH QS QD TH 3H 5D KS 3D 5S 8D JS 4C 2C KS 7H 9C 4H 5H 8S 4H TD 2C 3S QD QC 3H KC QC JS KD 9C AD 5S 9D 7D 7H TS 8C JC KH 7C 7S 6C TS 2C QD TH 5S 9D TH 3C 7S QH 8S 9C 2H 5H 5D 9H 6H 2S JS KH 3H 7C 2H 5S JD 5D 5S 2C TC 2S 6S 6C 3C 8S 4D KH 8H 4H 2D KS 3H 5C 2S 9H 3S 2D TD 7H 8S 6H JD KC 9C 8D 6S QD JH 7C 9H 5H 8S 8H TH TD QS 7S TD 7D TS JC KD 7C 3C 2C 3C JD 8S 4H 2D 2S TD AS 4D AC AH KS 6C 4C 4S 7D 8C 9H 6H AS 5S 3C 9S 2C QS KD 4D 4S AC 5D 2D TS 2C JS KH QH 5D 8C AS KC KD 3H 6C TH 8S 7S KH 6H 9S AC 6H 7S 6C QS AH 2S 2H 4H 5D 5H 5H JC QD 2C 2S JD AS QC 6S 7D 6C TC AS KD 8H 9D 2C 7D JH 9S 2H 4C 6C AH 8S TD 3H TH 7C TS KD 4S TS 6C QH 8D 9D 9C AH 7D 6D JS 5C QD QC 9C 5D 8C 2H KD 3C QH JH AD 6S AH KC 8S 6D 6H 3D 7C 4C 7S 5S 3S 6S 5H JC 3C QH 7C 5H 3C 3S 8C TS 4C KD 9C QD 3S 7S 5H 7H QH JC 7C 8C KD 3C KD KH 2S 4C TS AC 6S 2C 7C 2C KH 3C 4C 6H 4D 5H 5S 7S QD 4D 7C 8S QD TS 9D KS 6H KD 3C QS 4D TS 7S 4C 3H QD 8D 9S TC TS QH AC 6S 3C 9H 9D QS 8S 6H 3S 7S 5D 4S JS 2D 6C QH 6S TH 4C 4H AS JS 5D 3D TS 9C AC 8S 6S 9C 7C 3S 5C QS AD AS 6H 3C 9S 8C 7H 3H 6S 7C AS 9H JD KH 3D 3H 7S 4D 6C 7C AC 2H 9C TH 4H 5S 3H AC TC TH 9C 9H 9S 8D 8D 9H 5H 4D 6C 2H QD 6S 5D 3S 4C 5C JD QS 4D 3H TH AC QH 8C QC 5S 3C 7H AD 4C KS 4H JD 6D QS AH 3H KS 9H 2S JS JH 5H 2H 2H 5S TH 6S TS 3S KS 3C 5H JS 2D 9S 7H 3D KC JH 6D 7D JS TD AC JS 8H 2C 8C JH JC 2D TH 7S 5D 9S 8H 2H 3D TC AH JC KD 9C 9D QD JC 2H 6D KH TS 9S QH TH 2C 8D 4S JD 5H 3H TH TC 9C KC AS 3D 9H 7D 4D TH KH 2H 7S 3H 4H 7S KS 2S JS TS 8S 2H QD 8D 5S 6H JH KS 8H 2S QC AC 6S 3S JC AS AD QS 8H 6C KH 4C 4D QD 2S 3D TS TD 
9S KS 6S QS 5C 8D 3C 6D 4S QC KC JH QD TH KH AD 9H AH 4D KS 2S 8D JH JC 7C QS 2D 6C TH 3C 8H QD QH 2S 3S KS 6H 5D 9S 4C TS TD JS QD 9D JD 5H 8H KH 8S KS 7C TD AD 4S KD 2C 7C JC 5S AS 6C 7D 8S 5H 9C 6S QD 9S TS KH QS 5S QH 3C KC 7D 3H 3C KD 5C AS JH 7H 6H JD 9D 5C 9H KC 8H KS 4S AD 4D 2S 3S JD QD 8D 2S 7C 5S 6S 5H TS 6D 9S KC TD 3S 6H QD JD 5C 8D 5H 9D TS KD 8D 6H TD QC 4C 7D 6D 4S JD 9D AH 9S AS TD 9H QD 2D 5S 2H 9C 6H 9S TD QC 7D TC 3S 2H KS TS 2C 9C 8S JS 9D 7D 3C KC 6D 5D 6C 6H 8S AS 7S QS JH 9S 2H 8D 4C 8H 9H AD TH KH QC AS 2S JS 5C 6H KD 3H 7H 2C QD 8H 2S 8D 3S 6D AH 2C TC 5C JD JS TS 8S 3H 5D TD KC JC 6H 6S QS TC 3H 5D AH JC 7C 7D 4H 7C 5D 8H 9C 2H 9H JH KH 5S 2C 9C 7H 6S TH 3S QC QD 4C AC JD 2H 5D 9S 7D KC 3S QS 2D AS KH 2S 4S 2H 7D 5C TD TH QH 9S 4D 6D 3S TS 6H 4H KS 9D 8H 5S 2D 9H KS 4H 3S 5C 5D KH 6H 6S JS KC AS 8C 4C JC KH QC TH QD AH 6S KH 9S 2C 5H TC 3C 7H JC 4D JD 4S 6S 5S 8D 7H 7S 4D 4C 2H 7H 9H 5D KH 9C 7C TS TC 7S 5H 4C 8D QC TS 4S 9H 3D AD JS 7C 8C QS 5C 5D 3H JS AH KC 4S 9D TS JD 8S QS TH JH KH 2D QD JS JD QC 5D 6S 9H 3S 2C 8H 9S TS 2S 4C AD 7H JC 5C 2D 6D 4H 3D 7S JS 2C 4H 8C AD QD 9C 3S TD JD TS 4C 6H 9H 7D QD 6D 3C AS AS 7C 4C 6S 5D 5S 5C JS QC 4S KD 6S 9S 7C 3C 5S 7D JH QD JS 4S 7S JH 2C 8S 5D 7H 3D QH AD TD 6H 2H 8D 4H 2D 7C AD KH 5D TS 3S 5H 2C QD AH 2S 5C KH TD KC 4D 8C 5D AS 6C 2H 2S 9H 7C KD JS QC TS QS KH JH 2C 5D AD 3S 5H KC 6C 9H 3H 2H AD 7D 7S 7S JS JH KD 8S 7D 2S 9H 7C 2H 9H 2D 8D QC 6S AD AS 8H 5H 6C 2S 7H 6C 6D 7D 8C 5D 9D JC 3C 7C 9C 7H JD 2H KD 3S KH AD 4S QH AS 9H 4D JD KS KD TS KH 5H 4C 8H 5S 3S 3D 7D TD AD 7S KC JS 8S 5S JC 8H TH 9C 4D 5D KC 7C 5S 9C QD 2C QH JS 5H 8D KH TD 2S KS 3D AD KC 7S TC 3C 5D 4C 2S AD QS 6C 9S QD TH QH 5C 8C AD QS 2D 2S KC JD KS 6C JC 8D 4D JS 2H 5D QD 7S 7D QH TS 6S 7H 3S 8C 8S 9D QS 8H 6C 9S 4S TC 2S 5C QD 4D QS 6D TH 6S 3S 5C 9D 6H 8D 4C 7D TC 7C TD AH 6S AS 7H 5S KD 3H 5H AC 4C 8D 8S AH KS QS 2C AD 6H 7D 5D 6H 9H 9S 2H QS 8S 9C 5D 2D KD TS QC 5S JH 7D 7S TH 9S 9H AC 7H 3H 6S KC 4D 6D 5C 4S QD TS 
TD 2S 7C QD 3H JH 9D 4H 7S 7H KS 3D 4H 5H TC 2S AS 2D 6D 7D 8H 3C 7H TD 3H AD KC TH 9C KH TC 4C 2C 9S 9D 9C 5C 2H JD 3C 3H AC TS 5D AD 8D 6H QC 6S 8C 2S TS 3S JD 7H 8S QH 4C 5S 8D AC 4S 6C 3C KH 3D 7C 2D 8S 2H 4H 6C 8S TH 2H 4S 8H 9S 3H 7S 7C 4C 9C 2C 5C AS 5D KD 4D QH 9H 4H TS AS 7D 8D 5D 9S 8C 2H QC KD AC AD 2H 7S AS 3S 2D 9S 2H QC 8H TC 6D QD QS 5D KH 3C TH JD QS 4C 2S 5S AD 7H 3S AS 7H JS 3D 6C 3S 6D AS 9S AC QS 9C TS AS 8C TC 8S 6H 9D 8D 6C 4D JD 9C KC 7C 6D KS 3S 8C AS 3H 6S TC 8D TS 3S KC 9S 7C AS 8C QC 4H 4S 8S 6C 3S TC AH AC 4D 7D 5C AS 2H 6S TS QC AD TC QD QC 8S 4S TH 3D AH TS JH 4H 5C 2D 9S 2C 3H 3C 9D QD QH 7D KC 9H 6C KD 7S 3C 4D AS TC 2D 3D JS 4D 9D KS 7D TH QC 3H 3C 8D 5S 2H 9D 3H 8C 4C 4H 3C TH JC TH 4S 6S JD 2D 4D 6C 3D 4C TS 3S 2D 4H AC 2C 6S 2H JH 6H TD 8S AD TC AH AC JH 9S 6S 7S 6C KC 4S JD 8D 9H 5S 7H QH AH KD 8D TS JH 5C 5H 3H AD AS JS 2D 4H 3D 6C 8C 7S AD 5D 5C 8S TD 5D 7S 9C 4S 5H 6C 8C 4C 8S JS QH 9C AS 5C QS JC 3D QC 7C JC 9C KH JH QS QC 2C TS 3D AD 5D JH AC 5C 9S TS 4C JD 8C KS KC AS 2D KH 9H 2C 5S 4D 3D 6H TH AH 2D 8S JC 3D 8C QH 7S 3S 8H QD 4H JC AS KH KS 3C 9S 6D 9S QH 7D 9C 4S AC 7H KH 4D KD AH AD TH 6D 9C 9S KD KS QH 4H QD 6H 9C 7C QS 6D 6S 9D 5S JH AH 8D 5H QD 2H JC KS 4H KH 5S 5C 2S JS 8D 9C 8C 3D AS KC AH JD 9S 2H QS 8H 5S 8C TH 5C 4C QC QS 8C 2S 2C 3S 9C 4C KS KH 2D 5D 8S AH AD TD 2C JS KS 8C TC 5S 5H 8H QC 9H 6H JD 4H 9S 3C JH 4H 9H AH 4S 2H 4C 8D AC 8S TH 4D 7D 6D QD QS 7S TC 7C KH 6D 2D JD 5H JS QD JH 4H 4S 9C 7S JH 4S 3S TS QC 8C TC 4H QH 9D 4D JH QS 3S 2C 7C 6C 2D 4H 9S JD 5C 5H AH 9D TS 2D 4C KS JH TS 5D 2D AH JS 7H AS 8D JS AH 8C AD KS 5S 8H 2C 6C TH 2H 5D AD AC KS 3D 8H TS 6H QC 6D 4H TS 9C 5H JS JH 6S JD 4C JH QH 4H 2C 6D 3C 5D 4C QS KC 6H 4H 6C 7H 6S 2S 8S KH QC 8C 3H 3D 5D KS 4H TD AD 3S 4D TS 5S 7C 8S 7D 2C KS 7S 6C 8C JS 5D 2H 3S 7C 5C QD 5H 6D 9C 9H JS 2S KD 9S 8D TD TS AC 8C 9D 5H QD 2S AC 8C 9H KS 7C 4S 3C KH AS 3H 8S 9C JS QS 4S AD 4D AS 2S TD AD 4D 9H JC 4C 5H QS 5D 7C 4H TC 2D 6C JS 4S KC 3S 4C 2C 5D AC 9H 3D 
JD 8S QS QH 2C 8S 6H 3C QH 6D TC KD AC AH QC 6C 3S QS 4S AC 8D 5C AD KH 5S 4C AC KH AS QC 2C 5C 8D 9C 8H JD 3C KH 8D 5C 9C QD QH 9D 7H TS 2C 8C 4S TD JC 9C 5H QH JS 4S 2C 7C TH 6C AS KS 7S JD JH 7C 9H 7H TC 5H 3D 6D 5D 4D 2C QD JH 2H 9D 5S 3D TD AD KS JD QH 3S 4D TH 7D 6S QS KS 4H TC KS 5S 8D 8H AD 2S 2D 4C JH 5S JH TC 3S 2D QS 9D 4C KD 9S AC KH 3H AS 9D KC 9H QD 6C 6S 9H 7S 3D 5C 7D KC TD 8H 4H 6S 3C 7H 8H TC QD 4D 7S 6S QH 6C 6D AD 4C QD 6C 5D 7D 9D KS TS JH 2H JD 9S 7S TS KH 8D 5D 8H 2D 9S 4C 7D 9D 5H QD 6D AC 6S 7S 6D JC QD JH 4C 6S QS 2H 7D 8C TD JH KD 2H 5C QS 2C JS 7S TC 5H 4H JH QD 3S 5S 5D 8S KH KS KH 7C 2C 5D JH 6S 9C 6D JC 5H AH JD 9C JS KC 2H 6H 4D 5S AS 3C TH QC 6H 9C 8S 8C TD 7C KC 2C QD 9C KH 4D 7S 3C TS 9H 9C QC 2S TS 8C TD 9S QD 3S 3C 4D 9D TH JH AH 6S 2S JD QH JS QD 9H 6C KD 7D 7H 5D 6S 8H AH 8H 3C 4S 2H 5H QS QH 7S 4H AC QS 3C 7S 9S 4H 3S AH KS 9D 7C AD 5S 6S 2H 2D 5H TC 4S 3C 8C QH TS 6S 4D JS KS JH AS 8S 6D 2C 8S 2S TD 5H AS TC TS 6C KC KC TS 8H 2H 3H 7C 4C 5S TH TD KD AD KH 7H 7S 5D 5H 5S 2D 9C AD 9S 3D 7S 8C QC 7C 9C KD KS 3C QC 9S 8C 4D 5C AS QD 6C 2C 2H KC 8S JD 7S AC 8D 5C 2S 4D 9D QH 3D 2S TC 3S KS 3C 9H TD KD 6S AC 2C 7H 5H 3S 6C 6H 8C QH TC 8S 6S KH TH 4H 5D TS 4D 8C JS 4H 6H 2C 2H 7D AC QD 3D QS KC 6S 2D 5S 4H TD 3H JH 4C 7S 5H 7H 8H KH 6H QS TH KD 7D 5H AD KD 7C KH 5S TD 6D 3C 6C 8C 9C 5H JD 7C KC KH 7H 2H 3S 7S 4H AD 4D 8S QS TH 3D 7H 5S 8D TC KS KD 9S 6D AD JD 5C 2S 7H 8H 6C QD 2H 6H 9D TC 9S 7C 8D 6D 4C 7C 6C 3C TH KH JS JH 5S 3S 8S JS 9H AS AD 8H 7S KD JH 7C 2C KC 5H AS AD 9C 9S JS AD AC 2C 6S QD 7C 3H TH KS KD 9D JD 4H 8H 4C KH 7S TS 8C KC 3S 5S 2H 7S 6H 7D KS 5C 6D AD 5S 8C 9H QS 7H 7S 2H 6C 7D TD QS 5S TD AC 9D KC 3D TC 2D 4D TD 2H 7D JD QD 4C 7H 5D KC 3D 4C 3H 8S KD QH 5S QC 9H TC 5H 9C QD TH 5H TS 5C 9H AH QH 2C 4D 6S 3C AC 6C 3D 2C 2H TD TH AC 9C 5D QC 4D AD 8D 6D 8C KC AD 3C 4H AC 8D 8H 7S 9S TD JC 4H 9H QH JS 2D TH TD TC KD KS 5S 6S 9S 8D TH AS KH 5H 5C 8S JD 2S 9S 6S 5S 8S 5D 7S 7H 9D 5D 8C 4C 9D AD TS 2C 7D KD TC 8S QS 
4D KC 5C 8D 4S KH JD KD AS 5C AD QH 7D 2H 9S 7H 7C TC 2S 8S JD KH 7S 6C 6D AD 5D QC 9H 6H 3S 8C 8H AH TC 4H JS TD 2C TS 4D 7H 2D QC 9C 5D TH 7C 6C 8H QC 5D TS JH 5C 5H 9H 4S 2D QC 7H AS JS 8S 2H 4C 4H 8D JS 6S AC KD 3D 3C 4S 7H TH KC QH KH 6S QS 5S 4H 3C QD 3S 3H 7H AS KH 8C 4H 9C 5S 3D 6S TS 9C 7C 3H 5S QD 2C 3D AD AC 5H JH TD 2D 4C TS 3H KH AD 3S 7S AS 4C 5H 4D 6S KD JC 3C 6H 2D 3H 6S 8C 2D TH 4S AH QH AD 5H 7C 2S 9H 7H KC 5C 6D 5S 3H JC 3C TC 9C 4H QD TD JH 6D 9H 5S 7C 6S 5C 5D 6C 4S 7H 9H 6H AH AD 2H 7D KC 2C 4C 2S 9S 7H 3S TH 4C 8S 6S 3S AD KS AS JH TD 5C TD 4S 4D AD 6S 5D TC 9C 7D 8H 3S 4D 4S 5S 6H 5C AC 3H 3D 9H 3C AC 4S QS 8S 9D QH 5H 4D JC 6C 5H TS AC 9C JD 8C 7C QD 8S 8H 9C JD 2D QC QH 6H 3C 8D KS JS 2H 6H 5H QH QS 3H 7C 6D TC 3H 4S 7H QC 2H 3S 8C JS KH AH 8H 5S 4C 9H JD 3H 7S JC AC 3C 2D 4C 5S 6C 4S QS 3S JD 3D 5H 2D TC AH KS 6D 7H AD 8C 6H 6C 7S 3C JD 7C 8H KS KH AH 6D AH 7D 3H 8H 8S 7H QS 5H 9D 2D JD AC 4H 7S 8S 9S KS AS 9D QH 7S 2C 8S 5S JH QS JC AH KD 4C AH 2S 9H 4H 8D TS TD 6H QH JD 4H JC 3H QS 6D 7S 9C 8S 9D 8D 5H TD 4S 9S 4C 8C 8D 7H 3H 3D QS KH 3S 2C 2S 3C 7S TD 4S QD 7C TD 4D 5S KH AC AS 7H 4C 6C 2S 5H 6D JD 9H QS 8S 2C 2H TD 2S TS 6H 9H 7S 4H JC 4C 5D 5S 2C 5H 7D 4H 3S QH JC JS 6D 8H 4C QH 7C QD 3S AD TH 8S 5S TS 9H TC 2S TD JC 7D 3S 3D TH QH 7D 4C 8S 5C JH 8H 6S 3S KC 3H JC 3H KH TC QH TH 6H 2C AC 5H QS 2H 9D 2C AS 6S 6C 2S 8C 8S 9H 7D QC TH 4H KD QS AC 7S 3C 4D JH 6S 5S 8H KS 9S QC 3S AS JD 2D 6S 7S TC 9H KC 3H 7D KD 2H KH 7C 4D 4S 3H JS QD 7D KC 4C JC AS 9D 3C JS 6C 8H QD 4D AH JS 3S 6C 4C 3D JH 6D 9C 9H 9H 2D 8C 7H 5S KS 6H 9C 2S TC 6C 8C AD 7H 6H 3D KH AS 5D TH KS 8C 3S TS 8S 4D 5S 9S 6C 4H 9H 4S 4H 5C 7D KC 2D 2H 9D JH 5C JS TC 9D 9H 5H 7S KH JC 6S 7C 9H 8H 4D JC KH JD 2H TD TC 8H 6C 2H 2C KH 6H 9D QS QH 5H AC 7D 2S 3D QD JC 2D 8D JD JH 2H JC 2D 7H 2C 3C 8D KD TD 4H 3S 4H 6D 8D TS 3H TD 3D 6H TH JH JC 3S AC QH 9H 7H 8S QC 2C 7H TD QS 4S 8S 9C 2S 5D 4D 2H 3D TS 3H 2S QC 8H 6H KC JC KS 5D JD 7D TC 8C 6C 9S 3D 8D AC 8H 6H JH 6C 5D 8D 8S 4H 
AD 2C 9D 4H 2D 2C 3S TS AS TC 3C 5D 4D TH 5H KS QS 6C 4S 2H 3D AD 5C KC 6H 2C 5S 3C 4D 2D 9H 9S JD 4C 3H TH QH 9H 5S AH 8S AC 7D 9S 6S 2H TD 9C 4H 8H QS 4C 3C 6H 5D 4H 8C 9C KC 6S QD QS 3S 9H KD TC 2D JS 8C 6S 4H 4S 2S 4C 8S QS 6H KH 3H TH 8C 5D 2C KH 5S 3S 7S 7H 6C 9D QD 8D 8H KS AC 2D KH TS 6C JS KC 7H 9C KS 5C TD QC AH 6C 5H 9S 7C 5D 4D 3H 4H 6S 7C 7S AH QD TD 2H 7D QC 6S TC TS AH 7S 9D 3H TH 5H QD 9S KS 7S 7C 6H 8C TD TH 2D 4D QC 5C 7D JD AH 9C 4H 4H 3H AH 8D 6H QC QH 9H 2H 2C 2D AD 4C TS 6H 7S TH 4H QS TD 3C KD 2H 3H QS JD TC QC 5D 8H KS JC QD TH 9S KD 8D 8C 2D 9C 3C QD KD 6D 4D 8D AH AD QC 8S 8H 3S 9D 2S 3H KS 6H 4C 7C KC TH 9S 5C 3D 7D 6H AC 7S 4D 2C 5C 3D JD 4D 2D 6D 5H 9H 4C KH AS 7H TD 6C 2H 3D QD KS 4C 4S JC 3C AC 7C JD JS 8H 9S QC 5D JD 6S 5S 2H AS 8C 7D 5H JH 3D 8D TC 5S 9S 8S 3H JC 5H 7S AS 5C TD 3D 7D 4H 8D 7H 4D 5D JS QS 9C KS TD 2S 8S 5C 2H 4H AS TH 7S 4H 7D 3H JD KD 5D 2S KC JD 7H 4S 8H 4C JS 6H QH 5S 4H 2C QS 8C 5S 3H QC 2S 6C QD AD 8C 3D JD TC 4H 2H AD 5S AC 2S 5D 2C JS 2D AD 9D 3D 4C 4S JH 8D 5H 5D 6H 7S 4D KS 9D TD JD 3D 6D 9C 2S AS 7D 5S 5C 8H JD 7C 8S 3S 6S 5H JD TC AD 7H 7S 2S 9D TS 4D AC 8D 6C QD JD 3H 9S KH 2C 3C AC 3D 5H 6H 8D 5D KS 3D 2D 6S AS 4C 2S 7C 7H KH AC 2H 3S JC 5C QH 4D 2D 5H 7S TS AS JD 8C 6H JC 8S 5S 2C 5D 7S QH 7H 6C QC 8H 2D 7C JD 2S 2C QD 2S 2H JC 9C 5D 2D JD JH 7C 5C 9C 8S 7D 6D 8D 6C 9S JH 2C AD 6S 5H 3S KS 7S 9D KH 4C 7H 6C 2C 5C TH 9D 8D 3S QC AH 5S KC 6H TC 5H 8S TH 6D 3C AH 9C KD 4H AD TD 9S 4S 7D 6H 5D 7H 5C 5H 6D AS 4C KD KH 4H 9D 3C 2S 5C 6C JD QS 2H 9D 7D 3H AC 2S 6S 7S JS QD 5C QS 6H AD 5H TH QC 7H TC 3S 7C 6D KC 3D 4H 3D QC 9S 8H 2C 3S JC KS 5C 4S 6S 2C 6H 8S 3S 3D 9H 3H JS 4S 8C 4D 2D 8H 9H 7D 9D AH TS 9S 2C 9H 4C 8D AS 7D 3D 6D 5S 6S 4C 7H 8C 3H 5H JC AH 9D 9C 2S 7C 5S JD 8C 3S 3D 4D 7D 6S 3C KC 4S 5D 7D 3D JD 7H 3H 4H 9C 9H 4H 4D TH 6D QD 8S 9S 7S 2H AC 8S 4S AD 8C 2C AH 7D TC TS 9H 3C AD KS TC 3D 8C 8H JD QC 8D 2C 3C 7D 7C JD 9H 9C 6C AH 6S JS JH 5D AS QC 2C JD TD 9H KD 2H 5D 2D 3S 7D TC AH TS TD 8H AS 5D 
AH QC AC 6S TC 5H KS 4S 7H 4D 8D 9C TC 2H 6H 3H 3H KD 4S QD QH 3D 8H 8C TD 7S 8S JD TC AH JS QS 2D KH KS 4D 3C AD JC KD JS KH 4S TH 9H 2C QC 5S JS 9S KS AS 7C QD 2S JD KC 5S QS 3S 2D AC 5D 9H 8H KS 6H 9C TC AD 2C 6D 5S JD 6C 7C QS KH TD QD 2C 3H 8S 2S QC AH 9D 9H JH TC QH 3C 2S JS 5C 7H 6C 3S 3D 2S 4S QD 2D TH 5D 2C 2D 6H 6D 2S JC QH AS 7H 4H KH 5H 6S KS AD TC TS 7C AC 4S 4H AD 3C 4H QS 8C 9D KS 2H 2D 4D 4S 9D 6C 6D 9C AC 8D 3H 7H KD JC AH 6C TS JD 6D AD 3S 5D QD JC JH JD 3S 7S 8S JS QC 3H 4S JD TH 5C 2C AD JS 7H 9S 2H 7S 8D 3S JH 4D QC AS JD 2C KC 6H 2C AC 5H KD 5S 7H QD JH AH 2D JC QH 8D 8S TC 5H 5C AH 8C 6C 3H JS 8S QD JH 3C 4H 6D 5C 3S 6D 4S 4C AH 5H 5S 3H JD 7C 8D 8H AH 2H 3H JS 3C 7D QC 4H KD 6S 2H KD 5H 8H 2D 3C 8S 7S QD 2S 7S KC QC AH TC QS 6D 4C 8D 5S 9H 2C 3S QD 7S 6C 2H 7C 9D 3C 6C 5C 5S JD JC KS 3S 5D TS 7C KS 6S 5S 2S 2D TC 2H 5H QS AS 7H 6S TS 5H 9S 9D 3C KD 2H 4S JS QS 3S 4H 7C 2S AC 6S 9D 8C JH 2H 5H 7C 5D QH QS KH QC 3S TD 3H 7C KC 8D 5H 8S KH 8C 4H KH JD TS 3C 7H AS QC JS 5S AH 9D 2C 8D 4D 2D 6H 6C KC 6S 2S 6H 9D 3S 7H 4D KH 8H KD 3D 9C TC AC JH KH 4D JD 5H TD 3S 7S 4H 9D AS 4C 7D QS 9S 2S KH 3S 8D 8S KS 8C JC 5C KH 2H 5D 8S QH 2C 4D KC JS QC 9D AC 6H 8S 8C 7C JS JD 6S 4C 9C AC 4S QH 5D 2C 7D JC 8S 2D JS JH 4C JS 4C 7S TS JH KC KH 5H QD 4S QD 8C 8D 2D 6S TD 9D AC QH 5S QH QC JS 3D 3C 5C 4H KH 8S 7H 7C 2C 5S JC 8S 3H QC 5D 2H KC 5S 8D KD 6H 4H QD QH 6D AH 3D 7S KS 6C 2S 4D AC QS 5H TS JD 7C 2D TC 5D QS AC JS QC 6C KC 2C KS 4D 3H TS 8S AD 4H 7S 9S QD 9H QH 5H 4H 4D KH 3S JC AD 4D AC KC 8D 6D 4C 2D KH 2C JD 2C 9H 2D AH 3H 6D 9C 7D TC KS 8C 3H KD 7C 5C 2S 4S 5H AS AH TH JD 4H KD 3H TC 5C 3S AC KH 6D 7H AH 7S QC 6H 2D TD JD AS JH 5D 7H TC 9S 7D JC AS 5S KH 2H 8C AD TH 6H QD KD 9H 6S 6C QH KC 9D 4D 3S JS JH 4H 2C 9H TC 7H KH 4H JC 7D 9S 3H QS 7S AD 7D JH 6C 7H 4H 3S 3H 4D QH JD 2H 5C AS 6C QC 4D 3C TC JH AC JD 3H 6H 4C JC AD 7D 7H 9H 4H TC TS 2C 8C 6S KS 2H JD 9S 4C 3H QS QC 9S 9H 6D KC 9D 9C 5C AD 8C 2C QH TH QD JC 8D 8H QC 2C 2S QD 9C 4D 3S 8D JH QS 
9D 3S 2C 7S 7C JC TD 3C TC 9H 3C TS 8H 5C 4C 2C 6S 8D 7C 4H KS 7H 2H TC 4H 2C 3S AS AH QS 8C 2D 2H 2C 4S 4C 6S 7D 5S 3S TH QC 5D TD 3C QS KD KC KS AS 4D AH KD 9H KS 5C 4C 6H JC 7S KC 4H 5C QS TC 2H JC 9S AH QH 4S 9H 3H 5H 3C QD 2H QC JH 8H 5D AS 7H 2C 3D JH 6H 4C 6S 7D 9C JD 9H AH JS 8S QH 3H KS 8H 3S AC QC TS 4D AD 3D AH 8S 9H 7H 3H QS 9C 9S 5H JH JS AH AC 8D 3C JD 2H AC 9C 7H 5S 4D 8H 7C JH 9H 6C JS 9S 7H 8C 9D 4H 2D AS 9S 6H 4D JS JH 9H AD QD 6H 7S JH KH AH 7H TD 5S 6S 2C 8H JH 6S 5H 5S 9D TC 4C QC 9S 7D 2C KD 3H 5H AS QD 7H JS 4D TS QH 6C 8H TH 5H 3C 3H 9C 9D AD KH JS 5D 3H AS AC 9S 5C KC 2C KH 8C JC QS 6D AH 2D KC TC 9D 3H 2S 7C 4D 6D KH KS 8D 7D 9H 2S TC JH AC QC 3H 5S 3S 8H 3S AS KD 8H 4C 3H 7C JH QH TS 7S 6D 7H 9D JH 4C 3D 3S 6C AS 4S 2H 2C 4C 8S 5H KC 8C QC QD 3H 3S 6C QS QC 2D 6S 5D 2C 9D 2H 8D JH 2S 3H 2D 6C 5C 7S AD 9H JS 5D QH 8S TS 2H 7S 6S AD 6D QC 9S 7H 5H 5C 7D KC JD 4H QC 5S 9H 9C 4D 6S KS 2S 4C 7C 9H 7C 4H 8D 3S 6H 5C 8H JS 7S 2D 6H JS TD 4H 4D JC TH 5H KC AC 7C 8D TH 3H 9S 2D 4C KC 4D KD QS 9C 7S 3D KS AD TS 4C 4H QH 9C 8H 2S 7D KS 7H 5D KD 4C 9C 2S 2H JC 6S 6C TC QC JH 5C 7S AC 8H KC 8S 6H QS JC 3D 6S JS 2D JH 8C 4S 6H 8H 6D 5D AD 6H 7D 2S 4H 9H 7C AS AC 8H 5S 3C JS 4S 6D 5H 2S QH 6S 9C 2C 3D 5S 6S 9S 4C QS 8D QD 8S TC 9C 3D AH 9H 5S 2C 7D AD JC 3S 7H TC AS 3C 6S 6D 7S KH KC 9H 3S TC 8H 6S 5H JH 8C 7D AC 2S QD 9D 9C 3S JC 8C KS 8H 5D 4D JS AH JD 6D 9D 8C 9H 9S 8H 3H 2D 6S 4C 4D 8S AD 4S TC AH 9H TS AC QC TH KC 6D 4H 7S 8C 2H 3C QD JS 9D 5S JC AH 2H TS 9H 3H 4D QH 5D 9C 5H 7D 4S JC 3S 8S TH 3H 7C 2H JD JS TS AC 8D 9C 2H TD KC JD 2S 8C 5S AD 2C 3D KD 7C 5H 4D QH QD TC 6H 7D 7H 2C KC 5S KD 6H AH QC 7S QH 6H 5C AC 5H 2C 9C 2D 7C TD 2S 4D 9D AH 3D 7C JD 4H 8C 4C KS TH 3C JS QH 8H 4C AS 3D QS QC 4D 7S 5H JH 6D 7D 6H JS KH 3C QD 8S 7D 2H 2C 7C JC 2S 5H 8C QH 8S 9D TC 2H AD 7C 8D QD 6S 3S 7C AD 9H 2H 9S JD TS 4C 2D 3S AS 4H QC 2C 8H 8S 7S TD TC JH TH TD 3S 4D 4H 5S 5D QS 2C 8C QD QH TC 6D 4S 9S 9D 4H QC 8C JS 9D 6H JD 3H AD 6S TD QC KC 8S 3D 7C TD 7D 
8D 9H 4S 3S 6C 4S 3D 9D KD TC KC KS AC 5S 7C 6S QH 3D JS KD 6H 6D 2D 8C JD 2S 5S 4H 8S AC 2D 6S TS 5C 5H 8C 5S 3C 4S 3D 7C 8D AS 3H AS TS 7C 3H AD 7D JC QS 6C 6H 3S 9S 4C AC QH 5H 5D 9H TS 4H 6C 5C 7H 7S TD AD JD 5S 2H 2S 7D 6C KC 3S JD 8D 8S TS QS KH 8S QS 8D 6C TH AC AH 2C 8H 9S 7H TD KH QH 8S 3D 4D AH JD AS TS 3D 2H JC 2S JH KH 6C QC JS KC TH 2D 6H 7S 2S TC 8C 9D QS 3C 9D 6S KH 8H 6D 5D TH 2C 2H 6H TC 7D AD 4D 8S TS 9H TD 7S JS 6D JD JC 2H AC 6C 3D KH 8D KH JD 9S 5D 4H 4C 3H 7S QS 5C 4H JD 5D 3S 3C 4D KH QH QS 7S JD TS 8S QD AH 4C 6H 3S 5S 2C QS 3D JD AS 8D TH 7C 6S QC KS 7S 2H 8C QC 7H AC 6D 2D TH KH 5S 6C 7H KH 7D AH 8C 5C 7S 3D 3C KD AD 7D 6C 4D KS 2D 8C 4S 7C 8D 5S 2D 2S AH AD 2C 9D TD 3C AD 4S KS JH 7C 5C 8C 9C TH AS TD 4D 7C JD 8C QH 3C 5H 9S 3H 9C 8S 9S 6S QD KS AH 5H JH QC 9C 5S 4H 2H TD 7D AS 8C 9D 8C 2C 9D KD TC 7S 3D KH QC 3C 4D AS 4C QS 5S 9D 6S JD QH KS 6D AH 6C 4C 5H TS 9H 7D 3D 5S QS JD 7C 8D 9C AC 3S 6S 6C KH 8H JH 5D 9S 6D AS 6S 3S QC 7H QD AD 5C JH 2H AH 4H AS KC 2C JH 9C 2C 6H 2D JS 5D 9H KC 6D 7D 9D KD TH 3H AS 6S QC 6H AD JD 4H 7D KC 3H JS 3C TH 3D QS 4C 3H 8C QD 5H 6H AS 8H AD JD TH 8S KD 5D QC 7D JS 5S 5H TS 7D KC 9D QS 3H 3C 6D TS 7S AH 7C 4H 7H AH QC AC 4D 5D 6D TH 3C 4H 2S KD 8H 5H JH TC 6C JD 4S 8C 3D 4H JS TD 7S JH QS KD 7C QC KD 4D 7H 6S AD TD TC KH 5H 9H KC 3H 4D 3D AD 6S QD 6H TH 7C 6H TS QH 5S 2C KC TD 6S 7C 4D 5S JD JH 7D AC KD KH 4H 7D 6C 8D 8H 5C JH 8S QD TH JD 8D 7D 6C 7C 9D KD AS 5C QH JH 9S 2C 8C 3C 4C KS JH 2D 8D 4H 7S 6C JH KH 8H 3H 9D 2D AH 6D 4D TC 9C 8D 7H TD KS TH KD 3C JD 9H 8D QD AS KD 9D 2C 2S 9C 8D 3H 5C 7H KS 5H QH 2D 8C 9H 2D TH 6D QD 6C KC 3H 3S AD 4C 4H 3H JS 9D 3C TC 5H QH QC JC 3D 5C 6H 3S 3C JC 5S 7S 2S QH AC 5C 8C 4D 5D 4H 2S QD 3C 3H 2C TD AH 9C KD JS 6S QD 4C QC QS 8C 3S 4H TC JS 3H 7C JC AD 5H 4D 9C KS JC TD 9S TS 8S 9H QD TS 7D AS AC 2C TD 6H 8H AH 6S AD 8C 4S 9H 8D 9D KH 8S 3C QS 4D 2D 7S KH JS JC AD 4C 3C QS 9S 7H KC TD TH 5H JS AC JH 6D AC 2S QS 7C AS KS 6S KH 5S 6D 8H KH 3C QS 2H 5C 9C 9D 6C JS 2C 
4C 6H 7D JC AC QD TD 3H 4H QC 8H JD 4C KD KS 5C KC 7S 6D 2D 3H 2S QD 5S 7H AS TH 6S AS 6D 8D 2C 8S TD 8H QD JC AH 9C 9H 2D TD QH 2H 5C TC 3D 8H KC 8S 3D KH 2S TS TC 6S 4D JH 9H 9D QS AC KC 6H 5D 4D 8D AH 9S 5C QS 4H 7C 7D 2H 8S AD JS 3D AC 9S AS 2C 2D 2H 3H JC KH 7H QH KH JD TC KS 5S 8H 4C 8D 2H 7H 3S 2S 5H QS 3C AS 9H KD AD 3D JD 6H 5S 9C 6D AC 9S 3S 3D 5D 9C 2D AC 4S 2S AD 6C 6S QC 4C 2D 3H 6S KC QH QD 2H JH QC 3C 8S 4D 9S 2H 5C 8H QS QD 6D KD 6S 7H 3S KH 2H 5C JC 6C 3S 9S TC 6S 8H 2D AD 7S 8S TS 3C 6H 9C 3H 5C JC 8H QH TD QD 3C JS QD 5D TD 2C KH 9H TH AS 9S TC JD 3D 5C 5H AD QH 9H KC TC 7H 4H 8H 3H TD 6S AC 7C 2S QS 9D 5D 3C JC KS 4D 6C JH 2S 9S 6S 3C 7H TS 4C KD 6D 3D 9C 2D 9H AH AC 7H 2S JH 3S 7C QC QD 9H 3C 2H AC AS 8S KD 8C KH 2D 7S TD TH 6D JD 8D 4D 2H 5S 8S QH KD JD QS JH 4D KC 5H 3S 3C KH QC 6D 8H 3S AH 7D TD 2D 5S 9H QH 4S 6S 6C 6D TS TH 7S 6C 4C 6D QS JS 9C TS 3H 8D 8S JS 5C 7S AS 2C AH 2H AD 5S TC KD 6C 9C 9D TS 2S JC 4H 2C QD QS 9H TC 3H KC KS 4H 3C AD TH KH 9C 2H KD 9D TC 7S KC JH 2D 7C 3S KC AS 8C 5D 9C 9S QH 3H 2D 8C TD 4C 2H QC 5D TC 2C 7D KS 4D 6C QH TD KH 5D 7C AD 8D 2S 9S 8S 4C 8C 3D 6H QD 7C 7H 6C 8S QH 5H TS 5C 3C 4S 2S 2H 8S 6S 2H JC 3S 3H 9D 8C 2S 7H QC 2C 8H 9C AC JD 4C 4H 6S 3S 3H 3S 7D 4C 9S 5H 8H JC 3D TC QH 2S 2D 9S KD QD 9H AD 6D 9C 8D 2D KS 9S JC 4C JD KC 4S TH KH TS 6D 4D 5C KD 5H AS 9H AD QD JS 7C 6D 5D 5C TH 5H QH QS 9D QH KH 5H JH 4C 4D TC TH 6C KH AS TS 9D KD 9C 7S 4D 8H 5S KH AS 2S 7D 9D 4C TS TH AH 7C KS 4D AC 8S 9S 8D TH QH 9D 5C 5D 5C 8C QS TC 4C 3D 3S 2C 8D 9D KS 2D 3C KC 4S 8C KH 6C JC 8H AH 6H 7D 7S QD 3C 4C 6C KC 3H 2C QH 8H AS 7D 4C 8C 4H KC QD 5S 4H 2C TD AH JH QH 4C 8S 3H QS 5S JS 8H 2S 9H 9C 3S 2C 6H TS 7S JC QD AC TD KC 5S 3H QH AS QS 7D JC KC 2C 4C 5C 5S QH 3D AS JS 4H 8D 7H JC 2S 9C 5D 4D 2S 4S 9D 9C 2D QS 8H 7H 6D 7H 3H JS TS AC 2D JH 7C 8S JH 5H KC 3C TC 5S 9H 4C 8H 9D 8S KC 5H 9H AD KS 9D KH 8D AH JC 2H 9H KS 6S 3H QC 5H AH 9C 5C KH 5S AD 6C JC 9H QC 9C TD 5S 5D JC QH 2D KS 8H QS 2H TS JH 5H 5S AH 7H 3C 8S AS 
TD KH 6H 3D JD 2C 4C KC 7S AH 6C JH 4C KS 9D AD 7S KC 7D 8H 3S 9C 7H 5C 5H 3C 8H QC 3D KH 6D JC 2D 4H 5D 7D QC AD AH 9H QH 8H KD 8C JS 9D 3S 3C 2H 5D 6D 2S 8S 6S TS 3C 6H 8D 5S 3H TD 6C KS 3D JH 9C 7C 9S QS 5S 4H 6H 7S 6S TH 4S KC KD 3S JC JH KS 7C 3C 2S 6D QH 2C 7S 5H 8H AH KC 8D QD 6D KH 5C 7H 9D 3D 9C 6H 2D 8S JS 9S 2S 6D KC 7C TC KD 9C JH 7H KC 8S 2S 7S 3D 6H 4H 9H 2D 4C 8H 7H 5S 8S 2H 8D AD 7C 3C 7S 5S 4D 9H 3D JC KH 5D AS 7D 6D 9C JC 4C QH QS KH KD JD 7D 3D QS QC 8S 6D JS QD 6S 8C 5S QH TH 9H AS AC 2C JD QC KS QH 7S 3C 4C 5C KC 5D AH 6C 4H 9D AH 2C 3H KD 3D TS 5C TD 8S QS AS JS 3H KD AC 4H KS 7D 5D TS 9H 4H 4C 9C 2H 8C QC 2C 7D 9H 4D KS 4C QH AD KD JS QD AD AH KH 9D JS 9H JC KD JD 8S 3C 4S TS 7S 4D 5C 2S 6H 7C JS 7S 5C KD 6D QH 8S TD 2H 6S QH 6C TC 6H TD 4C 9D 2H QC 8H 3D TS 4D 2H 6H 6S 2C 7H 8S 6C 9H 9D JD JH 3S AH 2C 6S 3H 8S 2C QS 8C 5S 3H 2S 7D 3C AD 4S 5C QC QH AS TS 4S 6S 4C 5H JS JH 5C TD 4C 6H JS KD KH QS 4H TC KH JC 4D 9H 9D 8D KC 3C 8H 2H TC 8S AD 9S 4H TS 7H 2C 5C 4H 2S 6C 5S KS AH 9C 7C 8H KD TS QH TD QS 3C JH AH 2C 8D 7D 5D KC 3H 5S AC 4S 7H QS 4C 2H 3D 7D QC KH JH 6D 6C TD TH KD 5S 8D TH 6C 9D 7D KH 8C 9S 6D JD QS 7S QC 2S QH JC 4S KS 8D 7S 5S 9S JD KD 9C JC AD 2D 7C 4S 5H AH JH 9C 5D TD 7C 2D 6S KC 6C 7H 6S 9C QD 5S 4H KS TD 6S 8D KS 2D TH TD 9H JD TS 3S KH JS 4H 5D 9D TC TD QC JD TS QS QD AC AD 4C 6S 2D AS 3H KC 4C 7C 3C TD QS 9C KC AS 8D AD KC 7H QC 6D 8H 6S 5S AH 7S 8C 3S AD 9H JC 6D JD AS KH 6S JH AD 3D TS KS 7H JH 2D JS QD AC 9C JD 7C 6D TC 6H 6C JC 3D 3S QC KC 3S JC KD 2C 8D AH QS TS AS KD 3D JD 8H 7C 8C 5C QD 6C 
pythran-0.10.0+ds2/pythran/tests/euler/words.txt000066400000000000000000000377311416264035500216240ustar00rootroot00000000000000"A","ABILITY","ABLE","ABOUT","ABOVE","ABSENCE","ABSOLUTELY","ACADEMIC","ACCEPT","ACCESS","ACCIDENT","ACCOMPANY","ACCORDING","ACCOUNT","ACHIEVE","ACHIEVEMENT","ACID","ACQUIRE","ACROSS","ACT","ACTION","ACTIVE","ACTIVITY","ACTUAL","ACTUALLY","ADD","ADDITION","ADDITIONAL","ADDRESS","ADMINISTRATION","ADMIT","ADOPT","ADULT","ADVANCE","ADVANTAGE","ADVICE","ADVISE","AFFAIR","AFFECT","AFFORD","AFRAID","AFTER","AFTERNOON","AFTERWARDS","AGAIN","AGAINST","AGE","AGENCY","AGENT","AGO","AGREE","AGREEMENT","AHEAD","AID","AIM","AIR","AIRCRAFT","ALL","ALLOW","ALMOST","ALONE","ALONG","ALREADY","ALRIGHT","ALSO","ALTERNATIVE","ALTHOUGH","ALWAYS","AMONG","AMONGST","AMOUNT","AN","ANALYSIS","ANCIENT","AND","ANIMAL","ANNOUNCE","ANNUAL","ANOTHER","ANSWER","ANY","ANYBODY","ANYONE","ANYTHING","ANYWAY","APART","APPARENT","APPARENTLY","APPEAL","APPEAR","APPEARANCE","APPLICATION","APPLY","APPOINT","APPOINTMENT","APPROACH","APPROPRIATE","APPROVE","AREA","ARGUE","ARGUMENT","ARISE","ARM","ARMY","AROUND","ARRANGE","ARRANGEMENT","ARRIVE","ART","ARTICLE","ARTIST","AS","ASK","ASPECT","ASSEMBLY","ASSESS","ASSESSMENT","ASSET","ASSOCIATE","ASSOCIATION","ASSUME","ASSUMPTION","AT","ATMOSPHERE","ATTACH","ATTACK","ATTEMPT","ATTEND","ATTENTION","ATTITUDE","ATTRACT","ATTRACTIVE","AUDIENCE","AUTHOR","AUTHORITY","AVAILABLE","AVERAGE","AVOID","AWARD","AWARE","AWAY","AYE","BABY","BACK","BACKGROUND","BAD","BAG","BALANCE","BALL","BAND","BANK","BAR","BASE","BASIC","BASIS","BATTLE","BE","BEAR","BEAT","BEAUTIFUL","BECAUSE","BECOME","BED","BEDROOM","BEFORE","BEGIN","BEGINNING","BEHAVIOUR","BEHIND","BELIEF","BELIEVE","BELONG","BELOW","BENEATH","BENEFIT","BESIDE","BEST","BETTER","BETWEEN","BEYOND","BIG","BILL","BIND","BIRD","BIRTH","BIT","BLACK","BLOCK","BLOOD","BLOODY","BLOW","BLUE","BOARD","BOAT","BODY","BONE","BOOK","BORDER","BOTH","BOTTLE","BOTTOM","BOX","BOY","BRAIN","BRANCH","BREAK","BREATH",
"BRIDGE","BRIEF","BRIGHT","BRING","BROAD","BROTHER","BUDGET","BUILD","BUILDING","BURN","BUS","BUSINESS","BUSY","BUT","BUY","BY","CABINET","CALL","CAMPAIGN","CAN","CANDIDATE","CAPABLE","CAPACITY","CAPITAL","CAR","CARD","CARE","CAREER","CAREFUL","CAREFULLY","CARRY","CASE","CASH","CAT","CATCH","CATEGORY","CAUSE","CELL","CENTRAL","CENTRE","CENTURY","CERTAIN","CERTAINLY","CHAIN","CHAIR","CHAIRMAN","CHALLENGE","CHANCE","CHANGE","CHANNEL","CHAPTER","CHARACTER","CHARACTERISTIC","CHARGE","CHEAP","CHECK","CHEMICAL","CHIEF","CHILD","CHOICE","CHOOSE","CHURCH","CIRCLE","CIRCUMSTANCE","CITIZEN","CITY","CIVIL","CLAIM","CLASS","CLEAN","CLEAR","CLEARLY","CLIENT","CLIMB","CLOSE","CLOSELY","CLOTHES","CLUB","COAL","CODE","COFFEE","COLD","COLLEAGUE","COLLECT","COLLECTION","COLLEGE","COLOUR","COMBINATION","COMBINE","COME","COMMENT","COMMERCIAL","COMMISSION","COMMIT","COMMITMENT","COMMITTEE","COMMON","COMMUNICATION","COMMUNITY","COMPANY","COMPARE","COMPARISON","COMPETITION","COMPLETE","COMPLETELY","COMPLEX","COMPONENT","COMPUTER","CONCENTRATE","CONCENTRATION","CONCEPT","CONCERN","CONCERNED","CONCLUDE","CONCLUSION","CONDITION","CONDUCT","CONFERENCE","CONFIDENCE","CONFIRM","CONFLICT","CONGRESS","CONNECT","CONNECTION","CONSEQUENCE","CONSERVATIVE","CONSIDER","CONSIDERABLE","CONSIDERATION","CONSIST","CONSTANT","CONSTRUCTION","CONSUMER","CONTACT","CONTAIN","CONTENT","CONTEXT","CONTINUE","CONTRACT","CONTRAST","CONTRIBUTE","CONTRIBUTION","CONTROL","CONVENTION","CONVERSATION","COPY","CORNER","CORPORATE","CORRECT","COS","COST","COULD","COUNCIL","COUNT","COUNTRY","COUNTY","COUPLE","COURSE","COURT","COVER","CREATE","CREATION","CREDIT","CRIME","CRIMINAL","CRISIS","CRITERION","CRITICAL","CRITICISM","CROSS","CROWD","CRY","CULTURAL","CULTURE","CUP","CURRENT","CURRENTLY","CURRICULUM","CUSTOMER","CUT","DAMAGE","DANGER","DANGEROUS","DARK","DATA","DATE","DAUGHTER","DAY","DEAD","DEAL","DEATH","DEBATE","DEBT","DECADE","DECIDE","DECISION","DECLARE","DEEP","DEFENCE","DEFENDANT","DEFINE","DEFINITION","DEGREE","DE
LIVER","DEMAND","DEMOCRATIC","DEMONSTRATE","DENY","DEPARTMENT","DEPEND","DEPUTY","DERIVE","DESCRIBE","DESCRIPTION","DESIGN","DESIRE","DESK","DESPITE","DESTROY","DETAIL","DETAILED","DETERMINE","DEVELOP","DEVELOPMENT","DEVICE","DIE","DIFFERENCE","DIFFERENT","DIFFICULT","DIFFICULTY","DINNER","DIRECT","DIRECTION","DIRECTLY","DIRECTOR","DISAPPEAR","DISCIPLINE","DISCOVER","DISCUSS","DISCUSSION","DISEASE","DISPLAY","DISTANCE","DISTINCTION","DISTRIBUTION","DISTRICT","DIVIDE","DIVISION","DO","DOCTOR","DOCUMENT","DOG","DOMESTIC","DOOR","DOUBLE","DOUBT","DOWN","DRAW","DRAWING","DREAM","DRESS","DRINK","DRIVE","DRIVER","DROP","DRUG","DRY","DUE","DURING","DUTY","EACH","EAR","EARLY","EARN","EARTH","EASILY","EAST","EASY","EAT","ECONOMIC","ECONOMY","EDGE","EDITOR","EDUCATION","EDUCATIONAL","EFFECT","EFFECTIVE","EFFECTIVELY","EFFORT","EGG","EITHER","ELDERLY","ELECTION","ELEMENT","ELSE","ELSEWHERE","EMERGE","EMPHASIS","EMPLOY","EMPLOYEE","EMPLOYER","EMPLOYMENT","EMPTY","ENABLE","ENCOURAGE","END","ENEMY","ENERGY","ENGINE","ENGINEERING","ENJOY","ENOUGH","ENSURE","ENTER","ENTERPRISE","ENTIRE","ENTIRELY","ENTITLE","ENTRY","ENVIRONMENT","ENVIRONMENTAL","EQUAL","EQUALLY","EQUIPMENT","ERROR","ESCAPE","ESPECIALLY","ESSENTIAL","ESTABLISH","ESTABLISHMENT","ESTATE","ESTIMATE","EVEN","EVENING","EVENT","EVENTUALLY","EVER","EVERY","EVERYBODY","EVERYONE","EVERYTHING","EVIDENCE","EXACTLY","EXAMINATION","EXAMINE","EXAMPLE","EXCELLENT","EXCEPT","EXCHANGE","EXECUTIVE","EXERCISE","EXHIBITION","EXIST","EXISTENCE","EXISTING","EXPECT","EXPECTATION","EXPENDITURE","EXPENSE","EXPENSIVE","EXPERIENCE","EXPERIMENT","EXPERT","EXPLAIN","EXPLANATION","EXPLORE","EXPRESS","EXPRESSION","EXTEND","EXTENT","EXTERNAL","EXTRA","EXTREMELY","EYE","FACE","FACILITY","FACT","FACTOR","FACTORY","FAIL","FAILURE","FAIR","FAIRLY","FAITH","FALL","FAMILIAR","FAMILY","FAMOUS","FAR","FARM","FARMER","FASHION","FAST","FATHER","FAVOUR","FEAR","FEATURE","FEE","FEEL","FEELING","FEMALE","FEW","FIELD","FIGHT","FIGURE","FILE","FILL","FILM","FINA
L","FINALLY","FINANCE","FINANCIAL","FIND","FINDING","FINE","FINGER","FINISH","FIRE","FIRM","FIRST","FISH","FIT","FIX","FLAT","FLIGHT","FLOOR","FLOW","FLOWER","FLY","FOCUS","FOLLOW","FOLLOWING","FOOD","FOOT","FOOTBALL","FOR","FORCE","FOREIGN","FOREST","FORGET","FORM","FORMAL","FORMER","FORWARD","FOUNDATION","FREE","FREEDOM","FREQUENTLY","FRESH","FRIEND","FROM","FRONT","FRUIT","FUEL","FULL","FULLY","FUNCTION","FUND","FUNNY","FURTHER","FUTURE","GAIN","GAME","GARDEN","GAS","GATE","GATHER","GENERAL","GENERALLY","GENERATE","GENERATION","GENTLEMAN","GET","GIRL","GIVE","GLASS","GO","GOAL","GOD","GOLD","GOOD","GOVERNMENT","GRANT","GREAT","GREEN","GREY","GROUND","GROUP","GROW","GROWING","GROWTH","GUEST","GUIDE","GUN","HAIR","HALF","HALL","HAND","HANDLE","HANG","HAPPEN","HAPPY","HARD","HARDLY","HATE","HAVE","HE","HEAD","HEALTH","HEAR","HEART","HEAT","HEAVY","HELL","HELP","HENCE","HER","HERE","HERSELF","HIDE","HIGH","HIGHLY","HILL","HIM","HIMSELF","HIS","HISTORICAL","HISTORY","HIT","HOLD","HOLE","HOLIDAY","HOME","HOPE","HORSE","HOSPITAL","HOT","HOTEL","HOUR","HOUSE","HOUSEHOLD","HOUSING","HOW","HOWEVER","HUGE","HUMAN","HURT","HUSBAND","I","IDEA","IDENTIFY","IF","IGNORE","ILLUSTRATE","IMAGE","IMAGINE","IMMEDIATE","IMMEDIATELY","IMPACT","IMPLICATION","IMPLY","IMPORTANCE","IMPORTANT","IMPOSE","IMPOSSIBLE","IMPRESSION","IMPROVE","IMPROVEMENT","IN","INCIDENT","INCLUDE","INCLUDING","INCOME","INCREASE","INCREASED","INCREASINGLY","INDEED","INDEPENDENT","INDEX","INDICATE","INDIVIDUAL","INDUSTRIAL","INDUSTRY","INFLUENCE","INFORM","INFORMATION","INITIAL","INITIATIVE","INJURY","INSIDE","INSIST","INSTANCE","INSTEAD","INSTITUTE","INSTITUTION","INSTRUCTION","INSTRUMENT","INSURANCE","INTEND","INTENTION","INTEREST","INTERESTED","INTERESTING","INTERNAL","INTERNATIONAL","INTERPRETATION","INTERVIEW","INTO","INTRODUCE","INTRODUCTION","INVESTIGATE","INVESTIGATION","INVESTMENT","INVITE","INVOLVE","IRON","IS","ISLAND","ISSUE","IT","ITEM","ITS","ITSELF","JOB","JOIN","JOINT","JOURNEY","JUDGE","JUMP","JU
ST","JUSTICE","KEEP","KEY","KID","KILL","KIND","KING","KITCHEN","KNEE","KNOW","KNOWLEDGE","LABOUR","LACK","LADY","LAND","LANGUAGE","LARGE","LARGELY","LAST","LATE","LATER","LATTER","LAUGH","LAUNCH","LAW","LAWYER","LAY","LEAD","LEADER","LEADERSHIP","LEADING","LEAF","LEAGUE","LEAN","LEARN","LEAST","LEAVE","LEFT","LEG","LEGAL","LEGISLATION","LENGTH","LESS","LET","LETTER","LEVEL","LIABILITY","LIBERAL","LIBRARY","LIE","LIFE","LIFT","LIGHT","LIKE","LIKELY","LIMIT","LIMITED","LINE","LINK","LIP","LIST","LISTEN","LITERATURE","LITTLE","LIVE","LIVING","LOAN","LOCAL","LOCATION","LONG","LOOK","LORD","LOSE","LOSS","LOT","LOVE","LOVELY","LOW","LUNCH","MACHINE","MAGAZINE","MAIN","MAINLY","MAINTAIN","MAJOR","MAJORITY","MAKE","MALE","MAN","MANAGE","MANAGEMENT","MANAGER","MANNER","MANY","MAP","MARK","MARKET","MARRIAGE","MARRIED","MARRY","MASS","MASTER","MATCH","MATERIAL","MATTER","MAY","MAYBE","ME","MEAL","MEAN","MEANING","MEANS","MEANWHILE","MEASURE","MECHANISM","MEDIA","MEDICAL","MEET","MEETING","MEMBER","MEMBERSHIP","MEMORY","MENTAL","MENTION","MERELY","MESSAGE","METAL","METHOD","MIDDLE","MIGHT","MILE","MILITARY","MILK","MIND","MINE","MINISTER","MINISTRY","MINUTE","MISS","MISTAKE","MODEL","MODERN","MODULE","MOMENT","MONEY","MONTH","MORE","MORNING","MOST","MOTHER","MOTION","MOTOR","MOUNTAIN","MOUTH","MOVE","MOVEMENT","MUCH","MURDER","MUSEUM","MUSIC","MUST","MY","MYSELF","NAME","NARROW","NATION","NATIONAL","NATURAL","NATURE","NEAR","NEARLY","NECESSARILY","NECESSARY","NECK","NEED","NEGOTIATION","NEIGHBOUR","NEITHER","NETWORK","NEVER","NEVERTHELESS","NEW","NEWS","NEWSPAPER","NEXT","NICE","NIGHT","NO","NOBODY","NOD","NOISE","NONE","NOR","NORMAL","NORMALLY","NORTH","NORTHERN","NOSE","NOT","NOTE","NOTHING","NOTICE","NOTION","NOW","NUCLEAR","NUMBER","NURSE","OBJECT","OBJECTIVE","OBSERVATION","OBSERVE","OBTAIN","OBVIOUS","OBVIOUSLY","OCCASION","OCCUR","ODD","OF","OFF","OFFENCE","OFFER","OFFICE","OFFICER","OFFICIAL","OFTEN","OIL","OKAY","OLD","ON","ONCE","ONE","ONLY","ONTO","OPEN","OPERATE","
OPERATION","OPINION","OPPORTUNITY","OPPOSITION","OPTION","OR","ORDER","ORDINARY","ORGANISATION","ORGANISE","ORGANIZATION","ORIGIN","ORIGINAL","OTHER","OTHERWISE","OUGHT","OUR","OURSELVES","OUT","OUTCOME","OUTPUT","OUTSIDE","OVER","OVERALL","OWN","OWNER","PACKAGE","PAGE","PAIN","PAINT","PAINTING","PAIR","PANEL","PAPER","PARENT","PARK","PARLIAMENT","PART","PARTICULAR","PARTICULARLY","PARTLY","PARTNER","PARTY","PASS","PASSAGE","PAST","PATH","PATIENT","PATTERN","PAY","PAYMENT","PEACE","PENSION","PEOPLE","PER","PERCENT","PERFECT","PERFORM","PERFORMANCE","PERHAPS","PERIOD","PERMANENT","PERSON","PERSONAL","PERSUADE","PHASE","PHONE","PHOTOGRAPH","PHYSICAL","PICK","PICTURE","PIECE","PLACE","PLAN","PLANNING","PLANT","PLASTIC","PLATE","PLAY","PLAYER","PLEASE","PLEASURE","PLENTY","PLUS","POCKET","POINT","POLICE","POLICY","POLITICAL","POLITICS","POOL","POOR","POPULAR","POPULATION","POSITION","POSITIVE","POSSIBILITY","POSSIBLE","POSSIBLY","POST","POTENTIAL","POUND","POWER","POWERFUL","PRACTICAL","PRACTICE","PREFER","PREPARE","PRESENCE","PRESENT","PRESIDENT","PRESS","PRESSURE","PRETTY","PREVENT","PREVIOUS","PREVIOUSLY","PRICE","PRIMARY","PRIME","PRINCIPLE","PRIORITY","PRISON","PRISONER","PRIVATE","PROBABLY","PROBLEM","PROCEDURE","PROCESS","PRODUCE","PRODUCT","PRODUCTION","PROFESSIONAL","PROFIT","PROGRAM","PROGRAMME","PROGRESS","PROJECT","PROMISE","PROMOTE","PROPER","PROPERLY","PROPERTY","PROPORTION","PROPOSE","PROPOSAL","PROSPECT","PROTECT","PROTECTION","PROVE","PROVIDE","PROVIDED","PROVISION","PUB","PUBLIC","PUBLICATION","PUBLISH","PULL","PUPIL","PURPOSE","PUSH","PUT","QUALITY","QUARTER","QUESTION","QUICK","QUICKLY","QUIET","QUITE","RACE","RADIO","RAILWAY","RAIN","RAISE","RANGE","RAPIDLY","RARE","RATE","RATHER","REACH","REACTION","READ","READER","READING","READY","REAL","REALISE","REALITY","REALIZE","REALLY","REASON","REASONABLE","RECALL","RECEIVE","RECENT","RECENTLY","RECOGNISE","RECOGNITION","RECOGNIZE","RECOMMEND","RECORD","RECOVER","RED","REDUCE","REDUCTION","REFER","REFERENC
E","REFLECT","REFORM","REFUSE","REGARD","REGION","REGIONAL","REGULAR","REGULATION","REJECT","RELATE","RELATION","RELATIONSHIP","RELATIVE","RELATIVELY","RELEASE","RELEVANT","RELIEF","RELIGION","RELIGIOUS","RELY","REMAIN","REMEMBER","REMIND","REMOVE","REPEAT","REPLACE","REPLY","REPORT","REPRESENT","REPRESENTATION","REPRESENTATIVE","REQUEST","REQUIRE","REQUIREMENT","RESEARCH","RESOURCE","RESPECT","RESPOND","RESPONSE","RESPONSIBILITY","RESPONSIBLE","REST","RESTAURANT","RESULT","RETAIN","RETURN","REVEAL","REVENUE","REVIEW","REVOLUTION","RICH","RIDE","RIGHT","RING","RISE","RISK","RIVER","ROAD","ROCK","ROLE","ROLL","ROOF","ROOM","ROUND","ROUTE","ROW","ROYAL","RULE","RUN","RURAL","SAFE","SAFETY","SALE","SAME","SAMPLE","SATISFY","SAVE","SAY","SCALE","SCENE","SCHEME","SCHOOL","SCIENCE","SCIENTIFIC","SCIENTIST","SCORE","SCREEN","SEA","SEARCH","SEASON","SEAT","SECOND","SECONDARY","SECRETARY","SECTION","SECTOR","SECURE","SECURITY","SEE","SEEK","SEEM","SELECT","SELECTION","SELL","SEND","SENIOR","SENSE","SENTENCE","SEPARATE","SEQUENCE","SERIES","SERIOUS","SERIOUSLY","SERVANT","SERVE","SERVICE","SESSION","SET","SETTLE","SETTLEMENT","SEVERAL","SEVERE","SEX","SEXUAL","SHAKE","SHALL","SHAPE","SHARE","SHE","SHEET","SHIP","SHOE","SHOOT","SHOP","SHORT","SHOT","SHOULD","SHOULDER","SHOUT","SHOW","SHUT","SIDE","SIGHT","SIGN","SIGNAL","SIGNIFICANCE","SIGNIFICANT","SILENCE","SIMILAR","SIMPLE","SIMPLY","SINCE","SING","SINGLE","SIR","SISTER","SIT","SITE","SITUATION","SIZE","SKILL","SKIN","SKY","SLEEP","SLIGHTLY","SLIP","SLOW","SLOWLY","SMALL","SMILE","SO","SOCIAL","SOCIETY","SOFT","SOFTWARE","SOIL","SOLDIER","SOLICITOR","SOLUTION","SOME","SOMEBODY","SOMEONE","SOMETHING","SOMETIMES","SOMEWHAT","SOMEWHERE","SON","SONG","SOON","SORRY","SORT","SOUND","SOURCE","SOUTH","SOUTHERN","SPACE","SPEAK","SPEAKER","SPECIAL","SPECIES","SPECIFIC","SPEECH","SPEED","SPEND","SPIRIT","SPORT","SPOT","SPREAD","SPRING","STAFF","STAGE","STAND","STANDARD","STAR","START","STATE","STATEMENT","STATION","STATUS","STAY","STE
AL","STEP","STICK","STILL","STOCK","STONE","STOP","STORE","STORY","STRAIGHT","STRANGE","STRATEGY","STREET","STRENGTH","STRIKE","STRONG","STRONGLY","STRUCTURE","STUDENT","STUDIO","STUDY","STUFF","STYLE","SUBJECT","SUBSTANTIAL","SUCCEED","SUCCESS","SUCCESSFUL","SUCH","SUDDENLY","SUFFER","SUFFICIENT","SUGGEST","SUGGESTION","SUITABLE","SUM","SUMMER","SUN","SUPPLY","SUPPORT","SUPPOSE","SURE","SURELY","SURFACE","SURPRISE","SURROUND","SURVEY","SURVIVE","SWITCH","SYSTEM","TABLE","TAKE","TALK","TALL","TAPE","TARGET","TASK","TAX","TEA","TEACH","TEACHER","TEACHING","TEAM","TEAR","TECHNICAL","TECHNIQUE","TECHNOLOGY","TELEPHONE","TELEVISION","TELL","TEMPERATURE","TEND","TERM","TERMS","TERRIBLE","TEST","TEXT","THAN","THANK","THANKS","THAT","THE","THEATRE","THEIR","THEM","THEME","THEMSELVES","THEN","THEORY","THERE","THEREFORE","THESE","THEY","THIN","THING","THINK","THIS","THOSE","THOUGH","THOUGHT","THREAT","THREATEN","THROUGH","THROUGHOUT","THROW","THUS","TICKET","TIME","TINY","TITLE","TO","TODAY","TOGETHER","TOMORROW","TONE","TONIGHT","TOO","TOOL","TOOTH","TOP","TOTAL","TOTALLY","TOUCH","TOUR","TOWARDS","TOWN","TRACK","TRADE","TRADITION","TRADITIONAL","TRAFFIC","TRAIN","TRAINING","TRANSFER","TRANSPORT","TRAVEL","TREAT","TREATMENT","TREATY","TREE","TREND","TRIAL","TRIP","TROOP","TROUBLE","TRUE","TRUST","TRUTH","TRY","TURN","TWICE","TYPE","TYPICAL","UNABLE","UNDER","UNDERSTAND","UNDERSTANDING","UNDERTAKE","UNEMPLOYMENT","UNFORTUNATELY","UNION","UNIT","UNITED","UNIVERSITY","UNLESS","UNLIKELY","UNTIL","UP","UPON","UPPER","URBAN","US","USE","USED","USEFUL","USER","USUAL","USUALLY","VALUE","VARIATION","VARIETY","VARIOUS","VARY","VAST","VEHICLE","VERSION","VERY","VIA","VICTIM","VICTORY","VIDEO","VIEW","VILLAGE","VIOLENCE","VISION","VISIT","VISITOR","VITAL","VOICE","VOLUME","VOTE","WAGE","WAIT","WALK","WALL","WANT","WAR","WARM","WARN","WASH","WATCH","WATER","WAVE","WAY","WE","WEAK","WEAPON","WEAR","WEATHER","WEEK","WEEKEND","WEIGHT","WELCOME","WELFARE","WELL","WEST","WESTERN","WHAT","WHA
TEVER","WHEN","WHERE","WHEREAS","WHETHER","WHICH","WHILE","WHILST","WHITE","WHO","WHOLE","WHOM","WHOSE","WHY","WIDE","WIDELY","WIFE","WILD","WILL","WIN","WIND","WINDOW","WINE","WING","WINNER","WINTER","WISH","WITH","WITHDRAW","WITHIN","WITHOUT","WOMAN","WONDER","WONDERFUL","WOOD","WORD","WORK","WORKER","WORKING","WORKS","WORLD","WORRY","WORTH","WOULD","WRITE","WRITER","WRITING","WRONG","YARD","YEAH","YEAR","YES","YESTERDAY","YET","YOU","YOUNG","YOUR","YOURSELF","YOUTH"pythran-0.10.0+ds2/pythran/tests/euler/words42.txt000066400000000000000000000377311416264035500217720ustar00rootroot00000000000000"A","ABILITY","ABLE","ABOUT","ABOVE","ABSENCE","ABSOLUTELY","ACADEMIC","ACCEPT","ACCESS","ACCIDENT","ACCOMPANY","ACCORDING","ACCOUNT","ACHIEVE","ACHIEVEMENT","ACID","ACQUIRE","ACROSS","ACT","ACTION","ACTIVE","ACTIVITY","ACTUAL","ACTUALLY","ADD","ADDITION","ADDITIONAL","ADDRESS","ADMINISTRATION","ADMIT","ADOPT","ADULT","ADVANCE","ADVANTAGE","ADVICE","ADVISE","AFFAIR","AFFECT","AFFORD","AFRAID","AFTER","AFTERNOON","AFTERWARDS","AGAIN","AGAINST","AGE","AGENCY","AGENT","AGO","AGREE","AGREEMENT","AHEAD","AID","AIM","AIR","AIRCRAFT","ALL","ALLOW","ALMOST","ALONE","ALONG","ALREADY","ALRIGHT","ALSO","ALTERNATIVE","ALTHOUGH","ALWAYS","AMONG","AMONGST","AMOUNT","AN","ANALYSIS","ANCIENT","AND","ANIMAL","ANNOUNCE","ANNUAL","ANOTHER","ANSWER","ANY","ANYBODY","ANYONE","ANYTHING","ANYWAY","APART","APPARENT","APPARENTLY","APPEAL","APPEAR","APPEARANCE","APPLICATION","APPLY","APPOINT","APPOINTMENT","APPROACH","APPROPRIATE","APPROVE","AREA","ARGUE","ARGUMENT","ARISE","ARM","ARMY","AROUND","ARRANGE","ARRANGEMENT","ARRIVE","ART","ARTICLE","ARTIST","AS","ASK","ASPECT","ASSEMBLY","ASSESS","ASSESSMENT","ASSET","ASSOCIATE","ASSOCIATION","ASSUME","ASSUMPTION","AT","ATMOSPHERE","ATTACH","ATTACK","ATTEMPT","ATTEND","ATTENTION","ATTITUDE","ATTRACT","ATTRACTIVE","AUDIENCE","AUTHOR","AUTHORITY","AVAILABLE","AVERAGE","AVOID","AWARD","AWARE","AWAY","AYE","BABY","BACK","BACKGROUND","BAD","BAG","BALANCE","BAL
L","BAND","BANK","BAR","BASE","BASIC","BASIS","BATTLE","BE","BEAR","BEAT","BEAUTIFUL","BECAUSE","BECOME","BED","BEDROOM","BEFORE","BEGIN","BEGINNING","BEHAVIOUR","BEHIND","BELIEF","BELIEVE","BELONG","BELOW","BENEATH","BENEFIT","BESIDE","BEST","BETTER","BETWEEN","BEYOND","BIG","BILL","BIND","BIRD","BIRTH","BIT","BLACK","BLOCK","BLOOD","BLOODY","BLOW","BLUE","BOARD","BOAT","BODY","BONE","BOOK","BORDER","BOTH","BOTTLE","BOTTOM","BOX","BOY","BRAIN","BRANCH","BREAK","BREATH","BRIDGE","BRIEF","BRIGHT","BRING","BROAD","BROTHER","BUDGET","BUILD","BUILDING","BURN","BUS","BUSINESS","BUSY","BUT","BUY","BY","CABINET","CALL","CAMPAIGN","CAN","CANDIDATE","CAPABLE","CAPACITY","CAPITAL","CAR","CARD","CARE","CAREER","CAREFUL","CAREFULLY","CARRY","CASE","CASH","CAT","CATCH","CATEGORY","CAUSE","CELL","CENTRAL","CENTRE","CENTURY","CERTAIN","CERTAINLY","CHAIN","CHAIR","CHAIRMAN","CHALLENGE","CHANCE","CHANGE","CHANNEL","CHAPTER","CHARACTER","CHARACTERISTIC","CHARGE","CHEAP","CHECK","CHEMICAL","CHIEF","CHILD","CHOICE","CHOOSE","CHURCH","CIRCLE","CIRCUMSTANCE","CITIZEN","CITY","CIVIL","CLAIM","CLASS","CLEAN","CLEAR","CLEARLY","CLIENT","CLIMB","CLOSE","CLOSELY","CLOTHES","CLUB","COAL","CODE","COFFEE","COLD","COLLEAGUE","COLLECT","COLLECTION","COLLEGE","COLOUR","COMBINATION","COMBINE","COME","COMMENT","COMMERCIAL","COMMISSION","COMMIT","COMMITMENT","COMMITTEE","COMMON","COMMUNICATION","COMMUNITY","COMPANY","COMPARE","COMPARISON","COMPETITION","COMPLETE","COMPLETELY","COMPLEX","COMPONENT","COMPUTER","CONCENTRATE","CONCENTRATION","CONCEPT","CONCERN","CONCERNED","CONCLUDE","CONCLUSION","CONDITION","CONDUCT","CONFERENCE","CONFIDENCE","CONFIRM","CONFLICT","CONGRESS","CONNECT","CONNECTION","CONSEQUENCE","CONSERVATIVE","CONSIDER","CONSIDERABLE","CONSIDERATION","CONSIST","CONSTANT","CONSTRUCTION","CONSUMER","CONTACT","CONTAIN","CONTENT","CONTEXT","CONTINUE","CONTRACT","CONTRAST","CONTRIBUTE","CONTRIBUTION","CONTROL","CONVENTION","CONVERSATION","COPY","CORNER","CORPORATE","CORRECT","COS","COST","COUL
D","COUNCIL","COUNT","COUNTRY","COUNTY","COUPLE","COURSE","COURT","COVER","CREATE","CREATION","CREDIT","CRIME","CRIMINAL","CRISIS","CRITERION","CRITICAL","CRITICISM","CROSS","CROWD","CRY","CULTURAL","CULTURE","CUP","CURRENT","CURRENTLY","CURRICULUM","CUSTOMER","CUT","DAMAGE","DANGER","DANGEROUS","DARK","DATA","DATE","DAUGHTER","DAY","DEAD","DEAL","DEATH","DEBATE","DEBT","DECADE","DECIDE","DECISION","DECLARE","DEEP","DEFENCE","DEFENDANT","DEFINE","DEFINITION","DEGREE","DELIVER","DEMAND","DEMOCRATIC","DEMONSTRATE","DENY","DEPARTMENT","DEPEND","DEPUTY","DERIVE","DESCRIBE","DESCRIPTION","DESIGN","DESIRE","DESK","DESPITE","DESTROY","DETAIL","DETAILED","DETERMINE","DEVELOP","DEVELOPMENT","DEVICE","DIE","DIFFERENCE","DIFFERENT","DIFFICULT","DIFFICULTY","DINNER","DIRECT","DIRECTION","DIRECTLY","DIRECTOR","DISAPPEAR","DISCIPLINE","DISCOVER","DISCUSS","DISCUSSION","DISEASE","DISPLAY","DISTANCE","DISTINCTION","DISTRIBUTION","DISTRICT","DIVIDE","DIVISION","DO","DOCTOR","DOCUMENT","DOG","DOMESTIC","DOOR","DOUBLE","DOUBT","DOWN","DRAW","DRAWING","DREAM","DRESS","DRINK","DRIVE","DRIVER","DROP","DRUG","DRY","DUE","DURING","DUTY","EACH","EAR","EARLY","EARN","EARTH","EASILY","EAST","EASY","EAT","ECONOMIC","ECONOMY","EDGE","EDITOR","EDUCATION","EDUCATIONAL","EFFECT","EFFECTIVE","EFFECTIVELY","EFFORT","EGG","EITHER","ELDERLY","ELECTION","ELEMENT","ELSE","ELSEWHERE","EMERGE","EMPHASIS","EMPLOY","EMPLOYEE","EMPLOYER","EMPLOYMENT","EMPTY","ENABLE","ENCOURAGE","END","ENEMY","ENERGY","ENGINE","ENGINEERING","ENJOY","ENOUGH","ENSURE","ENTER","ENTERPRISE","ENTIRE","ENTIRELY","ENTITLE","ENTRY","ENVIRONMENT","ENVIRONMENTAL","EQUAL","EQUALLY","EQUIPMENT","ERROR","ESCAPE","ESPECIALLY","ESSENTIAL","ESTABLISH","ESTABLISHMENT","ESTATE","ESTIMATE","EVEN","EVENING","EVENT","EVENTUALLY","EVER","EVERY","EVERYBODY","EVERYONE","EVERYTHING","EVIDENCE","EXACTLY","EXAMINATION","EXAMINE","EXAMPLE","EXCELLENT","EXCEPT","EXCHANGE","EXECUTIVE","EXERCISE","EXHIBITION","EXIST","EXISTENCE","EXISTING","EXPECT","EXPEC
TATION","EXPENDITURE","EXPENSE","EXPENSIVE","EXPERIENCE","EXPERIMENT","EXPERT","EXPLAIN","EXPLANATION","EXPLORE","EXPRESS","EXPRESSION","EXTEND","EXTENT","EXTERNAL","EXTRA","EXTREMELY","EYE","FACE","FACILITY","FACT","FACTOR","FACTORY","FAIL","FAILURE","FAIR","FAIRLY","FAITH","FALL","FAMILIAR","FAMILY","FAMOUS","FAR","FARM","FARMER","FASHION","FAST","FATHER","FAVOUR","FEAR","FEATURE","FEE","FEEL","FEELING","FEMALE","FEW","FIELD","FIGHT","FIGURE","FILE","FILL","FILM","FINAL","FINALLY","FINANCE","FINANCIAL","FIND","FINDING","FINE","FINGER","FINISH","FIRE","FIRM","FIRST","FISH","FIT","FIX","FLAT","FLIGHT","FLOOR","FLOW","FLOWER","FLY","FOCUS","FOLLOW","FOLLOWING","FOOD","FOOT","FOOTBALL","FOR","FORCE","FOREIGN","FOREST","FORGET","FORM","FORMAL","FORMER","FORWARD","FOUNDATION","FREE","FREEDOM","FREQUENTLY","FRESH","FRIEND","FROM","FRONT","FRUIT","FUEL","FULL","FULLY","FUNCTION","FUND","FUNNY","FURTHER","FUTURE","GAIN","GAME","GARDEN","GAS","GATE","GATHER","GENERAL","GENERALLY","GENERATE","GENERATION","GENTLEMAN","GET","GIRL","GIVE","GLASS","GO","GOAL","GOD","GOLD","GOOD","GOVERNMENT","GRANT","GREAT","GREEN","GREY","GROUND","GROUP","GROW","GROWING","GROWTH","GUEST","GUIDE","GUN","HAIR","HALF","HALL","HAND","HANDLE","HANG","HAPPEN","HAPPY","HARD","HARDLY","HATE","HAVE","HE","HEAD","HEALTH","HEAR","HEART","HEAT","HEAVY","HELL","HELP","HENCE","HER","HERE","HERSELF","HIDE","HIGH","HIGHLY","HILL","HIM","HIMSELF","HIS","HISTORICAL","HISTORY","HIT","HOLD","HOLE","HOLIDAY","HOME","HOPE","HORSE","HOSPITAL","HOT","HOTEL","HOUR","HOUSE","HOUSEHOLD","HOUSING","HOW","HOWEVER","HUGE","HUMAN","HURT","HUSBAND","I","IDEA","IDENTIFY","IF","IGNORE","ILLUSTRATE","IMAGE","IMAGINE","IMMEDIATE","IMMEDIATELY","IMPACT","IMPLICATION","IMPLY","IMPORTANCE","IMPORTANT","IMPOSE","IMPOSSIBLE","IMPRESSION","IMPROVE","IMPROVEMENT","IN","INCIDENT","INCLUDE","INCLUDING","INCOME","INCREASE","INCREASED","INCREASINGLY","INDEED","INDEPENDENT","INDEX","INDICATE","INDIVIDUAL","INDUSTRIAL","INDUSTRY","INFLUENCE",
"INFORM","INFORMATION","INITIAL","INITIATIVE","INJURY","INSIDE","INSIST","INSTANCE","INSTEAD","INSTITUTE","INSTITUTION","INSTRUCTION","INSTRUMENT","INSURANCE","INTEND","INTENTION","INTEREST","INTERESTED","INTERESTING","INTERNAL","INTERNATIONAL","INTERPRETATION","INTERVIEW","INTO","INTRODUCE","INTRODUCTION","INVESTIGATE","INVESTIGATION","INVESTMENT","INVITE","INVOLVE","IRON","IS","ISLAND","ISSUE","IT","ITEM","ITS","ITSELF","JOB","JOIN","JOINT","JOURNEY","JUDGE","JUMP","JUST","JUSTICE","KEEP","KEY","KID","KILL","KIND","KING","KITCHEN","KNEE","KNOW","KNOWLEDGE","LABOUR","LACK","LADY","LAND","LANGUAGE","LARGE","LARGELY","LAST","LATE","LATER","LATTER","LAUGH","LAUNCH","LAW","LAWYER","LAY","LEAD","LEADER","LEADERSHIP","LEADING","LEAF","LEAGUE","LEAN","LEARN","LEAST","LEAVE","LEFT","LEG","LEGAL","LEGISLATION","LENGTH","LESS","LET","LETTER","LEVEL","LIABILITY","LIBERAL","LIBRARY","LIE","LIFE","LIFT","LIGHT","LIKE","LIKELY","LIMIT","LIMITED","LINE","LINK","LIP","LIST","LISTEN","LITERATURE","LITTLE","LIVE","LIVING","LOAN","LOCAL","LOCATION","LONG","LOOK","LORD","LOSE","LOSS","LOT","LOVE","LOVELY","LOW","LUNCH","MACHINE","MAGAZINE","MAIN","MAINLY","MAINTAIN","MAJOR","MAJORITY","MAKE","MALE","MAN","MANAGE","MANAGEMENT","MANAGER","MANNER","MANY","MAP","MARK","MARKET","MARRIAGE","MARRIED","MARRY","MASS","MASTER","MATCH","MATERIAL","MATTER","MAY","MAYBE","ME","MEAL","MEAN","MEANING","MEANS","MEANWHILE","MEASURE","MECHANISM","MEDIA","MEDICAL","MEET","MEETING","MEMBER","MEMBERSHIP","MEMORY","MENTAL","MENTION","MERELY","MESSAGE","METAL","METHOD","MIDDLE","MIGHT","MILE","MILITARY","MILK","MIND","MINE","MINISTER","MINISTRY","MINUTE","MISS","MISTAKE","MODEL","MODERN","MODULE","MOMENT","MONEY","MONTH","MORE","MORNING","MOST","MOTHER","MOTION","MOTOR","MOUNTAIN","MOUTH","MOVE","MOVEMENT","MUCH","MURDER","MUSEUM","MUSIC","MUST","MY","MYSELF","NAME","NARROW","NATION","NATIONAL","NATURAL","NATURE","NEAR","NEARLY","NECESSARILY","NECESSARY","NECK","NEED","NEGOTIATION","NEIGHBOUR","NEITHER","NE
TWORK","NEVER","NEVERTHELESS","NEW","NEWS","NEWSPAPER","NEXT","NICE","NIGHT","NO","NOBODY","NOD","NOISE","NONE","NOR","NORMAL","NORMALLY","NORTH","NORTHERN","NOSE","NOT","NOTE","NOTHING","NOTICE","NOTION","NOW","NUCLEAR","NUMBER","NURSE","OBJECT","OBJECTIVE","OBSERVATION","OBSERVE","OBTAIN","OBVIOUS","OBVIOUSLY","OCCASION","OCCUR","ODD","OF","OFF","OFFENCE","OFFER","OFFICE","OFFICER","OFFICIAL","OFTEN","OIL","OKAY","OLD","ON","ONCE","ONE","ONLY","ONTO","OPEN","OPERATE","OPERATION","OPINION","OPPORTUNITY","OPPOSITION","OPTION","OR","ORDER","ORDINARY","ORGANISATION","ORGANISE","ORGANIZATION","ORIGIN","ORIGINAL","OTHER","OTHERWISE","OUGHT","OUR","OURSELVES","OUT","OUTCOME","OUTPUT","OUTSIDE","OVER","OVERALL","OWN","OWNER","PACKAGE","PAGE","PAIN","PAINT","PAINTING","PAIR","PANEL","PAPER","PARENT","PARK","PARLIAMENT","PART","PARTICULAR","PARTICULARLY","PARTLY","PARTNER","PARTY","PASS","PASSAGE","PAST","PATH","PATIENT","PATTERN","PAY","PAYMENT","PEACE","PENSION","PEOPLE","PER","PERCENT","PERFECT","PERFORM","PERFORMANCE","PERHAPS","PERIOD","PERMANENT","PERSON","PERSONAL","PERSUADE","PHASE","PHONE","PHOTOGRAPH","PHYSICAL","PICK","PICTURE","PIECE","PLACE","PLAN","PLANNING","PLANT","PLASTIC","PLATE","PLAY","PLAYER","PLEASE","PLEASURE","PLENTY","PLUS","POCKET","POINT","POLICE","POLICY","POLITICAL","POLITICS","POOL","POOR","POPULAR","POPULATION","POSITION","POSITIVE","POSSIBILITY","POSSIBLE","POSSIBLY","POST","POTENTIAL","POUND","POWER","POWERFUL","PRACTICAL","PRACTICE","PREFER","PREPARE","PRESENCE","PRESENT","PRESIDENT","PRESS","PRESSURE","PRETTY","PREVENT","PREVIOUS","PREVIOUSLY","PRICE","PRIMARY","PRIME","PRINCIPLE","PRIORITY","PRISON","PRISONER","PRIVATE","PROBABLY","PROBLEM","PROCEDURE","PROCESS","PRODUCE","PRODUCT","PRODUCTION","PROFESSIONAL","PROFIT","PROGRAM","PROGRAMME","PROGRESS","PROJECT","PROMISE","PROMOTE","PROPER","PROPERLY","PROPERTY","PROPORTION","PROPOSE","PROPOSAL","PROSPECT","PROTECT","PROTECTION","PROVE","PROVIDE","PROVIDED","PROVISION","PUB","PUBLIC","PUBLI
CATION","PUBLISH","PULL","PUPIL","PURPOSE","PUSH","PUT","QUALITY","QUARTER","QUESTION","QUICK","QUICKLY","QUIET","QUITE","RACE","RADIO","RAILWAY","RAIN","RAISE","RANGE","RAPIDLY","RARE","RATE","RATHER","REACH","REACTION","READ","READER","READING","READY","REAL","REALISE","REALITY","REALIZE","REALLY","REASON","REASONABLE","RECALL","RECEIVE","RECENT","RECENTLY","RECOGNISE","RECOGNITION","RECOGNIZE","RECOMMEND","RECORD","RECOVER","RED","REDUCE","REDUCTION","REFER","REFERENCE","REFLECT","REFORM","REFUSE","REGARD","REGION","REGIONAL","REGULAR","REGULATION","REJECT","RELATE","RELATION","RELATIONSHIP","RELATIVE","RELATIVELY","RELEASE","RELEVANT","RELIEF","RELIGION","RELIGIOUS","RELY","REMAIN","REMEMBER","REMIND","REMOVE","REPEAT","REPLACE","REPLY","REPORT","REPRESENT","REPRESENTATION","REPRESENTATIVE","REQUEST","REQUIRE","REQUIREMENT","RESEARCH","RESOURCE","RESPECT","RESPOND","RESPONSE","RESPONSIBILITY","RESPONSIBLE","REST","RESTAURANT","RESULT","RETAIN","RETURN","REVEAL","REVENUE","REVIEW","REVOLUTION","RICH","RIDE","RIGHT","RING","RISE","RISK","RIVER","ROAD","ROCK","ROLE","ROLL","ROOF","ROOM","ROUND","ROUTE","ROW","ROYAL","RULE","RUN","RURAL","SAFE","SAFETY","SALE","SAME","SAMPLE","SATISFY","SAVE","SAY","SCALE","SCENE","SCHEME","SCHOOL","SCIENCE","SCIENTIFIC","SCIENTIST","SCORE","SCREEN","SEA","SEARCH","SEASON","SEAT","SECOND","SECONDARY","SECRETARY","SECTION","SECTOR","SECURE","SECURITY","SEE","SEEK","SEEM","SELECT","SELECTION","SELL","SEND","SENIOR","SENSE","SENTENCE","SEPARATE","SEQUENCE","SERIES","SERIOUS","SERIOUSLY","SERVANT","SERVE","SERVICE","SESSION","SET","SETTLE","SETTLEMENT","SEVERAL","SEVERE","SEX","SEXUAL","SHAKE","SHALL","SHAPE","SHARE","SHE","SHEET","SHIP","SHOE","SHOOT","SHOP","SHORT","SHOT","SHOULD","SHOULDER","SHOUT","SHOW","SHUT","SIDE","SIGHT","SIGN","SIGNAL","SIGNIFICANCE","SIGNIFICANT","SILENCE","SIMILAR","SIMPLE","SIMPLY","SINCE","SING","SINGLE","SIR","SISTER","SIT","SITE","SITUATION","SIZE","SKILL","SKIN","SKY","SLEEP","SLIGHTLY","SLIP","SLOW","S
LOWLY","SMALL","SMILE","SO","SOCIAL","SOCIETY","SOFT","SOFTWARE","SOIL","SOLDIER","SOLICITOR","SOLUTION","SOME","SOMEBODY","SOMEONE","SOMETHING","SOMETIMES","SOMEWHAT","SOMEWHERE","SON","SONG","SOON","SORRY","SORT","SOUND","SOURCE","SOUTH","SOUTHERN","SPACE","SPEAK","SPEAKER","SPECIAL","SPECIES","SPECIFIC","SPEECH","SPEED","SPEND","SPIRIT","SPORT","SPOT","SPREAD","SPRING","STAFF","STAGE","STAND","STANDARD","STAR","START","STATE","STATEMENT","STATION","STATUS","STAY","STEAL","STEP","STICK","STILL","STOCK","STONE","STOP","STORE","STORY","STRAIGHT","STRANGE","STRATEGY","STREET","STRENGTH","STRIKE","STRONG","STRONGLY","STRUCTURE","STUDENT","STUDIO","STUDY","STUFF","STYLE","SUBJECT","SUBSTANTIAL","SUCCEED","SUCCESS","SUCCESSFUL","SUCH","SUDDENLY","SUFFER","SUFFICIENT","SUGGEST","SUGGESTION","SUITABLE","SUM","SUMMER","SUN","SUPPLY","SUPPORT","SUPPOSE","SURE","SURELY","SURFACE","SURPRISE","SURROUND","SURVEY","SURVIVE","SWITCH","SYSTEM","TABLE","TAKE","TALK","TALL","TAPE","TARGET","TASK","TAX","TEA","TEACH","TEACHER","TEACHING","TEAM","TEAR","TECHNICAL","TECHNIQUE","TECHNOLOGY","TELEPHONE","TELEVISION","TELL","TEMPERATURE","TEND","TERM","TERMS","TERRIBLE","TEST","TEXT","THAN","THANK","THANKS","THAT","THE","THEATRE","THEIR","THEM","THEME","THEMSELVES","THEN","THEORY","THERE","THEREFORE","THESE","THEY","THIN","THING","THINK","THIS","THOSE","THOUGH","THOUGHT","THREAT","THREATEN","THROUGH","THROUGHOUT","THROW","THUS","TICKET","TIME","TINY","TITLE","TO","TODAY","TOGETHER","TOMORROW","TONE","TONIGHT","TOO","TOOL","TOOTH","TOP","TOTAL","TOTALLY","TOUCH","TOUR","TOWARDS","TOWN","TRACK","TRADE","TRADITION","TRADITIONAL","TRAFFIC","TRAIN","TRAINING","TRANSFER","TRANSPORT","TRAVEL","TREAT","TREATMENT","TREATY","TREE","TREND","TRIAL","TRIP","TROOP","TROUBLE","TRUE","TRUST","TRUTH","TRY","TURN","TWICE","TYPE","TYPICAL","UNABLE","UNDER","UNDERSTAND","UNDERSTANDING","UNDERTAKE","UNEMPLOYMENT","UNFORTUNATELY","UNION","UNIT","UNITED","UNIVERSITY","UNLESS","UNLIKELY","UNTIL","UP","UPON","UPP
ER","URBAN","US","USE","USED","USEFUL","USER","USUAL","USUALLY","VALUE","VARIATION","VARIETY","VARIOUS","VARY","VAST","VEHICLE","VERSION","VERY","VIA","VICTIM","VICTORY","VIDEO","VIEW","VILLAGE","VIOLENCE","VISION","VISIT","VISITOR","VITAL","VOICE","VOLUME","VOTE","WAGE","WAIT","WALK","WALL","WANT","WAR","WARM","WARN","WASH","WATCH","WATER","WAVE","WAY","WE","WEAK","WEAPON","WEAR","WEATHER","WEEK","WEEKEND","WEIGHT","WELCOME","WELFARE","WELL","WEST","WESTERN","WHAT","WHATEVER","WHEN","WHERE","WHEREAS","WHETHER","WHICH","WHILE","WHILST","WHITE","WHO","WHOLE","WHOM","WHOSE","WHY","WIDE","WIDELY","WIFE","WILD","WILL","WIN","WIND","WINDOW","WINE","WING","WINNER","WINTER","WISH","WITH","WITHDRAW","WITHIN","WITHOUT","WOMAN","WONDER","WONDERFUL","WOOD","WORD","WORK","WORKER","WORKING","WORKS","WORLD","WORRY","WORTH","WOULD","WRITE","WRITER","WRITING","WRONG","YARD","YEAH","YEAR","YES","YESTERDAY","YET","YOU","YOUNG","YOUR","YOURSELF","YOUTH"pythran-0.10.0+ds2/pythran/tests/g webb/000077500000000000000000000000001416264035500177245ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/g webb/average_position.py000066400000000000000000000007631416264035500236420ustar00rootroot00000000000000#pythran export average_position(str:(float,float) dict, str:(float,float) dict) #runas d = {"e":(1.,2.) } ; e = {"d":(2.,1.) 
} ; average_position(e,d) def average_position(pos1,pos2): pos_avg={} for k in pos1: if k in pos2: pos_avg[k]=((pos1[k][0]+pos2[k][0])/2,(pos1[k][1]+pos2[k][1])/2) else: pos_avg[k]=pos1[k] for k in pos2: if k in pos1: if k not in pos_avg: pos_avg[k]=((pos1[k][0]+pos2[k][0])/2,(pos1[k][1]+pos2[k][1])/2) else: pos_avg[k]=pos2[k] return pos_avg pythran-0.10.0+ds2/pythran/tests/g webb/global.py000066400000000000000000000001621416264035500215350ustar00rootroot00000000000000#pythran export foo, bar #pythran export yolo #runas foo #runas bar foo = [1, 3, 7] bar = 1.5, True yolo = {'a'} pythran-0.10.0+ds2/pythran/tests/g webb/score_text.py000066400000000000000000000004321416264035500224540ustar00rootroot00000000000000#unittest.python3.skip string.find not available anymore. #pythran export score_text(str, str:int dict) #runas score_text("e", { "d": 1 }) import string def score_text(txt,kwdict): score=0 for kw in kwdict.keys(): if string.find(txt,kw)>-1: score+=kwdict[kw] return score pythran-0.10.0+ds2/pythran/tests/ipython_script.ipy000066400000000000000000000003141416264035500223750ustar00rootroot00000000000000from IPython import get_ipython ipython = get_ipython() ipython.magic('%load_ext pythran.magic') ipython.run_cell_magic('pythran', '', ''' #pythran export foo(int) def foo(n): return n ''') print(foo(3)) pythran-0.10.0+ds2/pythran/tests/ipython_script_timeit.ipy000066400000000000000000000007221416264035500237530ustar00rootroot00000000000000from IPython import get_ipython ipython = get_ipython() ipython.magic('%load_ext pythran.magic') ipython.run_cell_magic('pythran', '-O2', ''' #pythran export foo(int) def foo(n): for i in range(n): i = 1 return 0 ''') ipython.run_line_magic('timeit', 'foo(1000)') ipython.run_cell_magic('pythran', '-Ofast', ''' #pythran export foo(int) def foo(n): for i in range(n): i = 1 return 0 ''') ipython.run_line_magic('timeit', 'foo(1000)') 
pythran-0.10.0+ds2/pythran/tests/notebooks/000077500000000000000000000000001416264035500206015ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/notebooks/capsule.ipynb000066400000000000000000000124351416264035500233050ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Usual magic stuff" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pythran" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "%load_ext pythran.magic" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Create a pythran capsule, not callable as a Python function" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export capsule f(int32, float64*, float64* )\n", "def f(n, x, cp):\n", " c = cp[0]\n", " return c + x[0] - x[1] * x[2]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Prepare to pass this function to scipy's LowLevel" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "import ctypes\n", "c = ctypes.c_double(1.0)\n", "user_data = ctypes.cast(ctypes.pointer(c), ctypes.c_void_p)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(1200.0000000000002, 1.3322676295501882e-11)" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from scipy import integrate, LowLevelCallable\n", "func = LowLevelCallable(f, user_data, signature=\"double (int, double *, void *)\")\n", "dat = [[0, 10], [-10, 0], [-1, 1]]\n", "integrate.nquad(func, dat)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Showcase Numpy integration" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "\n", "# using numpy adaptator\n", "#pythran export capsule transform(int64*, float64*, int32, 
int32, float64*)\n", "from numpy.ctypeslib import as_array\n", "def transform(output_coordinates, input_coordinates, output_rank, input_rank, user_data):\n", " shift = user_data[0]\n", " input_data = as_array(input_coordinates, input_rank)\n", " output_data = as_array(output_coordinates, output_rank)\n", " input_data[:] = output_data - shift\n", " return 1\n", "\n", "# same with explicit loops\n", "#pythran export capsule transform_basic(int64*, float64*, int32, int32, float64*)\n", "def transform_basic(output_coordinates, input_coordinates, output_rank, input_rank, user_data):\n", " shift = user_data[0]\n", " for i in range(input_rank):\n", " input_coordinates[i] = output_coordinates[i] - shift;\n", " return 1" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Validate output using the Numpy API" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[0. , 0. , 0. ],\n", " [0. , 1.3625, 2.7375],\n", " [0. , 4.8125, 6.1875],\n", " [0. , 8.2625, 9.6375]])" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import ctypes\n", "import numpy as np\n", "from scipy import ndimage, LowLevelCallable\n", "\n", "\n", "shift = 0.5\n", "\n", "user_data = ctypes.c_double(shift)\n", "ptr = ctypes.cast(ctypes.pointer(user_data), ctypes.c_void_p)\n", "callback = LowLevelCallable(transform, ptr, \"int (npy_intp *, double *, int, int, void *)\")\n", "im = np.arange(12).reshape(4, 3).astype(np.float64)\n", "\n", "out0 = ndimage.geometric_transform(im, callback)\n", "out0" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And using the explicit looping. Hopefully the results are the same!" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[0. , 0. , 0. ],\n", " [0. , 1.3625, 2.7375],\n", " [0. , 4.8125, 6.1875],\n", " [0. 
, 8.2625, 9.6375]])" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "callback = LowLevelCallable(transform_basic, ptr, \"int (npy_intp *, double *, int, int, void *)\")\n", "im = np.arange(12).reshape(4, 3).astype(np.float64)\n", "\n", "out1 = ndimage.geometric_transform(im, callback)\n", "out1" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "assert np.all(out0 == out1)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.3" } }, "nbformat": 4, "nbformat_minor": 2 } pythran-0.10.0+ds2/pythran/tests/notebooks/export.ipynb000066400000000000000000000456261416264035500232020ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "The usual magic stuff" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pythran\n", "%load_ext pythran.magic" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Test basic types" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "# simple types\n", "#pythran export identity(int)\n", "#pythran export identity(None)\n", "#pythran export identity(str)\n", "\n", "# parametric types\n", "#pythran export identity(int list)\n", "#pythran export identity(int set)\n", "#pythran export identity(int:str dict)\n", "#pythran export identity((int, int, str))\n", "\n", "# numpy stuff\n", "#pythran export identity(int[])\n", "#pythran export identity(int[:,:])\n", "#pythran export identity(int[][][])\n", "\n", "def identity(x):\n", " return x" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ 
"for elem in (int, str, list, set, dict):\n", " assert isinstance(identity(elem()), elem), elem" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "assert identity(None) is None\n", "assert isinstance(identity((1,1,\"1\")), tuple)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Numpy arrays keep the same id when passed through pythran. this is not guaranteed for other containers" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "import numpy\n", "x = numpy.ones(1, dtype=int)\n", "assert x is identity(x)\n", "\n", "y = numpy.ones((1, 1), dtype=int)\n", "assert y is identity(y)\n", "\n", "z = numpy.ones((1, 1, 1), dtype=int)\n", "assert z is identity(z)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING: Exporting function 'inplace_modification' that modifies its List argument. Beware that this argument won't be modified at Python call site\n" ] } ], "source": [ "%%pythran\n", "#pythran export inplace_modification(int list)\n", "def inplace_modification(l):\n", " l[0] = 0\n", " return l" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "l = [1, 2, 3]\n", "lp = inplace_modification(l)\n", "assert l is not lp\n", "assert l != lp" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It's possible to declare the overloads in a single export using the ``or`` keyword" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export strint(str or int, str or int)\n", "def strint(x, y):\n", " return y, x" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(2, 1)" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "strint(1, 2)" ] }, { "cell_type": "code", "execution_count": 10, 
"metadata": {}, "outputs": [ { "data": { "text/plain": [ "('2', '1')" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "strint('1', '2')" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "('2', 1)" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "strint(1, '2')" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(2, '1')" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "strint('1', 2)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The ``or`` operator also works inside polymorphic types, but it has lower precedence than ``set``, ``dict`` etc." ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export set_of(int or str set)\n", "def set_of(x): return x" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "1" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "set_of(1)" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'1'}" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "set_of({'1'})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Use ``[ ]`` to force ordering" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export set_of([int or str] set)\n", "def set_of(x): return x" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{1}" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "set_of({1})" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, 
"outputs": [ { "data": { "text/plain": [ "{'1'}" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "set_of({'1'})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Overload for different scalar types are most of the time not ambiguous:" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export scalar(bool)\n", "#pythran export scalar(int)\n", "#pythran export scalar(float)\n", "#pythran export scalar(complex)\n", "def scalar(x): return str(x)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "True\n", "1\n", "1.1\n", "(1.1,0)\n" ] } ], "source": [ "print(scalar(True))\n", "print(scalar(1))\n", "print(scalar(1.1))\n", "print(scalar(1.1+0j))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It works fine for scalars of differents size / sign" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export dtype(complex64)\n", "#pythran export dtype(complex128)\n", "#pythran export dtype(complex256)\n", "import numpy as np\n", "def dtype(x): return x.real, x.imag" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(1.5, -1.5) \n", "(1.5, -1.5) \n", "(1.5, -1.5) \n" ] } ], "source": [ "import numpy as np\n", "x64 = dtype(np.complex64(1.5 + -1.5j))\n", "print(x64, type(x64[0]))\n", "x128 = dtype(np.complex128(1.5 + -1.5j))\n", "print(x128, type(x128[0]))\n", "x256 = dtype(np.complex256(1.5 + -1.5j))\n", "print(x256, type(x256[0]))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It also works correctly for ndarray of different dimension and dtype:" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export array(int8[])\n", 
"#pythran export array(int16[][])\n", "#pythran export array(int16[][][])\n", "import numpy\n", "def array(x): return x.shape, x.itemsize" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "((1,), 1)\n", "((1, 1), 2)\n", "((1, 1, 1), 2)\n" ] } ], "source": [ "import numpy as np\n", "print(array(np.array([1], dtype=np.int8)))\n", "print(array(np.array([[1]], dtype=np.int16)))\n", "print(array(np.array([[[1]]], dtype=np.int16)))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It is however ambiguous to use numpy's dtype that actually have the same sign and size (in that case on a 64bit machine)" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ ":2:16 error: Ambiguous overloads\n", "\tambiguous(int64)\n", "\tambiguous(int).\n", "\n" ] } ], "source": [ "code = '''\n", "#pythran export ambiguous(int)\n", "#pythran export ambiguous(int64)\n", "def ambiguous(x): return x\n", "'''\n", "try:\n", " pythran.compile_pythrancode('dummy_module_name', code)\n", "except pythran.syntax.PythranSyntaxError as e:\n", " print(e)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And in case of invalid argument types, each overload is printed, as well as some information about the call site." 
] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export some(float32)\n", "#pythran export some(int)\n", "def some(x): return x" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Invalid call to pythranized function `some(bool)'\n", "Candidates are:\n", "\n", " - some(int)\n", " - some(float32)\n", "\n" ] } ], "source": [ "try:\n", " some(True)\n", "except TypeError as e:\n", " print(e)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Overloads are useful to handle function with default parameters." ] }, { "cell_type": "code", "execution_count": 28, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "# pythran export func(int, str, float64)\n", "# pythran export func(int, str)\n", "# pythran export func(int, None, float64)\n", "# pythran export func(int, None)\n", "# pythran export func(int)\n", "# pythran export func()\n", "\n", "def func(a=1, b=None, c=1.0):\n", " print(b)\n", " return a + c" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "3.0" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "func(1, \"hello\", 2.)" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "2.0" ] }, "execution_count": 30, "metadata": {}, "output_type": "execute_result" } ], "source": [ "func(1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It's possible to declare multiple entires in the same ``pythran export`` line" ] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export foo(int), foo(str)\n", "def foo(s): return s" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(1, '1')" ] }, "execution_count": 32, 
"metadata": {}, "output_type": "execute_result" } ], "source": [ "foo(1), foo('1')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The pythran export can also be used to export a global variable. But the global variable is not going to be shared, consider it as a read only view!" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "# pythran export thing\n", "thing = 'stuff that matter'" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'stuff that matter'" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "thing" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It's also possible to ask pythran to export raw function pointer, using the ``capsule`` keyword." ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export capsule corp(int, float)\n", "def corp(x, y):\n", " return x + y" ] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "':2:27 error: Unexpected token `[` at that point\n", "\n" ] } ], "source": [ "code = '''\n", "#pythran export invalid(str[])\n", "def invalid(x): return x\n", "'''\n", "try:\n", " pythran.compile_pythrancode('dummy_module_name', code)\n", "except pythran.syntax.PythranSyntaxError as e:\n", " print(e)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Pythran tries its best to provide detailed type information about parameters in case of mismatch" ] }, { "cell_type": "code", "execution_count": 41, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export basic(float32)\n", "def basic(x): return x" ] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Invalid call to pythranized function `basic(int64[:] (is a view))'\n", 
"Candidates are:\n", "\n", " - basic(float32)\n", "\n" ] } ], "source": [ "try:\n", " import numpy as np\n", " x = np.arange(10)[::2]\n", " basic(x)\n", "except TypeError as e:\n", " print(e)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Pythran supports views with new axis" ] }, { "cell_type": "code", "execution_count": 43, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export views(float64[:,:])\n", "def views(x):\n", " return x.shape" ] }, { "cell_type": "code", "execution_count": 44, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "((5, 1), (1, 5))" ] }, "execution_count": 44, "metadata": {}, "output_type": "execute_result" } ], "source": [ "x = np.ones(5)[:, None]\n", "y = np.ones(5)[None, :]\n", "views(x), views(y)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "When usure of an object type, just print it!" ] }, { "cell_type": "code", "execution_count": 45, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export dump_type((float, bool) list)\n", "#pythran export dump_type((int, complex) list)\n", "def dump_type(arg1):\n", " return str([(type(x[0]), type(x[1])) for x in arg1])" ] }, { "cell_type": "code", "execution_count": 46, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "('[(float, bool)]', '[(int_, complex)]')" ] }, "execution_count": 46, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dump_type([(1., True)]), dump_type([(1, 1j)])" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.3" } }, "nbformat": 4, "nbformat_minor": 2 } pythran-0.10.0+ds2/pythran/tests/notebooks/magic.ipynb000066400000000000000000000052221416264035500227250ustar00rootroot00000000000000{ "cells": [ { 
"cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pythran" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This is how one enable pythran magic:" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "%load_ext pythran.magic" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Pythran magic is similar to cython's" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "%%pythran\n", "#pythran export simple()\n", "def simple(): return 1" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Once compiled, the function is available in the current environment" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "assert simple() == 1" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "One can pass extra compiler options to pythran magic" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "%%pythran -Ofast\n", "#pythran export fast(float64)\n", "def fast(n):\n", " return 1 + n - 1 # thanks to fast, this can be reordered" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "assert 0 != fast(10e-30) == 10e-30" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "assert 0 == (1 + 10e-30 - 1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It's also possible to redefine a function like that" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "%%pythran -O0\n", "#pythran export fast(float64)\n", "def fast(n):\n", " return 1 + n - 1 # thanks to fast, this can be reordered" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "assert 0 == fast(10e-30)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, 
"language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.3" } }, "nbformat": 4, "nbformat_minor": 2 } pythran-0.10.0+ds2/pythran/tests/openmp.4/000077500000000000000000000000001416264035500202365ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/openmp.4/cancel_for.py000066400000000000000000000010411416264035500226770ustar00rootroot00000000000000#from http://jakascorner.com/blog/2016/08/omp-cancel.html def has_zero(matrix): has_zero = False rows, cols = matrix.shape #omp parallel default(none) shared(matrix, has_zero) #omp for for row in range(rows): for col in range(cols): if matrix[row, col] == 0: #omp critical has_zero = True #omp cancel for return has_zero import numpy as np def cancel_for(): data = np.array([[1,2,3], [4,0,5], [7, 8, 9]]) res = has_zero(data) return res pythran-0.10.0+ds2/pythran/tests/openmp.4/declare_reduction.py000066400000000000000000000011631416264035500242640ustar00rootroot00000000000000#from https://software.intel.com/en-us/node/695675 def min_abs(omp_in, omp_out): return min(abs(omp_in), omp_out) import numpy as np def find_min_abs(data): '''return the smallest magnitude among all the integers in data[N]''' result = abs(data[0]) #omp declare reduction(minabs : int : omp_out = min_abs(omp_in, omp_out)) initializer(omp_priv=omp_orig) #omp parallel for reduction(minabs: result) for d in data: if abs(d) < result: result = abs(d) return result def declare_reduction(): data = [-1, 2, 3, 1, 4, 5, 6, -7] res = find_min_abs(data) return res == 1 pythran-0.10.0+ds2/pythran/tests/openmp.4/declare_reduction_complex_implicit.py000066400000000000000000000004241416264035500277040ustar00rootroot00000000000000import numpy as np def csum(data): result = 0. 
#omp parallel for reduction(+: result) for d in data: result += d return result def declare_reduction_complex_implicit(): data = [-1j, 2j, 3j, 1j, 4j, 5j, 6j, -7j] res = csum(data) return res pythran-0.10.0+ds2/pythran/tests/openmp.4/simd.py000066400000000000000000000004531416264035500215460ustar00rootroot00000000000000import numpy as np def perm(data, p): n = len(data) out = np.empty(n) #pragma omp simd for i in range(n): out[i] = data[p[i]] * 2 return out def simd(): data = [10, 20, 30, 40] p = [1, 0, 3, 2] res = perm(data, p) return np.all(res == [40, 20, 80, 60]) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/000077500000000000000000000000001416264035500213375ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_add.py000066400000000000000000000003051416264035500246460ustar00rootroot00000000000000def omp_atomic_add(): sum = 0 LOOPCOUNT=1000 "omp parallel for" for i in range(LOOPCOUNT): "omp atomic" sum += i return sum == (LOOPCOUNT * (LOOPCOUNT -1 ) ) /2 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_bitand.py000066400000000000000000000003151416264035500253600ustar00rootroot00000000000000def omp_atomic_bitand(): sum = 0 LOOPCOUNT = 1000 logics = [1]*LOOPCOUNT "omp parallel for" for i in range(LOOPCOUNT): "omp atomic" sum &= logics[i] return sum == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_bitor.py000066400000000000000000000003511416264035500252360ustar00rootroot00000000000000def omp_atomic_bitor(): sum = 0 LOOPCOUNT = 1000 logics = [1]*LOOPCOUNT logics[LOOPCOUNT//2] = 0 "omp parallel for" for i in range(LOOPCOUNT): "omp atomic" sum |= logics[i] return sum == 1 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_bitxor.py000066400000000000000000000003151416264035500254260ustar00rootroot00000000000000def omp_atomic_bitxor(): sum = 0 LOOPCOUNT = 1000 logics = [0]*LOOPCOUNT "omp parallel for" for i in range(LOOPCOUNT): "omp atomic" sum ^= logics[i] return sum == 0 
pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_div.py000066400000000000000000000002561416264035500247050ustar00rootroot00000000000000def omp_atomic_div(): sum = 362880 LOOPCOUNT = 10 "omp parallel for" for i in range(1,LOOPCOUNT): "omp critical" sum /= i return sum == 1 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_lshift.py000066400000000000000000000002661416264035500254150ustar00rootroot00000000000000def omp_atomic_lshift(): sum = 1 LOOPCOUNT = 10 "omp parallel for" for i in range(LOOPCOUNT): "omp atomic" sum <<= 1 return sum == 2 ** LOOPCOUNT pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_prod.py000066400000000000000000000002551416264035500250660ustar00rootroot00000000000000def omp_atomic_prod(): sum = 1 LOOPCOUNT = 10 "omp parallel for" for i in range(1,LOOPCOUNT): "omp atomic" sum *= i return sum == 362880 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_rshift.py000066400000000000000000000002541416264035500254200ustar00rootroot00000000000000def omp_atomic_rshift(): sum = 1024 LOOPCOUNT = 10 "omp parallel for" for i in range(LOOPCOUNT): "omp atomic" sum >>= 1 return sum == 1 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_atomic_sub.py000066400000000000000000000003031416264035500247050ustar00rootroot00000000000000def omp_atomic_sub(): sum = 0. 
LOOPCOUNT = 1000 "omp parallel for" for i in range(LOOPCOUNT): "omp atomic" sum -= i return sum == -(LOOPCOUNT*(LOOPCOUNT-1))/2 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_barrier.py000066400000000000000000000006601416264035500242140ustar00rootroot00000000000000def omp_barrier(): import omp from time import sleep result1 = 0 result2 = 0 #omp parallel num_threads(4) if 1: use_omp = omp.in_parallel() rank = omp.get_thread_num() if rank == 1: sleep(0.5) result2 = 3 #omp barrier if rank == 2: result1 = result2 if use_omp: return result1 == 3 else: return result1 == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_critical.py000066400000000000000000000003351416264035500243570ustar00rootroot00000000000000def omp_critical(): sum = 0 if 'omp parallel': mysum = 0 'omp for' for i in range(1000): mysum += i 'omp critical' sum += mysum return sum == 999 * 1000 / 2 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_flush.py000066400000000000000000000007671416264035500237170ustar00rootroot00000000000000import omp from time import sleep def omp_flush(): result1 = 0 result2 = 0 if 'omp parallel': use_omp = omp.in_parallel() rank = omp.get_thread_num() 'omp barrier' if rank == 1: result2 = 3 'omp flush (result2)' dummy = result2 if rank == 0: sleep(0.5) 'omp flush(result2)' result1 = result2 return not use_omp or (result1 == result2 and result2 == dummy and result2 == 3) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_collapse.py000066400000000000000000000012331416264035500252330ustar00rootroot00000000000000# Utility function to check that i is increasing monotonically # with each call def check_i_islarger (i, last_i): if i==1: last_i[0] = 0 islarger = ((i >= last_i[0])and(i - last_i[0]<=1)) last_i[0] = i return islarger def omp_for_collapse(): is_larger = 1 last_i = [0] if 'omp parallel': my_islarger = 1 #omp for schedule(static,1) collapse(2) ordered for i in range(1, 100): for j in range(1, 100): #omp ordered my_islarger = check_i_islarger(i, last_i) and 
my_islarger #omp critical is_larger = is_larger and my_islarger return is_larger pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_firstprivate.py000066400000000000000000000007221416264035500261550ustar00rootroot00000000000000def omp_for_firstprivate(): sum = 0 sum0 = 12345 sum1 = 0 import omp LOOPCOUNT = 1000 if 'omp parallel private(sum1)': 'omp single' threadsnum = omp.get_num_threads() 'omp for firstprivate(sum0)' for i in range(1, LOOPCOUNT+1): sum0+=i sum1 = sum0 'omp critical' sum+=sum1 known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 return sum == known_sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_lastprivate.py000066400000000000000000000006071416264035500257730ustar00rootroot00000000000000def omp_for_lastprivate(): sum = 0 i0 = -1 LOOPCOUNT = 1000 if 'omp parallel': sum0 = 0 'omp for schedule(static,7) lastprivate(i0)' for i in range(1, LOOPCOUNT + 1): sum0 += i i0 = i 'omp critical' sum+=sum0 known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 return (sum == known_sum) and (i0 == LOOPCOUNT) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_nowait.py000066400000000000000000000010741416264035500247350ustar00rootroot00000000000000def omp_for_nowait(): LOOPCOUNT = 1000 myarray = [0]*LOOPCOUNT result = 0 count = 0 import omp if 'omp parallel num_threads(4)': use_omp = omp.in_parallel() rank = omp.get_thread_num() 'omp for nowait' for i in range(LOOPCOUNT): if i == 0: while i < LOOPCOUNT**2: i+=1 count = 1 'omp flush(count)' for i in range(LOOPCOUNT): 'omp flush(count)' if count ==0: result = 1 return result == 1 or not use_omp pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_ordered.py000066400000000000000000000006771416264035500250700ustar00rootroot00000000000000def omp_for_ordered(): sum = 0 is_larger = 1 last_i = 0 if 'omp parallel': my_is_larger = 1 'omp for schedule(static,1) ordered' for i in range(1,100): if 'omp ordered': my_is_larger &= i > last_i last_i = i sum += i 'omp critical' is_larger &= my_is_larger 
known_sum = (99 * 100) / 2 return known_sum == sum and is_larger pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_private.py000066400000000000000000000010701416264035500251020ustar00rootroot00000000000000def do_some_work(): import math sum = 0. for i in range(1000): sum+=math.sqrt(i) def omp_for_private(): sum = 0 sum0 = 0 LOOPCOUNT = 1000 if 'omp parallel': sum1 = 0 'omp for private(sum0) schedule(static,1)' for i in range(1, LOOPCOUNT+1): sum0 = sum1 'omp flush' sum0 += i do_some_work() 'omp flush' sum1 = sum0 'omp critical' sum += sum1 known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_reduction.py000066400000000000000000000053631416264035500254350ustar00rootroot00000000000000def omp_for_reduction(): DOUBLE_DIGITS = 20 MAX_FACTOR = 10 KNOWN_PRODUCT = 3628800 rounding_error = 1.e-9 result = 0 LOOPCOUNT=1000 logicsArray = [0]*LOOPCOUNT sum = 0 product = 1 known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 dt = 1. / 3. dsum=0. 
logics = logicsArray logic_and = 1 logic_or = 0 bit_and = 1 bit_or = 0 exclusiv_bit_or = 0 # testing integer addition 'omp parallel for schedule(dynamic,1) reduction(+:sum)' for j in range(1, LOOPCOUNT+1): sum = sum + j if known_sum != sum: result+=1 print('Error in sum with integers') # testing integer substaction diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 'omp parallel for schedule(dynamic,1) reduction(-:diff)' for j in range(1, LOOPCOUNT+1): diff = diff - j if diff != 0: result+=1 print('Error in difference with integers') # testing integer multiplication 'omp parallel for schedule(dynamic,1) reduction(*:product)' for j in range(1, MAX_FACTOR +1): product *= j known_product = KNOWN_PRODUCT if known_product != product: result+=1 print('Error in product with integers') # testing bit and logics = [1] * LOOPCOUNT 'omp parallel for schedule(dynamic,1) reduction(&:logic_and)' for logic in logics: logic_and = logic_and & logic if not logic_and: result+=1 print('Error in bit and part 1') logics[LOOPCOUNT//2]=0 'omp parallel for schedule(dynamic,1) reduction(&:logic_and)' for logic in logics: logic_and = logic_and & logic if logic_and: result+=1 print('Error in bit and part 2') # testing bit or logics = [0] * LOOPCOUNT 'omp parallel for schedule(dynamic,1) reduction(|:logic_or)' for logic in logics: logic_or = logic_or | logic if logic_or: result+=1 print('Error in logic or part 1') logics[LOOPCOUNT//2]=1 'omp parallel for schedule(dynamic,1) reduction(|:logic_or)' for logic in logics: logic_or = logic_or | logic if not logic_or: result+=1 print('Error in logic or part 2') # testing exclusive bit or logics = [0] * LOOPCOUNT 'omp parallel for schedule(dynamic,1) reduction(^:exclusiv_bit_or)' for logic in logics: exclusiv_bit_or = exclusiv_bit_or ^ logic if exclusiv_bit_or: result+=1 print('Error in exclusive bit or part 1') logics[LOOPCOUNT//2]=1 'omp parallel for schedule(dynamic,1) reduction(^:exclusiv_bit_or)' for logic in logics: exclusiv_bit_or = exclusiv_bit_or ^ 
logic if not logic_or: result+=1 print('Error in exclusive bit or part 2') return result == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_schedule_auto.py000066400000000000000000000007221416264035500262570ustar00rootroot00000000000000def omp_for_schedule_auto(): import omp sum = 0 sum0 = 12345 sum1 = 0 if 'omp parallel private(sum1)': if 'omp single': threadsnum = omp.get_num_threads() 'omp for firstprivate(sum0) schedule(auto)' for i in range(1, 1001): sum0 += i sum1 = sum0 if 'omp critical': sum += sum1 known_sum = 12345 * threadsnum + (1000 * (1000 + 1)) / 2 return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_schedule_dynamic.py000066400000000000000000000014561416264035500267400ustar00rootroot00000000000000def omp_for_schedule_dynamic(): CFDMAX_SIZE = 100 chunk_size = 7 tids = [0]*CFDMAX_SIZE count = 0 tmp_count = 0 result = 0 import omp if 'omp parallel shared(tids)': tid = omp.get_thread_num() 'omp for schedule(dynamic, chunk_size)' for i in range(CFDMAX_SIZE): tids[i] = tid for i in range(CFDMAX_SIZE-1): if tids[i] != tids[i+1]: count +=1 tmp = [1] * (count + 1) for i in range(CFDMAX_SIZE-1): if tids[i] != tids[i+1]: tmp_count+=1 tmp[tmp_count] = 1 else: tmp[tmp_count]+=1 for i in range(count): if tmp[i]%chunk_size != 0: result+=1 if tmp[count]%chunk_size != CFDMAX_SIZE%chunk_size: result+=1 return result == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_schedule_guided.py000066400000000000000000000042271416264035500265540ustar00rootroot00000000000000import omp from time import sleep def omp_for_schedule_guided(): tids = list(range(1001)) maxiter = 0 result = True notout = True if 'omp parallel num_threads(4)': in_parallel = omp.in_parallel() if 'omp single': threads = omp.get_num_threads() if threads<2: print("This test only works with at least two threads") result = False if 'omp parallel shared(tids, maxiter) num_threads(4)': tid = omp.get_num_threads() 'omp for nowait schedule(guided)' for j in range(1000): 
count = 0 'omp flush(maxiter)' if j > maxiter: if 'omp critical': maxiter = j 'omp flush(notout, maxiter)' while notout and count < 0.0005 and maxiter == j: 'omp flush(notout, maxiter)' sleep(0.0001) count += 0.0001 tids[j] = tid notout = False 'omp flush(maxiter, notout)' last_threadnr = tids[0] global_chunknr = 0 local_chunknr = [0 for i in range(10)] openwork = 1000; tids[1000] = -1 for i in range(1,1001): if last_threadnr == tids[i]: pass else: global_chunknr += 1 local_chunknr[last_threadnr] += 1 last_threadnr = tids[i] chuncksize = list(range(global_chunknr)) global_chunknr = 0 determined_chunksize = 1 last_threadnr = tids[0] for i in range(1,1001): if last_threadnr == tids[i]: determined_chunksize += 1 else: chuncksize[global_chunknr] = determined_chunksize global_chunknr += 1 local_chunknr[last_threadnr] += 1 last_threadnr = tids[i] determined_chunksize = 1 expected_chunk_size = openwork / threads c = chuncksize[0] / expected_chunk_size for i in range(global_chunknr): if expected_chunk_size > 1: expected_chunk_size = c * openwork / threads if abs(chuncksize[i] - expected_chunk_size) >= 2: result = False openwork -= chuncksize[i] return result or not in_parallel pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_schedule_static.py000066400000000000000000000050611416264035500265770ustar00rootroot00000000000000import omp from time import sleep def omp_for_schedule_static(): NUMBER_OF_THREADS = 10 CFSMAX_SIZE = 10000 MAX_TIME = 0.01 SLEEPTIME = 0.0005 counter = 0 tmp_count=1 lastthreadsstarttid = -1 result = 1 chunk_size = 7 tids = [0] * (CFSMAX_SIZE + 1) notout = 1 maxiter = 0 #omp parallel shared(tids,counter), num_threads(NUMBER_OF_THREADS) #omp single threads = omp.get_num_threads () if threads < 2: print("This test only works with at least two threads"); return 1 tids[CFSMAX_SIZE] = -1 if "omp parallel shared(tids) num_threads(NUMBER_OF_THREADS)": tid = omp.get_thread_num (); #omp for nowait schedule(static,chunk_size) for j in range(CFSMAX_SIZE): 
count = 0. #pragma omp flush(maxiter) if j > maxiter: #pragma omp critical maxiter = j while notout and (count < MAX_TIME) and (maxiter == j): #pragma omp flush(maxiter,notout) sleep (SLEEPTIME) count += SLEEPTIME tids[j] = tid notout = 0 #omp flush(maxiter,notout) # analysing the data in array tids lasttid = tids[0] tmp_count = 0; print(lasttid) print(tids) for i in range(CFSMAX_SIZE): # If the work was done by the same thread increase tmp_count by one. if tids[i] == lasttid: tmp_count+=1 continue; # Check if the next thread had has the right thread number. When finding # threadnumber -1 the end should be reached. if (tids[i] == (lasttid + 1) % threads or tids[i] == -1): # checking for the right chunk size if (tmp_count == chunk_size): tmp_count = 1 lasttid = tids[i] # If the chunk size was wrong, check if the end was reached else: if (tids[i] == -1): if (i == CFSMAX_SIZE): print("Last thread had chunk size ", tmp_count) break; else: print("ERROR: Last thread (thread with number -1) was found before the end.") result = 0 else: print("ERROR: chunk size was . 
(assigned was )\n", tmp_count, chunk_size) result = 0 else: print("ERROR: Found thread with number %d (should be inbetween 0 and %d).", tids[i], threads - 1) result = 0; return result pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_for_schedule_static_3.py000066400000000000000000000050371416264035500270240ustar00rootroot00000000000000import omp from time import sleep def omp_for_schedule_static_3(): tmp_count = 1 result = True chunk_size = 7 tids = list(range(1001)) notout = True maxiter = 0 if 'omp parallel shared(tids)': if 'omp single': threads = omp.get_num_threads() if threads < 2: print("E: This test only works with at least two threads") return True tids[1000] = -1 if 'omp parallel shared(tids)': tid = omp.get_thread_num() 'omp for nowait schedule(static,chunk_size)' for j in range(1000): count = 0 'omp flush(maxiter)' if j > maxiter: if 'omp critical': maxiter = j while notout and count < 0.01 and maxiter == j: 'omp flush(maxiter,notout)' sleep(0.0005) count += 0.0005 tids[j] = tid notout = False lasttid = tids[0] tmp_count = 0 for i in range(1001): if tids[i] == lasttid: tmp_count += 1 continue if tids[i] == (lasttid + 1) % threads or tids[i] == -1: if tmp_count == chunk_size: tmp_count = 1 lasttid = tids[i] else: if tids[i] == -1: if i == 1000: break; else: print("E: Last thread (thread with number -1) was " + "found before the end.\n") result = False else: print("ERROR: chunk size was " + str(tmp_count) + ". (assigned was " + str(chunk_size) + ")\n") result = False else: print("ERROR: Found thread with number " + str(tids[i]) + " (should be inbetween 0 and " + str(threads - 1) + ").\n") result = False tids = list(range(1000)) tids2 = list(range(1000)) if 'omp parallel': 'omp for schedule(static) nowait' for n in range(1000): if 1000 == n + 1: sleep(0.0005) tids[n] = omp.get_thread_num() 'omp for schedule(static) nowait' for m in range(1, 1001): tids2[m-1] = omp.get_thread_num() for i in range(1000): if tids[i] != tids2[i]: print("E: Chunk no. 
" + str(i) + " was assigned once to thread " + str(tids[i]) + " and later to thread " + str(tids2[i]) + ".\n") result = False return result pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_get_num_threads.py000066400000000000000000000004151416264035500257340ustar00rootroot00000000000000def omp_get_num_threads(): import omp nthreads = 0 nthreads_lib = -1 if 'omp parallel': if 'omp critical': nthreads += 1 if 'omp single': nthreads_lib = omp.get_num_threads() return nthreads == nthreads_lib pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_get_wtick.py000066400000000000000000000001461416264035500245450ustar00rootroot00000000000000def omp_get_wtick(): import omp tick = omp.get_wtick() return tick > 0.0 and tick < 0.01 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_get_wtime.py000066400000000000000000000005751416264035500245570ustar00rootroot00000000000000def omp_get_wtime(): import omp from time import sleep wait_time = 2 #omp parallel in_parallel = omp.in_parallel() start = omp.get_wtime() sleep(wait_time) end = omp.get_wtime() measured_time = end - start print(measured_time, wait_time) return (measured_time > 0.9 * wait_time and measured_time < 1.1 * wait_time) or not in_parallel pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_in_parallel.py000066400000000000000000000006011416264035500250430ustar00rootroot00000000000000def omp_in_parallel(): import omp serial = 1 isparallel = 0 serial = omp.in_parallel() if 'omp parallel num_threads(2)': num_threads = omp.get_num_threads() if 'omp single': isparallel = omp.in_parallel() if 'omp parallel': if 'omp single': pass return bool(not serial and isparallel) or num_threads == 1 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_master.py000066400000000000000000000004361416264035500240620ustar00rootroot00000000000000def omp_master(): import omp threads = 0 executing_thread = -1 if 'omp parallel': if 'omp master': if 'omp critical': threads += 1 executing_thread = omp.get_thread_num() return threads == 1 and 
executing_thread == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_master_3.py000066400000000000000000000007211416264035500243010ustar00rootroot00000000000000def omp_master_3(): import omp tid_result = 0 nthreads = 0 executing_thread = -1 if 'omp parallel': if 'omp master': tid = omp.get_thread_num() if tid != 0: if 'omp critical': tid_result += 1 if 'omp critical': nthreads += 1 executing_thread = omp.get_thread_num() return nthreads == 1 and executing_thread == 0 and tid_result == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_nested.py000066400000000000000000000005441416264035500240510ustar00rootroot00000000000000def omp_nested(): import omp counter = 0 omp.set_nested(1) if 'omp parallel shared(counter) num_threads(4)': use_omp = omp.in_parallel() if 'omp critical': counter += 1 if 'omp parallel num_threads(4)': if 'omp critical': counter -= 1 return counter != 0 or not use_omp pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_copyin.py000066400000000000000000000006141416264035500257420ustar00rootroot00000000000000#unittest.skip threadprivate not supported def omp_parallel_copyin(): sum = 0 sum1 = 7 num_threads = 0 if 'omp parallel copyin(sum1) private(i)': 'omp for' for i in range(1, 1000): sum1 += i if 'omp critical': sum += sum1 num_threads += 1 known_sum = (999 * 1000) / 2 + 7 * num_threads return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_default.py000066400000000000000000000004651416264035500260710ustar00rootroot00000000000000def omp_parallel_default(): import omp sum = 0 known_sum = (1000 * (1000 + 1)) / 2 if "omp parallel default(shared)": mysum = 0 'omp for' for i in range(1, 1001): mysum += i if 'omp critical': sum += mysum return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_firstprivate.py000066400000000000000000000005421416264035500271630ustar00rootroot00000000000000def omp_parallel_firstprivate(): sum = 0 sum1 = 7 num_threads = 0 if 'omp parallel 
firstprivate(sum1)': 'omp for' for i in range(1,1000): sum1 += i if 'omp critical': sum += sum1 num_threads += 1 known_sum = (999 * 1000) / 2 + 7 * num_threads return sum == known_sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_for_firstprivate.py000066400000000000000000000003721416264035500300320ustar00rootroot00000000000000def omp_parallel_for_firstprivate(): sum = 0 i2 = 3 'omp parallel for reduction(+:sum) firstprivate(i2)' for i in range(1,1001): sum += i + i2 known_sum = (1000 * (1000 + 1)) / 2 + i2 * 1000; return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_for_if.py000066400000000000000000000005261416264035500257070ustar00rootroot00000000000000def omp_parallel_for_if(using=0): num_threads = 0 import omp sum = 0 sum2 = 0 LOOPCOUNT=1000 'omp parallel for if(using == 1)' for i in range(LOOPCOUNT+1): num_threads = omp.get_num_threads() sum+=i known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 return known_sum == sum and num_threads == 1 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_for_lastprivate.py000066400000000000000000000004271416264035500276470ustar00rootroot00000000000000def omp_parallel_for_lastprivate(): sum = 0 i0 = -1 'omp parallel for reduction(+:sum) schedule(static,7) lastprivate(i0)' for i in range(1,1001): sum += i i0 = i known_sum = (1000 * (1000 + 1)) / 2 return known_sum == sum and i0 == 1000 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_for_ordered.py000066400000000000000000000010051416264035500267260ustar00rootroot00000000000000def check_i_islarger2(i, last_i): islarger = i > last_i last_i = i return islarger, last_i def omp_parallel_for_ordered(): sum = 0 is_larger = True last_i = 0 'omp parallel for schedule(static, 1) ordered' for i in range(1,100): ii = i if 'omp ordered': tmp_is_larger, last_i = check_i_islarger2(i, last_i) is_larger = tmp_is_larger and is_larger sum += ii known_sum = (99 * 100) / 2 return known_sum == sum and is_larger 
pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_for_private.py000066400000000000000000000006471416264035500267670ustar00rootroot00000000000000import math def some_work(): sum = 0; for i in range(0, 1000): sum += math.sqrt (i) def omp_parallel_for_private(): sum = 0 i2 = 0 'omp parallel for reduction(+: sum) schedule(static, 1) private(i2)' for i in range(1, 1001): i2 = i 'omp flush' some_work() 'omp flush' sum += i2 known_sum = (1000 * (1000 + 1)) / 2 return known_sum == sum; pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_for_reduction.py000066400000000000000000000110111416264035500272740ustar00rootroot00000000000000def omp_parallel_for_reduction(): import math dt = 0.5 rounding_error = 1.E-9 sum = 0 dsum = 0 dt = 1. / 3. result = True product = 1 logic_and = 1 logic_or = 0 bit_and = 1 bit_or = 0 exclusiv_bit_or = 0 i = 0 known_sum = (1000 * (1000 + 1)) / 2 'omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)' for i in range(1,1001): sum += i if known_sum != sum: print("E: reduction(+:sum)") result = False diff = (1000 * (1000 + 1)) / 2 'omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)' for i in range(1,1001): diff -= i if diff != 0: print("E: reduction(-:diff)") result = False dsum = 0 dpt = 0 for i in range(0, 20): dpt *= dt dknown_sum = (1 - dpt) / (1 - dt) 'omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)' for i in range(0,20): dsum += math.pow(dt, i) if abs(dsum-dknown_sum) > rounding_error: print("E: reduction(+:dsum)") result = False dsum = 0 dpt = 1 for i in range(0, 20): dpt *= dt ddiff = (1 - dpt) / (1 - dt) 'omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)' for i in range(0,20): ddiff -= math.pow(dt, i) if abs(ddiff) > rounding_error: print("E: reduction(-:ddiff)") result = False 'omp parallel for schedule(dynamic,1) private(i) reduction(*:product)' for i in range(1,11): product *= i known_product = 3628800 if known_product != product: print("E: 
reduction(*:product)") result = False logics = [1 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)' for i in range(0, 1000): logic_and = (logic_and and logics[i]) if not logic_and: print("E: reduction(&&:logic_and)") result = False logic_and = 1; logics[1000//2]=0 'omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)' for i in range(0, 1000): logic_and = (logic_and and logics[i]) if logic_and: print("E: reduction(&&:logic_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)' for i in range(0, 1000): logic_or = (logic_or or logics[i]) if logic_or: print("E: reduction(||:logic_or)") result = False logic_or = 0; logics[1000//2]=1 'omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)' for i in range(0, 1000): logic_or = (logic_or or logics[i]) if not logic_or: print("E: reduction(||:logic_or) with logics[1000/2]=1") result = False logics = [1 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)' for i in range(0, 1000): bit_and = (bit_and & logics[i]) if not bit_and: print("E: reduction(&:bit_and)") result = False bit_and = 1; logics[1000//2]=0 'omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)' for i in range(0, 1000): bit_and = (bit_and & logics[i]) if bit_and: print("E: reduction(&:bit_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)' for i in range(0, 1000): bit_or = (bit_or | logics[i]) if bit_or: print("E: reduction(|:bit_or)") result = False bit_or = 0; logics[1000//2]=1 'omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)' for i in range(0, 1000): bit_or = (bit_or | logics[i]) if not bit_or: print("E: reduction(|:bit_or) with logics[1000/2]=1") result = False logics = [0 for i in range(0,1000)] 'omp parallel 
for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)' for i in range(0, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or)") result = False exclusiv_bit_or = 0; logics[1000//2]=1 'omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)' for i in range(0, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if not exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or) with logics[1000/2]=1") result = False return result pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_if.py000066400000000000000000000004241416264035500250360ustar00rootroot00000000000000def omp_parallel_if(control=1): sum = 0 known_sum = (1000 * (1000 + 1)) / 2 if 'omp parallel if(control==0)': mysum = 0 for i in range(1,1001): mysum += i if 'omp critical': sum += mysum return sum == known_sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_num_threads.py000066400000000000000000000010241416264035500267460ustar00rootroot00000000000000import unittest unittest.skip #segfault .... 
def omp_parallel_num_threads(): import omp max_threads = 0 failed = 0 if 'omp parallel': if 'omp master': max_threads = omp.get_num_threads() for threads in range(1, max_threads + 1): nthreads = 0 if 'omp parallel reduction(+:failed) num_threads(threads)': failed += (threads != omp.get_num_threads()) 'omp atomic' nthreads += 1 failed += (nthreads != threads) return not failed pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_private.py000066400000000000000000000005201416264035500261070ustar00rootroot00000000000000def omp_parallel_private(): sum = 0 num_threads = 0 if 'omp parallel': sum1 = 7 'omp for' for i in range(1, 1000): sum1 += i if 'omp critical': sum += sum1 num_threads += 1 known_sum = (999 * 1000) / 2 + 7 * num_threads return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_reduction.py000066400000000000000000000116241416264035500264400ustar00rootroot00000000000000def omp_parallel_reduction(): import math dt = 0.5 rounding_error = 1.E-9 sum = 0 dsum = 0 dt = 1. / 3. 
result = True product = 1 logic_and = 1 logic_or = 0 bit_and = 1 bit_or = 0 exclusiv_bit_or = 0 known_sum = (1000 * (1000 + 1)) / 2 'omp parallel for schedule(dynamic,1) reduction(+:sum)' for i in range(1,1001): sum += i if known_sum != sum: print("E: reduction(+:sum)") result = False diff = (1000 * (1000 + 1)) / 2 'omp parallel for schedule(dynamic,1) reduction(-:diff)' for i in range(1,1001): diff -= i if diff != 0: print("E: reduction(-:diff)") result = False dsum = 0 dpt = 0 for i in range(0, 20): dpt *= dt dknown_sum = (1 - dpt) / (1 - dt) 'omp parallel for schedule(dynamic,1) reduction(+:dsum)' for i in range(0,20): dsum += math.pow(dt, i) if abs(dsum-dknown_sum) > rounding_error: print("E: reduction(+:dsum)") result = False dsum = 0 dpt = 1 for i in range(0, 20): dpt *= dt ddiff = (1 - dpt) / (1 - dt) 'omp parallel for schedule(dynamic,1) reduction(-:ddiff)' for i in range(0,20): ddiff -= math.pow(dt, i) if abs(ddiff) > rounding_error: print("E: reduction(-:ddiff)") result = False 'omp parallel for schedule(dynamic,1) reduction(*:product)' for i in range(1,11): product *= i known_product = 3628800 if known_product != product: print("E: reduction(*:product)") result = False logics = [1 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) reduction(&&:logic_and)' for i in range(0, 1000): logic_and = (logic_and and logics[i]) if not logic_and: print("E: reduction(&&:logic_and)") result = False logic_and = 1; logics[1000//2]=0 'omp parallel for schedule(dynamic,1) reduction(&&:logic_and)' for i in range(0, 1000): logic_and = (logic_and and logics[i]) if logic_and: print("E: reduction(&&:logic_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) reduction(||:logic_or)' for i in range(0, 1000): logic_or = (logic_or or logics[i]) if logic_or: print("E: reduction(||:logic_or)") result = False logic_or = 0; logics[1000//2]=1 'omp parallel for schedule(dynamic,1) reduction(||:logic_or)' for i in 
range(0, 1000): logic_or = (logic_or or logics[i]) if not logic_or: print("E: reduction(||:logic_or) with logics[1000/2]=1") result = False logics = [1 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) reduction(&:bit_and)' for i in range(0, 1000): bit_and = (bit_and & logics[i]) if not bit_and: print("E: reduction(&:bit_and)") result = False bit_and = 1; logics[1000//2]=0 'omp parallel for schedule(dynamic,1) reduction(&:bit_and)' for i in range(0, 1000): bit_and = (bit_and & logics[i]) if bit_and: print("E: reduction(&:bit_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) reduction(|:bit_or)' for i in range(0, 1000): bit_or = (bit_or | logics[i]) if bit_or: print("E: reduction(|:bit_or)") result = False bit_or = 0; logics[1000//2]=1 'omp parallel for schedule(dynamic,1) reduction(|:bit_or)' for i in range(0, 1000): bit_or = (bit_or | logics[i]) if not bit_or: print("E: reduction(|:bit_or) with logics[1000/2]=1") result = False logics = [0 for i in range(0,1000)] 'omp parallel for schedule(dynamic,1) reduction(^:exclusiv_bit_or)' for i in range(0, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or)") result = False exclusiv_bit_or = 0; logics[1000//2]=1 'omp parallel for schedule(dynamic,1) reduction(^:exclusiv_bit_or)' for i in range(0, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if not exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or) with logics[1000/2]=1") result = False max_ = 0; logics[1000//2]=1 'omp parallel for schedule(dynamic,1) reduction(max:max_)' for i in range(0, 1000): max_ = max(max_, logics[i]) if not max_: print("E: reduction(max:max_) with logics[1000/2]=1") result = False min_ = 1; logics = [1 for _ in range(1000)] logics[1000//2] = 0 'omp parallel for schedule(dynamic,1) reduction(min:min_)' for i in range(0, 1000): min_ = min(min_, logics[i]) if min_: print("E: reduction(min:min_) with 
logics[1000/2]=1") result = False return result pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_sections_firstprivate.py000066400000000000000000000006501416264035500310720ustar00rootroot00000000000000def omp_parallel_sections_firstprivate(): sum = 7 sum0 = 11 if 'omp parallel sections firstprivate(sum0)': if 'omp section': if 'omp critical': sum += sum0 if 'omp section': if 'omp critical': sum += sum0 if 'omp section': if 'omp critical': sum += sum0 known_sum=11*3+7 return (known_sum==sum) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_sections_lastprivate.py000066400000000000000000000015051416264035500307060ustar00rootroot00000000000000def omp_parallel_sections_lastprivate(): sum =0 sum0 = 0 i0 = -1 if 'omp parallel sections private(sum0) lastprivate(i0) private(i) num_threads(4)': if 'omp section': sum0 = 0 for i in range(1, 400): sum0 += i i0 = i if 'omp critical(lock)': sum += sum0 if 'omp section': sum0 = 0 for i in range(400, 700): sum0 += i i0 = i if 'omp critical(lock)': sum += sum0 if 'omp section': sum0 = 0 for i in range(700, 1000): sum0 += i i0 = i if 'omp critical(lock)': sum += sum0 known_sum = (999 * 1000) / 2 return known_sum == sum and i0 == 999 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_sections_private.py000066400000000000000000000012551416264035500300240ustar00rootroot00000000000000def omp_parallel_sections_private(): sum = 7 sum0 = 0 if 'omp parallel sections private(sum0)': if 'omp section': sum0 = 0 for i in range(0, 400): sum0 += i if 'omp critical': sum += sum0 if 'omp section': sum0 = 0 for i in range(400, 700): sum0 += i if 'omp critical': sum += sum0 if 'omp section': sum0 = 0 for i in range(700, 1000): sum0 += i if 'omp critical': sum += sum0 known_sum = (999 * 1000) / 2 + 7; return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_sections_reduction.py000066400000000000000000000202251416264035500303440ustar00rootroot00000000000000def omp_parallel_sections_reduction(): 
import math dt = 0.5 rounding_error = 1.E-9 sum = 7 dsum = 0 dt = 1. / 3. result = True product = 1 logic_and = 1 logic_or = 0 bit_and = 1 bit_or = 0 i = 0 exclusiv_bit_or = 0 known_sum = (1000 * 999) / 2 + 7 if 'omp parallel sections private(i) reduction(+:sum)': if 'omp section': for i in range(1,300): sum += i if 'omp section': for i in range(300,700): sum += i if 'omp section': for i in range(700,1000): sum += i if known_sum != sum: print("E: reduction(+:sum)") result = False diff = (1000 * 999) / 2 if 'omp parallel sections private(i) reduction(-:diff)': if 'omp section': for i in range(1,300): diff -= i if 'omp section': for i in range(300,700): diff -= i if 'omp section': for i in range(700,1000): diff -= i if diff != 0: print("E: reduction(-:diff)") result = False dsum = 0 dpt = 0 for i in range(0, 20): dpt *= dt dknown_sum = (1 - dpt) / (1 - dt) if 'omp parallel sections private(i) reduction(+:dsum)': if 'omp section': for i in range(0,7): dsum += math.pow(dt, i) if 'omp section': for i in range(7,14): dsum += math.pow(dt, i) if 'omp section': for i in range(14,20): dsum += math.pow(dt, i) if abs(dsum-dknown_sum) > rounding_error: print("E: reduction(+:dsum)") result = False dsum = 0 dpt = 0 for i in range(0, 20): dpt *= dt ddiff = (1 - dpt) / (1 - dt) if 'omp parallel sections private(i) reduction(-:ddiff)': if 'omp section': for i in range(0,6): ddiff -= math.pow(dt, i) if 'omp section': for i in range(6,12): ddiff -= math.pow(dt, i) if 'omp section': for i in range(12,20): ddiff -= math.pow(dt, i) if abs(ddiff) > rounding_error: print("E: reduction(-:ddiff)") result = False if 'omp parallel sections private(i) reduction(*:product)': if 'omp section': for i in range(1,3): product *= i if 'omp section': for i in range(3,6): product *= i if 'omp section': for i in range(6,11): product *= i known_product = 3628800 if known_product != product: print("E: reduction(*:product)") result = False logics = [1 for i in range(0,1000)] if 'omp parallel sections 
private(i) reduction(&&:logic_and)': if 'omp section': for i in range(0, 300): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(300, 700): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(700, 1000): logic_and = (logic_and and logics[i]) if not logic_and: print("E: reduction(&&:logic_and)") result = False logic_and = 1; logics[1000//2]=0 if 'omp parallel sections private(i) reduction(&&:logic_and)': if 'omp section': for i in range(0, 300): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(300, 700): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(700, 1000): logic_and = (logic_and and logics[i]) if logic_and: print("E: reduction(&&:logic_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] if 'omp parallel sections private(i) reduction(||:logic_or)': if 'omp section': for i in range(0, 300): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(300, 700): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(700, 1000): logic_or = (logic_or or logics[i]) if logic_or: print("E: reduction(||:logic_or)") result = False logic_or = 0; logics[1000//2]=1 if 'omp parallel sections private(i) reduction(||:logic_or)': if 'omp section': for i in range(0, 300): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(300, 700): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(700, 1000): logic_or = (logic_or or logics[i]) if not logic_or: print("E: reduction(||:logic_or) with logics[1000/2]=1") result = False logics = [1 for i in range(0,1000)] if 'omp parallel sections private(i) reduction(&:bit_and)': if 'omp section': for i in range(0, 300): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(300, 700): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(700, 1000): bit_and = (bit_and & logics[i]) if not bit_and: print("E: reduction(&:bit_and)") result = False bit_and = 1; 
logics[1000//2]=0 if 'omp parallel sections private(i) reduction(&:bit_and)': if 'omp section': for i in range(0, 300): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(300, 700): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(700, 1000): bit_and = (bit_and & logics[i]) if bit_and: print("E: reduction(&:bit_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] if 'omp parallel sections private(i) reduction(|:bit_or)': if 'omp section': for i in range(0, 300): bit_or = (bit_or | logics[i]) if 'omp section': for i in range(300, 700): bit_or = (bit_or | logics[i]) if 'omp section': for i in range(700, 1000): bit_or = (bit_or | logics[i]) if bit_or: print("E: reduction(|:bit_or)") result = False bit_or = 0; logics[1000//2]=1 if 'omp parallel sections private(i) reduction(|:bit_or)': if 'omp section': for i in range(0, 300): bit_or = (bit_or | logics[i]) if 'omp section': for i in range(300, 700): bit_or = (bit_or | logics[i]) if 'omp section': for i in range(700, 1000): bit_or = (bit_or | logics[i]) if not bit_or: print("E: reduction(|:bit_or) with logics[1000/2]=1") result = False logics = [0 for i in range(0,1000)] if 'omp parallel sections private(i) reduction(^:exclusiv_bit_or)': if 'omp section': for i in range(0, 300): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(300, 700): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(700, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or)") result = False exclusiv_bit_or = 0; logics[1000//2]=1 if 'omp parallel sections private(i) reduction(^:exclusiv_bit_or)': if 'omp section': for i in range(0, 300): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(300, 700): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(700, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if not 
exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or) with logics[1000/2]=1") result = False return result pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_parallel_shared.py000066400000000000000000000005071416264035500257100ustar00rootroot00000000000000def omp_parallel_shared(): sum = 0 LOOPCOUNT = 1000 known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 if 'omp parallel shared(sum)': mysum = 0 'omp for' for i in range(1, LOOPCOUNT + 1): mysum += i if 'omp critical': sum += mysum return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_sections_firstprivate.py000066400000000000000000000007251416264035500272210ustar00rootroot00000000000000def omp_sections_firstprivate(): sum = 7 sum0 = 11 if 'omp parallel': if 'omp sections firstprivate(sum0)': if 'omp section': if 'omp critical': sum += sum0 if 'omp section': if 'omp critical': sum += sum0 if 'omp section': if 'omp critical': sum += sum0 known_sum=11*3+7 return (known_sum==sum) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_sections_lastprivate.py000066400000000000000000000015661416264035500270410ustar00rootroot00000000000000def omp_sections_lastprivate(): sum =0 sum0 = 0 i0 = -1 if 'omp parallel': if 'omp sections private(sum0) lastprivate(i0)': if 'omp section': sum0 = 0 for i in range(1, 400): sum0 += i i0 = i if 'omp critical': sum += sum0 if 'omp section': sum0 = 0 for i in range(400, 700): sum0 += i i0 = i if 'omp critical': sum += sum0 if 'omp section': sum0 = 0 for i in range(700, 1000): sum0 += i i0 = i if 'omp critical': sum += sum0 known_sum = (999 * 1000) / 2 return known_sum == sum and i0 == 999 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_sections_nowait.py000066400000000000000000000011521416264035500257730ustar00rootroot00000000000000def omp_sections_nowait(): import omp from time import sleep result = False count = 0 if 'omp parallel': in_parallel = omp.in_parallel() rank = omp.get_thread_num() if 'omp sections nowait': if 'omp section': sleep(0.01) count = 1 'omp 
flush(count)' if 'omp section': pass if 'omp sections': if 'omp section': pass if 'omp section': if count == 0: result = True return result or not in_parallel pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_sections_private.py000066400000000000000000000013761416264035500261540ustar00rootroot00000000000000def omp_sections_private(): sum = 7 sum0 = 0 if 'omp parallel': if 'omp sections private(sum0)': if 'omp section': sum0 = 0 for i in range(0, 400): sum0 += i if 'omp critical': sum += sum0 if 'omp section': sum0 = 0 for i in range(400, 700): sum0 += i if 'omp critical': sum += sum0 if 'omp section': sum0 = 0 for i in range(700, 1000): sum0 += i if 'omp critical': sum += sum0 known_sum = (999 * 1000) / 2 + 7; return known_sum == sum pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_sections_reduction.py000066400000000000000000000216661416264035500265020ustar00rootroot00000000000000def omp_sections_reduction(): import math dt = 0.5 rounding_error = 1.E-9 sum = 7 dsum = 0 dt = 1. / 3. result = True product = 1 logic_and = 1 logic_or = 0 bit_and = 1 bit_or = 0 i = 0 exclusiv_bit_or = 0 known_sum = (1000 * 999) / 2 + 7 if 'omp parallel': if 'omp sections private(i) reduction(+:sum)': if 'omp section': for i in range(1,300): sum += i if 'omp section': for i in range(300,700): sum += i if 'omp section': for i in range(700,1000): sum += i if known_sum != sum: print("E: reduction(+:sum)") result = False diff = (1000 * 999) / 2 if 'omp parallel': if 'omp sections private(i) reduction(-:diff)': if 'omp section': for i in range(1,300): diff -= i if 'omp section': for i in range(300,700): diff -= i if 'omp section': for i in range(700,1000): diff -= i if diff != 0: print("E: reduction(-:diff)") result = False dsum = 0 dpt = 0 for i in range(0, 20): dpt *= dt dknown_sum = (1 - dpt) / (1 - dt) if 'omp parallel': if 'omp sections private(i) reduction(+:dsum)': if 'omp section': for i in range(0,7): dsum += math.pow(dt, i) if 'omp section': for i in range(7,14): dsum += 
math.pow(dt, i) if 'omp section': for i in range(14,20): dsum += math.pow(dt, i) if abs(dsum-dknown_sum) > rounding_error: print("E: reduction(+:dsum)") result = False dsum = 0 dpt = 0 for i in range(0, 20): dpt *= dt ddiff = (1 - dpt) / (1 - dt) if 'omp parallel': if 'omp sections private(i) reduction(-:ddiff)': if 'omp section': for i in range(0,6): ddiff -= math.pow(dt, i) if 'omp section': for i in range(6,12): ddiff -= math.pow(dt, i) if 'omp section': for i in range(12,20): ddiff -= math.pow(dt, i) if abs(ddiff) > rounding_error: print("E: reduction(-:ddiff)") result = False if 'omp parallel': if 'omp sections private(i) reduction(*:product)': if 'omp section': for i in range(1,3): product *= i if 'omp section': for i in range(3,6): product *= i if 'omp section': for i in range(6,11): product *= i known_product = 3628800 if known_product != product: print("E: reduction(*:product)") result = False logics = [1 for i in range(0,1000)] if 'omp parallel': if 'omp sections private(i) reduction(&&:logic_and)': if 'omp section': for i in range(0, 300): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(300, 700): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(700, 1000): logic_and = (logic_and and logics[i]) if not logic_and: print("E: reduction(&&:logic_and)") result = False logic_and = 1; logics[1000//2]=0 if 'omp parallel': if 'omp sections private(i) reduction(&&:logic_and)': if 'omp section': for i in range(0, 300): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(300, 700): logic_and = (logic_and and logics[i]) if 'omp section': for i in range(700, 1000): logic_and = (logic_and and logics[i]) if logic_and: print("E: reduction(&&:logic_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] if 'omp parallel': if 'omp sections private(i) reduction(||:logic_or)': if 'omp section': for i in range(0, 300): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(300, 
700): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(700, 1000): logic_or = (logic_or or logics[i]) if logic_or: print("E: reduction(||:logic_or)") result = False logic_or = 0; logics[1000//2]=1 if 'omp parallel': if 'omp sections private(i) reduction(||:logic_or)': if 'omp section': for i in range(0, 300): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(300, 700): logic_or = (logic_or or logics[i]) if 'omp section': for i in range(700, 1000): logic_or = (logic_or or logics[i]) if not logic_or: print("E: reduction(||:logic_or) with logics[1000/2]=1") result = False logics = [1 for i in range(0,1000)] if 'omp parallel': if 'omp sections private(i) reduction(&:bit_and)': if 'omp section': for i in range(0, 300): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(300, 700): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(700, 1000): bit_and = (bit_and & logics[i]) if not bit_and: print("E: reduction(&:bit_and)") result = False bit_and = 1; logics[1000//2]=0 if 'omp parallel': if 'omp sections private(i) reduction(&:bit_and)': if 'omp section': for i in range(0, 300): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(300, 700): bit_and = (bit_and & logics[i]) if 'omp section': for i in range(700, 1000): bit_and = (bit_and & logics[i]) if bit_and: print("E: reduction(&:bit_and) with logics[1000/2]=0") result = False logics = [0 for i in range(0,1000)] if 'omp parallel': if 'omp sections private(i) reduction(|:bit_or)': if 'omp section': for i in range(0, 300): bit_or = (bit_or | logics[i]) if 'omp section': for i in range(300, 700): bit_or = (bit_or | logics[i]) if 'omp section': for i in range(700, 1000): bit_or = (bit_or | logics[i]) if bit_or: print("E: reduction(|:bit_or)") result = False bit_or = 0; logics[1000//2]=1 if 'omp parallel': if 'omp sections private(i) reduction(|:bit_or)': if 'omp section': for i in range(0, 300): bit_or = (bit_or | logics[i]) if 'omp section': for i 
in range(300, 700): bit_or = (bit_or | logics[i]) if 'omp section': for i in range(700, 1000): bit_or = (bit_or | logics[i]) if not bit_or: print("E: reduction(|:bit_or) with logics[1000/2]=1") result = False logics = [0 for i in range(0,1000)] if 'omp parallel': if 'omp sections private(i) reduction(^:exclusiv_bit_or)': if 'omp section': for i in range(0, 300): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(300, 700): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(700, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or)") result = False exclusiv_bit_or = 0; logics[1000//2]=1 if 'omp parallel': if 'omp sections private(i) reduction(^:exclusiv_bit_or)': if 'omp section': for i in range(0, 300): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(300, 700): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if 'omp section': for i in range(700, 1000): exclusiv_bit_or = (exclusiv_bit_or ^ logics[i]) if not exclusiv_bit_or: print("E: reduction(^:exclusiv_bit_or) with logics[1000/2]=1") result = False return result pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_single.py000066400000000000000000000007251416264035500240510ustar00rootroot00000000000000def omp_single(): nr_threads_in_single = 0 result = 0 nr_iterations = 0 LOOPCOUNT = 1000 if 'omp parallel': for i in range(LOOPCOUNT): if 'omp single': 'omp flush' nr_threads_in_single += 1 'omp flush' nr_iterations += 1 nr_threads_in_single -= 1 result += nr_threads_in_single return result == 0 and nr_iterations == LOOPCOUNT pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_single_copyprivate.py000066400000000000000000000006351416264035500264760ustar00rootroot00000000000000def omp_single_copyprivate(): result = 0 nr_iterations = 0 LOOPCOUNT = 1000 j = 0 if 'omp parallel private(j)': for i in range(LOOPCOUNT): if 'omp single copyprivate(j)': nr_iterations += 1 j = i 
if 'omp critical': result += j - i 'omp barrier' return result == 0 and nr_iterations == LOOPCOUNT pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_single_nowait.py000066400000000000000000000011121416264035500254210ustar00rootroot00000000000000def omp_single_nowait(): total_iterations = 0 nr_iterations = 0 LOOPCOUNT = 1000 i = 0 if 'omp parallel private(i)': for i in range(LOOPCOUNT): if 'omp single nowait': 'omp atomic' nr_iterations += 1 if 'omp parallel private(i)': my_iterations = 0 for i in range(LOOPCOUNT): if 'omp single nowait': my_iterations += 1 if 'omp critical': total_iterations += my_iterations return nr_iterations == LOOPCOUNT and total_iterations == LOOPCOUNT pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_single_private.py000066400000000000000000000013151416264035500255770ustar00rootroot00000000000000import omp def omp_single_private(): nr_threads_in_single = 0 nr_iterations = 0 result = 0 LOOPCOUNT = 1000 if 'omp parallel': use_parallel = omp.in_parallel() myresult = 0 myit = 0 for i in range(LOOPCOUNT): if 'omp single private(nr_threads_in_single)': nr_threads_in_single = 0 'omp flush' nr_threads_in_single += 1 'omp flush' myit += 1 myresult += nr_threads_in_single if 'omp critical': result += nr_threads_in_single nr_iterations += myit return not use_parallel or (result == 0 and nr_iterations == LOOPCOUNT) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task.py000066400000000000000000000007021416264035500235250ustar00rootroot00000000000000def omp_task(): import omp from time import sleep NUM_TASKS = 25 tids = list(range(NUM_TASKS)) if 'omp parallel': use_parallel = omp.in_parallel() for i in range(NUM_TASKS): myi = i if 'omp task': sleep(0.01) tids[myi] = omp.get_thread_num() for i in range(NUM_TASKS): if tids[0] != tids[i]: return True return not use_parallel pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task_final.py000066400000000000000000000012271416264035500247010ustar00rootroot00000000000000import unittest unittest.skip #final 
semble ne pas fonctionner def omp_task_final(): import omp from time import sleep error = 0 NUM_TASKS = 25 tids = list(range(NUM_TASKS)) if 'omp parallel': if 'omp single': for i in range(NUM_TASKS): myi = i if 'omp task final(i>=10) private(k) firstprivate(myi)': sleep(0.01) tids[myi] = omp.get_thread_num() 'omp taskwait' for i in range(10, NUM_TASKS): if tids[10] != tids[i]: print(i, tids[10], tids[i]) error += 1 print(error) return error == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task_firstprivate.py000066400000000000000000000011701416264035500263270ustar00rootroot00000000000000import omp def omp_task_firstprivate(): sum = 1234 result = 0 LOOPCOUNT = 1000 NUM_TASKS = 25 known_sum = 1234 + (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 if 'omp parallel': in_parallel = omp.in_parallel() if 'omp single': for i in range(NUM_TASKS): if 'omp task firstprivate(sum)': for j in range(LOOPCOUNT + 1): 'omp flush' sum += j if sum != known_sum: if 'omp critical': result += 1 return result == 0 or not in_parallel pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task_if.py000066400000000000000000000005041416264035500242030ustar00rootroot00000000000000def omp_task_if(condition_false=False): from time import sleep count = 0 result = 0 if 'omp parallel': if 'omp single': if 'omp task if(condition_false) shared(count, result)': sleep(0.5) result = int(count == 0) count = 1 return result pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task_imp_firstprivate.py000066400000000000000000000011221416264035500271710ustar00rootroot00000000000000import omp def omp_task_imp_firstprivate(): i = 5 k = 0 result = False NUM_TASKS = 25 task_result = True if 'omp parallel firstprivate(i)': in_parallel = omp.in_parallel() if 'omp single': for k in range(NUM_TASKS): if 'omp task shared(result, task_result)': if i != 5: task_result = False for j in range(0, NUM_TASKS): i += 1 'omp taskwait' result = task_result and i == 5 return result or not in_parallel 
pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task_private.py000066400000000000000000000011311416264035500252540ustar00rootroot00000000000000def omp_task_private(): sum = 0 result = 0 LOOPCOUNT = 1000 NUM_TASKS = 25 known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 if 'omp parallel': if 'omp single': for i in range(0, NUM_TASKS): if 'omp task private(sum) shared(result, known_sum)': sum = 0 for j in range(0, LOOPCOUNT + 1): 'omp flush' sum += j if sum != known_sum: if 'omp critical': result += 1 return result == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task_shared.py000066400000000000000000000004731416264035500250600ustar00rootroot00000000000000def omp_task_shared(): i = 0 k = 0 result = 0 NUM_TASKS = 25 if 'omp parallel': if 'omp single': for k in range(0, NUM_TASKS): if 'omp task shared(i)': 'omp atomic' i += 1 result = i return result == NUM_TASKS pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_task_untied.py000066400000000000000000000014611416264035500251000ustar00rootroot00000000000000def omp_task_untied(): import omp from time import sleep NUM_TASKS = 25 start_id = [0 for _ in range(NUM_TASKS)] current_id = [0 for _ in range(NUM_TASKS)] count = 0 if 'omp parallel': use_parallel = omp.in_parallel() if 'omp single': for i in range(NUM_TASKS): myi = i if 'omp task firstprivate(myi) untied': sleep(0.01) start_id[myi] = omp.get_thread_num() 'omp taskwait' if start_id[myi] % 2 != 0: sleep(0.01) current_id[myi] = omp.get_thread_num() for i in range(NUM_TASKS): if current_id[i] == start_id[i]: count += 1 return count < NUM_TASKS or not use_parallel pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_taskwait.py000066400000000000000000000014101416264035500244070ustar00rootroot00000000000000def omp_taskwait(): from time import sleep result1 = 0 result2 = 0 NUM_TASKS = 25 array = [0 for _ in range(NUM_TASKS)] if 'omp parallel': if 'omp single': for i in range(NUM_TASKS): myi = i if 'omp task firstprivate(myi)': sleep(0.01) array[myi] = 1 'omp taskwait' 
for i in range(NUM_TASKS): if array[i] != 1: result1 += 1 for i in range(NUM_TASKS): myi = i if 'omp task firstprivate(myi)': array[myi] = 2 for i in range(NUM_TASKS): if array[i] != 2: result2 += 1 return result1 == 0 and result2 == 0 pythran-0.10.0+ds2/pythran/tests/openmp.legacy/omp_taskyield.py000066400000000000000000000014471416264035500245630ustar00rootroot00000000000000def omp_taskyield(): import omp from time import sleep NUM_TASKS = 25 count = 0 start_id = [0 for _ in range(NUM_TASKS)] current_id = [0 for _ in range(NUM_TASKS)] if 'omp parallel': use_omp = omp.in_parallel() if 'omp single': for i in range(NUM_TASKS): myi = i if 'omp task firstprivate(myi) untied': sleep(0.01) start_id[myi] = omp.get_thread_num() 'omp taskyield' if start_id[myi] % 2 == 0: sleep(0.01) current_id[myi] = omp.get_thread_num() for i in range(NUM_TASKS): if current_id[i] == start_id[i]: count += 1 return count < NUM_TASKS or not use_omp pythran-0.10.0+ds2/pythran/tests/openmp.legacy/pythran_collapse.py000066400000000000000000000006211416264035500252570ustar00rootroot00000000000000import numpy as np def collapse_bug(x, m, btx): n = int(np.log2(m)) lv = np.zeros((x.shape[0], n)) N = x.shape[0] #omp parallel for collapse(2) for i in range(N): for k in range(n): lv[i,k] = abs(x[i]- btx[k]) return lv def pythran_collapse(): from random import randint n = randint(50, 50) x = np.ones(n) return collapse_bug(x, 5, 3. 
* x) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/pythran_forward_substitution.py000066400000000000000000000004151416264035500277560ustar00rootroot00000000000000import numpy as np def aaa(a): grid = np.empty((a, )) # omp parallel for private(i, j) for i in range(a): j = i * i grid[i] = j return grid def pythran_forward_substitution(): from random import randint return aaa(randint(5,5)) pythran-0.10.0+ds2/pythran/tests/openmp.legacy/pythran_private.py000066400000000000000000000004561416264035500251350ustar00rootroot00000000000000 import numpy as np def aaa(a): grid = np.empty((a, )) # omp parallel for private(i, j) for i in range(a): if i % 2: j = i * i else: j = i * i *3 grid[i] = j def pythran_private(): from random import randint return aaa(randint(5,5)) pythran-0.10.0+ds2/pythran/tests/pydata/000077500000000000000000000000001416264035500200605ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/pydata/compute_mask.py000066400000000000000000000203011416264035500231150ustar00rootroot00000000000000#pythran export compute_mask(int[:,:], int[:,:]) #runas import numpy as np; coords = np.array([[0, 0, 1, 1, 2, 2]]); indices = np.array([[0, 3, 2]]); compute_mask(coords, indices) import numpy as np def compute_mask(coords, indices): # pragma: no cover """ Gets the mask for the coords given the indices in slice format. Works with either start-stop ranges of matching indices into coords called "pairs" (start-stop pairs) or filters the mask directly, based on which is faster. Exploits the structure in sorted coords, which is that for a constant value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted. Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]`` is always sorted. It uses this sortedness to find sub-pairs for each dimension given the previous, and so on. This is efficient for small slices or ints, but not for large ones. 
After it detects that working with pairs is rather inefficient (or after going through each possible index), it constructs a filtered mask from the start-stop pairs. Parameters ---------- coords : np.ndarray The coordinates of the array. indices : np.ndarray The indices in the form of slices such that indices[:, 0] are starts, indices[:, 1] are stops and indices[:, 2] are steps. Returns ------- mask : np.ndarray The starts and stops in the mask. is_slice : bool Whether or not the array represents a continuous slice. Examples -------- Let's create some mock coords and indices >>> import numpy as np >>> coords = np.array([[0, 0, 1, 1, 2, 2]]) >>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2) Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched. >>> _compute_mask(coords, indices) (array([0, 1, 4, 5]), False) Now, let's try with a more "continuous" slice. Matches ``0`` and ``1``. >>> indices = np.array([[0, 2, 1]]) >>> _compute_mask(coords, indices) (array([0, 4]), True) This is equivalent to mask being ``slice(0, 4, 1)``. """ # Set the initial mask to be the entire range of coordinates. starts = [0] stops = [coords.shape[1]] n_matches = coords.shape[1] i = 0 while i < len(indices): # Guesstimate whether working with pairs is more efficient or # working with the mask directly. # One side is the estimate of time taken for binary searches # (n_searches * log(avg_length)) # The other is an estimated time of a linear filter for the mask. n_pairs = len(starts) n_current_slices = _get_slice_len(indices[i]) * n_pairs + 2 if n_current_slices * np.log(n_current_slices / max(n_pairs, 1)) > \ n_matches + n_pairs: break # For each of the pairs, search inside the coordinates for other # matching sub-pairs. # This gets the start-end coordinates in coords for each 'sub-array' # Which would come out of indexing a single integer. 
starts, stops, n_matches = _get_mask_pairs(starts, stops, coords[i], indices[i]) i += 1 # Combine adjacent pairs starts, stops = _join_adjacent_pairs(starts, stops) # If just one pair is left over, treat it as a slice. if i == len(indices) and len(starts) == 1: return np.array([starts[0], stops[0]]), True # Convert start-stop pairs into mask, filtering by remaining # coordinates. mask = _filter_pairs(starts, stops, coords[i:], indices[i:]) return np.array(mask, dtype=np.intp), False def _get_slice_len(idx): """ Get the number of elements in a slice. Parameters ---------- idx : np.ndarray A (3,) shaped array containing start, stop, step Returns ------- n : int The length of the slice. Examples -------- >>> idx = np.array([5, 15, 5]) >>> _get_slice_len(idx) 2 """ start, stop, step = idx[0], idx[1], idx[2] if step > 0: return (stop - start + step - 1) // step else: return (start - stop - step - 1) // (-step) def _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover """ Gets the pairs for a following dimension given the pairs for a dimension. For each pair, it searches in the following dimension for matching coords and returns those. The total combined length of all pairs is returned to help with the performance guesstimate. Parameters ---------- starts_old, stops_old : list[int] The starts and stops from the previous index. c : np.ndarray The coords for this index's dimension. idx : np.ndarray The index in the form of a slice. idx[0], idx[1], idx[2] = start, stop, step Returns ------- starts, stops: list The starts and stops after applying the current index. n_matches : int The sum of elements in all ranges. 
Examples -------- >>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2]) >>> starts_old = [4] >>> stops_old = [8] >>> idx = np.array([1, 2, 1]) >>> _get_mask_pairs(starts_old, stops_old, c, idx) ([4], [6], 2) """ starts = [] stops = [] n_matches = 0 for j in range(len(starts_old)): # For each matching "integer" in the slice, search within the "sub-coords" # Using binary search. for p_match in range(idx[0], idx[1], idx[2]): start = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j] stop = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match + 1) + starts_old[j] if start != stop: starts.append(start) stops.append(stop) n_matches += stop - start return starts, stops, n_matches def _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover """ Joins adjacent pairs into one. For example, 2-5 and 5-7 will reduce to 2-7 (a single pair). This may help in returning a slice in the end which could be faster. Parameters ---------- starts_old, stops_old : list[int] The input starts and stops Returns ------- starts, stops : list[int] The reduced starts and stops. Examples -------- >>> starts = [2, 5] >>> stops = [5, 7] >>> _join_adjacent_pairs(starts, stops) ([2], [7]) """ if len(starts_old) <= 1: return starts_old, stops_old starts = [starts_old[0]] stops = [] for i in range(1, len(starts_old)): if starts_old[i] != stops_old[i - 1]: starts.append(starts_old[i]) stops.append(stops_old[i - 1]) stops.append(stops_old[-1]) return starts, stops def _filter_pairs(starts, stops, coords, indices): # pragma: no cover """ Converts all the pairs into a single integer mask, additionally filtering by the indices. Parameters ---------- starts, stops : list[int] The starts and stops to convert into an array. coords : np.ndarray The coordinates to filter by. indices : np.ndarray The indices in the form of slices such that indices[:, 0] are starts, indices[:, 1] are stops and indices[:, 2] are steps. Returns ------- mask : list The output integer mask. 
Examples -------- >>> import numpy as np >>> starts = [2] >>> stops = [7] >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]]) >>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs >>> _filter_pairs(starts, stops, coords, indices) [2, 4, 6] """ mask = [] # For each pair, for i in range(len(starts)): # For each element match within the pair range for j in range(starts[i], stops[i]): match = True # Check if it matches all indices for k in range(len(indices)): idx = indices[k] elem = coords[k, j] match &= ((elem - idx[0]) % idx[2] == 0 and ((idx[2] > 0 and idx[0] <= elem < idx[1]) or (idx[2] < 0 and idx[0] >= elem > idx[1]))) # and append to the mask if so. if match: mask.append(j) return mask pythran-0.10.0+ds2/pythran/tests/pydata/matcharray.py000066400000000000000000000024111416264035500225630ustar00rootroot00000000000000#pythran export match_arrays (float32[], float32[]) #pythran export match_arrays (int16[], int16[]) #runas import numpy as np; x = np.array([1,2,3], dtype=np.int16); y = np.array([3,4,5], dtype=np.int16); match_arrays(x, y) import numpy as np def match_arrays(a, b): # pragma: no cover """ Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted in lexographical order. Parameters ---------- a, b : np.ndarray The input 1-D arrays to match. If matching of multiple fields is needed, use np.recarrays. These two arrays must be sorted. Returns ------- a_idx, b_idx : np.ndarray The output indices of every possible pair of matching elements. 
""" if len(a) == 0 or len(b) == 0: return np.empty(0, dtype=np.uintp), np.empty(0, dtype=np.uintp) a_ind, b_ind = [], [] nb = len(b) ib = 0 match = 0 for ia, j in enumerate(a): if j == b[match]: ib = match while ib < nb and j >= b[ib]: if j == b[ib]: a_ind.append(ia) b_ind.append(ib) if b[match] < b[ib]: match = ib ib += 1 return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp) pythran-0.10.0+ds2/pythran/tests/rosetta/000077500000000000000000000000001416264035500202575ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/rosetta/100doors.py000066400000000000000000000017221416264035500222020ustar00rootroot00000000000000#from http://rosettacode.org/wiki/100_doors#Python #pythran export unoptimized() #runas unoptimized() #pythran export optimized() #runas optimized() #pythran export one_liner_list_comprehension() #runas one_liner_list_comprehension() #pythran export one_liner_generator_comprehension() #runas one_liner_generator_comprehension() def unoptimized(): doors = [False] * 100 for i in range(100): for j in range(i, 100, i+1): doors[j] = not doors[j] print("Door %d:" % (i+1), 'open' if doors[i] else 'close') def optimized(): for i in range(1, 101): root = i ** 0.5 print("Door %d:" % i, 'open' if root == int(root) else 'close') def one_liner_list_comprehension(): print('\n'.join(['Door %s is %s' % (i, ['closed', 'open'][(i**0.5).is_integer()]) for i in range(1, 10001)])) def one_liner_generator_comprehension(): print('\n'.join('Door %s is %s' % (i, 'closed' if i**0.5 % 1 else 'open') for i in range(1, 101))) pythran-0.10.0+ds2/pythran/tests/rosetta/ackermann.py000066400000000000000000000013661416264035500225760ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Ackermann_function#Python #pythran export ack1(int, int) #pythran export ack2(int, int) #pythran export ack3(int, int) #runas ack1(2, 2) #runas ack2(2, 1) #runas ack3(1, 2) def ack1(M, N): return (N + 1) if M == 0 else ( ack1(M-1, 1) if N == 0 else ack1(M-1, ack1(M, 
N-1))) def ack2(M, N): if M == 0: return N + 1 elif N == 0: return ack1(M - 1, 1) else: return ack1(M - 1, ack1(M, N - 1)) def ack3(M, N): return (N + 1) if M == 0 else ( (N + 2) if M == 1 else ( (2*N + 3) if M == 2 else ( (8*(int(2**N) - 1) + 5) if M == 3 else ( # int conversion is pythran-specific ack2(M-1, 1) if N == 0 else ack2(M-1, ack2(M, N-1)))))) pythran-0.10.0+ds2/pythran/tests/rosetta/appolonius.py000066400000000000000000000026351416264035500230300ustar00rootroot00000000000000import math #from http://rosettacode.org/wiki/Problem_of_Apollonius#Python #pythran export solveApollonius((float, float, float), (float, float, float), (float, float, float), int, int, int) #runas c1, c2, c3 = (0., 0., 1.), (4., 0., 1.), (2., 4., 2.); solveApollonius(c1, c2, c3, 1, 1, 1) #runas c1, c2, c3 = (0., 0., 1.), (4., 0., 1.), (2., 4., 2.); solveApollonius(c1, c2, c3, -1, -1, -1) def solveApollonius(c1, c2, c3, s1, s2, s3): ''' >>> solveApollonius((0, 0, 1), (4, 0, 1), (2, 4, 2), 1,1,1) >>> solveApollonius((0, 0, 1), (4, 0, 1), (2, 4, 2), -1,-1,-1) ''' x1, y1, r1 = c1 x2, y2, r2 = c2 x3, y3, r3 = c3 v11 = 2*x2 - 2*x1 v12 = 2*y2 - 2*y1 v13 = x1*x1 - x2*x2 + y1*y1 - y2*y2 - r1*r1 + r2*r2 v14 = 2*s2*r2 - 2*s1*r1 v21 = 2*x3 - 2*x2 v22 = 2*y3 - 2*y2 v23 = x2*x2 - x3*x3 + y2*y2 - y3*y3 - r2*r2 + r3*r3 v24 = 2*s3*r3 - 2*s2*r2 w12 = v12/v11 w13 = v13/v11 w14 = v14/v11 w22 = v22/v21-w12 w23 = v23/v21-w13 w24 = v24/v21-w14 P = -w23/w22 Q = w24/w22 M = -w12*P-w13 N = w14 - w12*Q a = N*N + Q*Q - 1 b = 2*M*N - 2*N*x1 + 2*P*Q - 2*Q*y1 + 2*s1*r1 c = x1*x1 + M*M - 2*M*x1 + P*P + y1*y1 - 2*P*y1 - r1*r1 # Find a root of a quadratic equation. This requires the circle centers not to be e.g. 
colinear D = b*b-4*a*c rs = (-b-math.sqrt(D))/(2*a) xs = M+N*rs ys = P+Q*rs return (xs, ys, rs) pythran-0.10.0+ds2/pythran/tests/rosetta/array_concatenation.py000066400000000000000000000010301416264035500246460ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Array_concatenation#Python #pythran export array_concatenation(int list, int list, int list, int list, int list) #runas arr1 = [1, 2, 3]; arr2 = [4, 5, 6]; arr3 = [7, 8, 9]; arr5 = [4, 5, 6]; arr6 = [7, 8, 9]; array_concatenation(arr1, arr2, arr3, arr5, arr6) def array_concatenation(arr1, arr2, arr3, arr5, arr6): arr4 = arr1 + arr2 assert arr4 == [1, 2, 3, 4, 5, 6] arr4.extend(arr3) assert arr4 == [1, 2, 3, 4, 5, 6, 7, 8, 9] arr6 += arr5 assert arr6 == [7, 8, 9, 4, 5, 6] pythran-0.10.0+ds2/pythran/tests/rosetta/average_loop_length.py000066400000000000000000000012011416264035500246270ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Average_loop_length#Python #pythran export analytical(int) #pythran export testing(int, int) #runas analytical(5) #runas avg = testing(10, 10**5); theory = analytical(10); abs((avg / theory - 1)) < 0.01 from math import factorial from random import randrange def analytical(n): return sum(factorial(n) / pow(n, i) / float(factorial(n - i)) for i in range(1, n+1)) def testing(n, times): count = 0 for i in range(times): x, bits = 1, 0 while not (bits & x): count += 1 bits |= x x = 1 << randrange(n) return count / times pythran-0.10.0+ds2/pythran/tests/rosetta/exponential.py000066400000000000000000000010531416264035500231560ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Generator/Exponential#Python #pythran export test(int, int) #runas test(2, 3) #FIXME unittest.skip from itertools import islice, count def powers(m): for n in count(): yield n ** m def filtered(s1, s2): v, f = next(s1), next(s2) while True: if v > f: f = next(s2) continue elif v < f: yield v v = next(s1) def test(sq, cu): squares, cubes = powers(sq), powers(cu) f = 
filtered(squares, cubes) return list(islice(f, 20, 30)) pythran-0.10.0+ds2/pythran/tests/rosetta/gamma_function.py000066400000000000000000000023001416264035500236130ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Gamma_function#Python #pythran export test(int) #FIXME unittest.skip #runas test(11) def test(end): _a = [ 1.00000000000000000000, 0.57721566490153286061, -0.65587807152025388108, -0.04200263503409523553, 0.16653861138229148950, -0.04219773455554433675, -0.00962197152787697356, 0.00721894324666309954, -0.00116516759185906511, -0.00021524167411495097, 0.00012805028238811619, -0.00002013485478078824, -0.00000125049348214267, 0.00000113302723198170, -0.00000020563384169776, 0.00000000611609510448, 0.00000000500200764447, -0.00000000118127457049, 0.00000000010434267117, 0.00000000000778226344, -0.00000000000369680562, 0.00000000000051003703, -0.00000000000002058326, -0.00000000000000534812, 0.00000000000000122678, -0.00000000000000011813, 0.00000000000000000119, 0.00000000000000000141, -0.00000000000000000023, 0.00000000000000000002 ] def gamma (x): y = float(x) - 1.0; sm = _a[-1]; for an in _a[-2::-1]: sm = sm * y + an; return 1.0 / sm; return [ gamma(i/3.0) for i in range(1,end)] pythran-0.10.0+ds2/pythran/tests/rosetta/generic_swap.py000066400000000000000000000002131416264035500232730ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Generic_swap#Python #pythran export swap(str, int) #runas swap("e", 15) def swap(a, b): return b, a pythran-0.10.0+ds2/pythran/tests/rosetta/gray_code.py000066400000000000000000000017241416264035500225710ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Gray_code #pythran export bin2gray(int list) #pythran export gray2bin(int list) #pythran export int2bin(int) #pythran export bin2int(int list) #runas [int2bin(i) for i in range(16)] #runas [bin2gray(int2bin(i)) for i in range(16)] #runas [gray2bin(bin2gray(int2bin(i))) for i in range(16)] #runas [bin2int(gray2bin(bin2gray(int2bin(i)))) for 
i in range(16)] def bin2gray(bits): return bits[:1] + [i ^ ishift for i, ishift in zip(bits[:-1], bits[1:])] def gray2bin(bits): b = [bits[0]] for nextb in bits[1:]: b.append(b[-1] ^ nextb) return b def int2bin(n): 'From positive integer to list of binary bits, msb at index 0' if n: bits = [] while n: n,remainder = divmod(n, 2) bits.insert(0, remainder) return bits else: return [0] def bin2int(bits): 'From binary bits, msb at index 0 to integer' i = 0 for bit in bits: i = i * 2 + bit return i pythran-0.10.0+ds2/pythran/tests/rosetta/greatest_common_divisor.py000066400000000000000000000015041416264035500255560ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Greatest_common_divisor#Python #pythran export gcd_iter(int, int) #pythran export gcd(int, int) #pythran export gcd_bin(int, int) #runas gcd_iter(40902, 24140) #runas gcd(40902, 24140) #runas gcd_bin(40902, 24140) def gcd_iter(u, v): while v: u, v = v, u % v return abs(u) def gcd(u, v): return gcd(v, u % v) if v else abs(u) def gcd_bin(u, v): u, v = abs(u), abs(v) # u >= 0, v >= 0 if u < v: u, v = v, u # u >= v >= 0 if v == 0: return u # u >= v > 0 k = 1 while u & 1 == 0 and v & 1 == 0: # u, v - even u >>= 1; v >>= 1 k <<= 1 t = -v if u & 1 else u while t: while t & 1 == 0: t >>= 1 if t > 0: u = t else: v = -t t = u - v return u * k pythran-0.10.0+ds2/pythran/tests/rosetta/greatest_element_of_a_list.py000066400000000000000000000003771416264035500262060ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Greatest_element_of_a_list#Python #pythran export test(str list) #runas test(['1\n', ' 2.3\n', '4.5e-1\n', '0.01e4\n', '-1.2']) #FIXME unittest.skip def test(floatstrings): return max(float(x) for x in floatstrings) pythran-0.10.0+ds2/pythran/tests/rosetta/greatest_subsequential_sum.py000066400000000000000000000030051416264035500262750ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Greatest_subsequential_sum#Python #pythran export maxsum(int list) #pythran export 
maxsumseq(int list) #pythran export maxsumit(int list) #runas maxsum([0, 1, 0]) #runas maxsumseq([-1, 2, -1, 3, -1]) #runas maxsumit([-1, 1, 2, -5, -6]) def maxsum(sequence): """Return maximum sum.""" maxsofar, maxendinghere = 0, 0 for x in sequence: # invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]`` maxendinghere = max(maxendinghere + x, 0) maxsofar = max(maxsofar, maxendinghere) return maxsofar def maxsumseq(sequence): start, end, sum_start = -1, -1, -1 maxsum_, sum_ = 0, 0 for i, x in enumerate(sequence): sum_ += x if maxsum_ < sum_: # found maximal subsequence so far maxsum_ = sum_ start, end = sum_start, i elif sum_ < 0: # start new sequence sum_ = 0 sum_start = i assert maxsum_ == maxsum(sequence) assert maxsum_ == sum(sequence[start + 1:end + 1]) return sequence[start + 1:end + 1] def maxsumit(iterable): maxseq = seq = [] start, end, sum_start = -1, -1, -1 maxsum_, sum_ = 0, 0 for i, x in enumerate(iterable): seq.append(x); sum_ += x if maxsum_ < sum_: maxseq = seq; maxsum_ = sum_ start, end = sum_start, i elif sum_ < 0: seq = []; sum_ = 0 sum_start = i assert maxsum_ == sum(maxseq[:end - start]) return maxseq[:end - start] pythran-0.10.0+ds2/pythran/tests/rosetta/hailstone_sequence.py000066400000000000000000000004501416264035500245060ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Hailstone_sequence#Python #pythran export hailstone(int) #runas hailstone(27) #runas max((len(hailstone(i)), i) for i in range(1,100000)) def hailstone(n): seq = [n] while n>1: n = 3*n + 1 if n & 1 else n//2 seq.append(n) return seq pythran-0.10.0+ds2/pythran/tests/rosetta/hamming_numbers.py000066400000000000000000000031301416264035500240010ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Hamming_numbers#Python #pythran export test(int, int) #runas test(20, 1690) #FIXME unittest.skip from itertools import islice def hamming2(): '''\ This version is based on a snippet from: 
http://dobbscodetalk.com/index.php?option=com_content&task=view&id=913&Itemid=85 When expressed in some imaginary pseudo-C with automatic unlimited storage allocation and BIGNUM arithmetics, it can be expressed as: hamming = h where array h; n=0; h[0]=1; i=0; j=0; k=0; x2=2*h[ i ]; x3=3*h[j]; x5=5*h[k]; repeat: h[++n] = min(x2,x3,x5); if (x2==h[n]) { x2=2*h[++i]; } if (x3==h[n]) { x3=3*h[++j]; } if (x5==h[n]) { x5=5*h[++k]; } ''' h = 1 _h=[h] # memoized multipliers = [2, 3, 5] multindeces = [0 for i in multipliers] # index into _h for multipliers multvalues = [x * _h[i] for x,i in zip(multipliers, multindeces)] yield h while True: h = min(multvalues) _h.append(h) for (n,(v,x,i)) in enumerate(zip(multvalues, multipliers, multindeces)): if v == h: i += 1 multindeces[n] = i multvalues[n] = x * _h[i] # cap the memoization mini = min(multindeces) if mini >= 1000: del _h[:mini] multindeces = [i - mini for i in multindeces] # yield h def test(v1, v2): return list(islice(hamming2(), v1)), list(islice(hamming2(), v2, v2 + 1)) pythran-0.10.0+ds2/pythran/tests/rosetta/happy_numbers.py000066400000000000000000000004611416264035500235060ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Happy_numbers#Python #pythran export happy(int) #runas [x for x in range(500) if happy(x)][:8] def happy(n): past = set() while n != 1: n = sum(int(i)**2 for i in str(n)) if n in past: return False past.add(n) return True pythran-0.10.0+ds2/pythran/tests/rosetta/harshad_or_niven_series.py000066400000000000000000000011711416264035500255140ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Harshad_or_Niven_series#Python #pythran export test() #runas test() #FIXME unittest.skip import itertools def harshad(): for n in itertools.count(1): if n % sum(int(ch) for ch in str(n)) == 0: yield n def test(): l = list(itertools.islice(harshad(), 0, 20)) for n in harshad(): if n > 1000: r = n break from itertools import count, islice harshad_ = (n for n in count(1) if n % sum(int(ch) 
for ch in str(n)) == 0) l2 = list(islice(harshad_, 0, 20)) r2 = next(x for x in harshad_ if x > 1000) return r,l, r2, l2 pythran-0.10.0+ds2/pythran/tests/rosetta/palindrome.py000066400000000000000000000014151416264035500227640ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Palindrome_detection#Python #pythran export test() #runas test() def is_palindrome(s): return s == s[::-1] def is_palindrome_r2(s): return not s or s[0] == s[-1] and is_palindrome_r2(s[1:-1]) def is_palindrome_r(s): if len(s) <= 1: return True elif s[0] != s[-1]: return False else: return is_palindrome_r(s[1:-1]) def test_(f, good, bad): if all(f(x) for x in good) and not any(f(x) for x in bad): print('function passed all %d tests' % (len(good)+len(bad))) def test(): pals = ['', 'a', 'aa', 'aba', 'abba'] notpals = ['aA', 'abA', 'abxBa', 'abxxBa'] test_(is_palindrome, pals, notpals) test_(is_palindrome_r, pals, notpals) test_(is_palindrome_r2, pals, notpals) pythran-0.10.0+ds2/pythran/tests/rosetta/pangram.py000066400000000000000000000005461416264035500222630ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Pangram_checker#Python #pythran export ispangram(str) #runas ispangram("The quick brown fox jumps over the lazy dog") #runas ispangram("The brown fox jumps over the lazy dog") import string def ispangram(sentence, alphabet=string.ascii_lowercase): alphaset = set(alphabet) return alphaset <= set(sentence.lower()) pythran-0.10.0+ds2/pythran/tests/rosetta/pascal.py000066400000000000000000000013041416264035500220720ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Pascal%27s_triangle#Python #pythran export pascal(int) #pythran export pascal_(int) #runas pascal(10) #runas pascal_(10) def pascal(n): """Prints out n rows of Pascal's triangle. 
It returns False for failure and True for success.""" row = [1] k = [0] for x in range(max(n,0)): print(row) row=[l+r for l,r in zip(row+k,k+row)] return n>=1 def scan(op, seq, it): a = [] result = it a.append(it) for x in seq: result = op(result, x) a.append(result) return a def pascal_(n): def nextrow(row, x): return [l+r for l,r in zip(row+[0,],[0,]+row)] return scan(nextrow, range(n-1), [1,]) pythran-0.10.0+ds2/pythran/tests/rosetta/perf.py000066400000000000000000000005601416264035500215660ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Perfect_numbers#Python #pythran export perf(int) #pythran export perf_(int) #runas list(map(perf, range(20))) #runas list(map(perf_, range(20))) def perf(n): sum = 0 for i in range(1, n): if n % i == 0: sum += i return sum == n def perf_(n): return n == sum(i for i in range(1, n) if n % i == 0) pythran-0.10.0+ds2/pythran/tests/rosetta/permutation_derangement.py000066400000000000000000000022411416264035500255500ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Permutations/Derangements#Python from itertools import permutations import math #pythran export test(int, int, int) #runas test(4, 10, 20) def derangements(n): 'All deranged permutations of the integers 0..n-1 inclusive' return ( perm for perm in permutations(range(n)) if all(indx != p for indx, p in enumerate(perm)) ) def subfact(n): if n == 2 or n == 0: return 1 elif n == 1: return 0 elif 1 <= n <=18: return round(math.factorial(n) / math.e) elif n.imag == 0 and n.real == int(n.real) and n > 0: return (n-1) * ( subfact(n - 1) + subfact(n - 2) ) else: raise ValueError() def _iterlen(iter): 'length of an iterator without taking much memory' l = 0 for _ in iter: l += 1 return l def test(n1, n2, n3): print("Derangements of %s" % list(range(n1))) for d in derangements(n1): print(" %s" % (d,)) print("\nTable of n vs counted vs calculated derangements") for n in range(n2): print("%2i %-5i %-5i" % (n, _iterlen(derangements(n)), subfact(n))) 
print("\n!%i = %i" % (n3, subfact(n3))) pythran-0.10.0+ds2/pythran/tests/rosetta/permutation_rank.py000066400000000000000000000046461416264035500242250ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Permutations/Rank_of_a_permutation#Python #pythran export test() #runas test() from math import factorial as fact from random import randrange def identity_perm(n): return list(range(n)) def unranker1(n, r, pi): while n > 0: n1, (rdivn, rmodn) = n-1, divmod(r, n) pi[n1], pi[rmodn] = pi[rmodn], pi[n1] n = n1 r = rdivn return pi def init_pi1(n, pi): pi1 = [-1] * n for i in range(n): pi1[pi[i]] = i return pi1 def ranker1(n, pi, pi1): if n == 1: return 0 n1 = n-1 s = pi[n1] pi[n1], pi[pi1[n1]] = pi[pi1[n1]], pi[n1] pi1[s], pi1[n1] = pi1[n1], pi1[s] return s + n * ranker1(n1, pi, pi1) def unranker2(n, r, pi): while n > 0: n1 = n-1 s, rmodf = divmod(r, fact(n1)) pi[n1], pi[s] = pi[s], pi[n1] n = n1 r = rmodf return pi def ranker2(n, pi, pi1): if n == 1: return 0 n1 = n-1 s = pi[n1] pi[n1], pi[pi1[n1]] = pi[pi1[n1]], pi[n1] pi1[s], pi1[n1] = pi1[n1], pi1[s] return s * fact(n1) + ranker2(n1, pi, pi1) def get_random_ranks(permsize, samplesize): perms = fact(permsize) ranks = set() while len(ranks) < samplesize: ranks |= set( randrange(perms) for r in range(samplesize - len(ranks)) ) return ranks def test1(comment, unranker, ranker): n, samplesize, n2 = 3, 4, 12 print(comment) perms = [] for r in range(fact(n)): pi = identity_perm(n) perm = unranker(n, r, pi) perms.append((r, perm)) for r, pi in perms: pi1 = init_pi1(n, pi) print(' From rank %s to %s back to %s' % (r, pi, ranker(n, pi[:], pi1))) print('\n %s random individual samples of %s items:' % (samplesize, n2)) for r in get_random_ranks(n2, samplesize): pi = identity_perm(n2) print(' ' + ' '.join('%s' % i for i in unranker(n2, r, pi))) print('') def test2(comment, unranker): samplesize, n2 = 4, 10 print(comment) print(' %s random individual samples of %s items:' % (samplesize, n2)) txt = '' for r in 
get_random_ranks(n2, samplesize): pi = identity_perm(n2) txt += '\n' + ''.join(str(unranker(n2, r, pi))) print(txt, '') def test(): test1('First ordering:', unranker1, ranker1) test1('Second ordering:', unranker2, ranker2) test2('First ordering, large number of perms:', unranker1) pythran-0.10.0+ds2/pythran/tests/rosetta/permutation_swap.py000066400000000000000000000015331416264035500242340ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Permutations_by_swapping#Python #pythran export test() #runas test() #unittest.skip requires two-step typing def s_permutations(seq): items = [[]] for j in seq: new_items = [] for i, item in enumerate(items): if i % 2: # step up new_items += [item[:i] + [j] + item[i:] for i in range(len(item) + 1)] else: # step down new_items += [item[:i] + [j] + item[i:] for i in range(len(item), -1, -1)] items = new_items return [(tuple(item), -1 if i % 2 else 1) for i, item in enumerate(items)] def test(): for n in (3, 4): print('\nPermutations and sign of %i items' % n) for i in s_permutations(range(n)): print('Perm: ', i) pythran-0.10.0+ds2/pythran/tests/rosetta/permutation_test.py000066400000000000000000000017331416264035500242430ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Permutation_test#Python #pythran export permutationTest(int list, int list) #pythran export permutationTest2(int list, int list) #runas permutationTest([85, 88, 75, 66, 25, 29, 83, 39, 97], [68, 41, 10, 49, 16, 65, 32, 92, 28, 98]) #runas permutationTest2([85, 88, 75, 66, 25, 29, 83, 39, 97], [68, 41, 10, 49, 16, 65, 32, 92, 28, 98]) from itertools import combinations as comb def statistic(ab, a): sumab, suma = sum(ab), sum(a) return ( suma / len(a) - (sumab -suma) / (len(ab) - len(a)) ) def permutationTest(a, b): ab = a + b Tobs = statistic(ab, a) under = 0 for count, perm in enumerate(comb(ab, len(a)), 1): if statistic(ab, perm) <= Tobs: under += 1 return under * 100. 
/ count def permutationTest2(a, b): ab = a + b Tobs = sum(a) under = 0 for count, perm in enumerate(comb(ab, len(a)), 1): if sum(perm) <= Tobs: under += 1 return under * 100. / count pythran-0.10.0+ds2/pythran/tests/rosetta/permutations.py000066400000000000000000000003141416264035500233610ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Permutations#Python #pythran export test(int) #runas test(3) def test(n): import itertools return [values for values in itertools.permutations(range(1, n + 1))] pythran-0.10.0+ds2/pythran/tests/rosetta/pi.py000066400000000000000000000012621416264035500212420ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Pi#Python #pythran export test() #runas test() #FIXME unittest.skip def calcPi(): q, r, t, k, n, l = 1, 0, 1, 1, 3, 3 while True: if 4*q+r-t < n*t: yield n nr = 10*(r-n*t) n = ((10*(3*q+r))//t)-10*n q *= 10 r = nr else: nr = (2*q+r)*l nn = (q*(7*k)+2+(r*l))//(t*l) q *= k t *= l l += 2 k += 1 n = nn r = nr def test(): pi_digits = calcPi() res = list() for i, d in enumerate(pi_digits): res.append(str(d)) if i>50: return res pythran-0.10.0+ds2/pythran/tests/rosetta/pick.py000066400000000000000000000003551416264035500215620ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Pick_random_element#Python #pythran export test() def test(): import random res = {"foo":0, "bar":0, "baz":0} for i in range(500): res[random.choice(res.keys())] += 1 return res pythran-0.10.0+ds2/pythran/tests/rosetta/poly_div.py000066400000000000000000000013631416264035500224610ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Polynomial_long_division#Python from math import fabs #pythran export poly_div(int list, int list) #runas poly_div([-42, 0, -12, 1], [-3, 1, 0, 0]) def degree(poly): while poly and poly[-1] == 0: poly.pop() # normalize return len(poly)-1 def poly_div(N, D): dD = degree(D) dN = degree(N) if dD < 0: raise ZeroDivisionError if dN >= dD: q = [0] * dN while dN >= dD: d = [0]*(dN - dD) + D mult = 
q[dN - dD] = N[-1] / float(d[-1]) d = [coeff*mult for coeff in d] N = [fabs ( coeffN - coeffd ) for coeffN, coeffd in zip(N, d)] dN = degree(N) r = N else: q = [0] r = N return q, r pythran-0.10.0+ds2/pythran/tests/rosetta/power_set.py000066400000000000000000000020461416264035500226420ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Power_set#Python #pythran export p(int list) #pythran export list_powerset(int list) #pythran export list_powerset2(int list) #runas p([1,2,3]) #runas list_powerset([1,2,3]) #runas list_powerset2([1,2,3]) from functools import reduce def list_powerset(lst): # the power set of the empty set has one element, the empty set result = [[]] for x in lst: # for every additional element in our set # the power set consists of the subsets that don't # contain this element (just take the previous power set) # plus the subsets that do contain the element (use list # comprehension to add [x] onto everything in the # previous power set) result.extend([subset + [x] for subset in result]) return result # the above function in one statement def list_powerset2(lst): return reduce(lambda result, x: result + [subset + [x] for subset in result], lst, [[]]) def p(l): if not l: return [[]] return p(l[1:]) + [[l[0]] + x for x in p(l[1:])] pythran-0.10.0+ds2/pythran/tests/rosetta/price_fraction.py000066400000000000000000000010221416264035500236130ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Price_fraction#Python #pythran export pricerounder(float) #runas list(map(pricerounder, [0.3793, 0.4425, 0.0746, 0.6918, 0.2993, 0.5486, 0.7848, 0.9383, 0.2292, 0.9560])) import bisect def pricerounder(pricein): _cout = [.10, .18, .26, .32, .38, .44, .50, .54, .58, .62, .66, .70, .74, .78, .82, .86, .90, .94, .98, 1.00] _cin = [.06, .11, .16, .21, .26, .31, .36, .41, .46, .51, .56, .61, .66, .71, .76, .81, .86, .91, .96, 1.01] return _cout[ bisect.bisect_right(_cin, pricein) ] 
pythran-0.10.0+ds2/pythran/tests/rosetta/primality.py000066400000000000000000000015571416264035500226530ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Primality_by_trial_division#Python #pythran export test() #runas test() #FIXME unittest.skip def prime(a): return not (a < 2 or any(a % x == 0 for x in range(2, int(a**0.5) + 1))) def prime2(a): if a == 2: return True if a < 2 or a % 2 == 0: return False return not any(a % x == 0 for x in range(3, int(a**0.5) + 1, 2)) def prime3(a): if a < 2: return False if a == 2 or a == 3: return True # manually test 2 and 3 if a % 2 == 0 or a % 3 == 0: return False # exclude multiples of 2 and 3 maxDivisor = a**0.5 d, i = 5, 2 while d <= maxDivisor: if a % d == 0: return False d += i i = 6 - i # this modifies 2 into 4 and viceversa return True def test(): return [i for i in range(40) if prime(i)], [i for i in range(40) if prime2(i)], [i for i in range(40) if prime3(i)] pythran-0.10.0+ds2/pythran/tests/rosetta/prime_decomposition.py000066400000000000000000000020651416264035500247040ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Prime_decomposition#Python #pythran export fac(int) #runas fac(2**10 - 1) #pythran export test_decompose(int) #runas test_decompose(2**10 - 1) import math def decompose(n): primelist = [2, 3] for p in primes(primelist): if p*p > n: break while n % p == 0: yield p n //= p if n > 1: yield n def test_decompose(to): return [i for i in decompose(to)] def primes(primelist): for n in primelist: yield n n = primelist[-1] while True: n += 2 for x in primelist: if not n % x: break if x * x > n: primelist.append(n) yield n break def fac(n): step = lambda x: 1 + x * 4 - (x // 2) * 2 maxq = math.floor(math.sqrt(n)) d = 1 q = n % 2 == 0 and 2 or 3 while q <= maxq and n % q != 0: q = step(d) d += 1 res = [] if q <= maxq: res.extend(fac(n // q)) res.extend(fac(q)) else: res = [n] return res 
pythran-0.10.0+ds2/pythran/tests/rosetta/proba_choice.py000066400000000000000000000037411416264035500232530ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Probabilistic_choice#Python #pythran export test(str?, int?) #runas test() import random, bisect def probchoice(items, probs, bincount=1000): ''' Splits the interval 0.0-1.0 in proportion to probs then finds where each random.random() choice lies ''' prob_accumulator = 0 accumulator = [] for p in probs: prob_accumulator += p accumulator.append(prob_accumulator) while True: r = random.random() yield items[bisect.bisect(accumulator, r)] def probchoice2(items, probs, bincount=1000): ''' Puts items in bins in proportion to probs then uses random.choice() to select items. Larger bincount for more memory use but higher accuracy (on avarage). ''' bins = [] for item,prob in zip(items, probs): bins += [item]*int(bincount*prob) while True: yield random.choice(bins) def tester(func=probchoice, items=('good', 'bad' 'ugly'), probs=(0.5, 0.3, 0.2), trials = 100000 ): def problist2string(probs): ''' Turns a list of probabilities into a string Also rounds FP values ''' return ",".join('%8.6f' % (p,) for p in probs) counter = dict() it = func(items, probs) for dummy in range(trials): k = next(it) if k in counter: counter[k] += 1 else: counter[k] = 1 print("\n##\n##\n##") print("Trials: ", trials) print("Items: ", ' '.join(items)) print("Target probability: ", problist2string(probs)) print("Attained probability:", problist2string( counter[x]/float(trials) for x in items)) def test(init_seq='aleph beth gimel daleth he waw zayin heth', bincount=1000000): items = init_seq.split() probs = [1/(float(n)+5) for n in range(len(items))] probs[-1] = 1-sum(probs[:-1]) tester(probchoice, items, probs, bincount) tester(probchoice2, items, probs, 1000000) pythran-0.10.0+ds2/pythran/tests/rosetta/pythagor_triples.py000066400000000000000000000011731416264035500242320ustar00rootroot00000000000000#from 
http://rosettacode.org/wiki/Pythagorean_triples#Python #pythran export triples(int) #runas triples(10) #runas triples(100) #runas triples(1000) #runas triples(10000) #runas triples(100000) from functools import reduce def triples(lim, a = 3, b = 4, c = 5): l = a + b + c if l > lim: return (0, 0) return reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]), [ (1, int(lim / l)), triples(lim, a - 2*b + 2*c, 2*a - b + 2*c, 2*a - 2*b + 3*c), triples(lim, a + 2*b + 2*c, 2*a + b + 2*c, 2*a + 2*b + 3*c), triples(lim, -a + 2*b + 2*c, -2*a + b + 2*c, -2*a + 2*b + 3*c) ]) pythran-0.10.0+ds2/pythran/tests/rosetta/ramsey.py000066400000000000000000000006021416264035500221270ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Ramsey%27s_theorem#Python #pythran export test() #runas test() def test(): range17 = range(17) a = [['0'] * 17 for i in range17] for i in range17: a[i][i] = '-' for k in range(4): for i in range17: j = (i + pow(2, k)) % 17 a[i][j] = a[j][i] = '1' for row in a: print(' '.join(row)) pythran-0.10.0+ds2/pythran/tests/rosetta/rangeexpend.py000066400000000000000000000006031416264035500231300ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Range_expansion#Python #pythran export rangeexpand(str) #runas rangeexpand('-6,-3--1,3-5,7-11,14,15,17-20') def rangeexpand(txt): lst = [] for r in txt.split(','): if '-' in r[1:]: r0, r1 = r[1:].split('-', 1) lst += range(int(r[0] + r0), int(r1) + 1) else: lst.append(int(r)) return lst pythran-0.10.0+ds2/pythran/tests/rosetta/rangeextract.py000066400000000000000000000015421416264035500233220ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Range_extraction#Python #pythran export test_range_extract(int list list) #runas test_range_extract([[-8, -7, -6, -3, -2, -1, 0, 1, 3, 4, 5, 7, 8, 9, 10, 11, 14, 15, 17, 18, 19, 20], [0, 1, 2, 4, 6, 7, 8, 11, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 32, 33, 35, 36, 37, 38, 39]]) def range_extract(lst): 'Yield 2-tuple ranges or 
1-tuple single elements from list of increasing ints' lenlst = len(lst) i = 0 while i< lenlst: low = lst[i] while i = 2: yield [low, hi] elif hi - low == 1: yield [low] yield [hi] else: yield [low] i += 1 def test_range_extract(on): return [list(range_extract(i)) for i in on] pythran-0.10.0+ds2/pythran/tests/rosetta/read_conf.cfg000066400000000000000000000016721416264035500226660ustar00rootroot00000000000000# This is a configuration file in standard configuration file format # # Lines begininning with a hash or a semicolon are ignored by the application # program. Blank lines are also ignored by the application program. # This is the fullname parameter FULLNAME Foo Barber # This is a favourite fruit FAVOURITEFRUIT banana # This is a boolean that should be set NEEDSPEELING # This boolean is commented out ; SEEDSREMOVED # Configuration option names are not case sensitive, but configuration parameter # data is case sensitive and may be preserved by the application program. # An optional equals sign can be used to separate configuration parameter data # from the option name. This is dropped by the parser. # A configuration option may take multiple parameters separated by commas. # Leading and trailing whitespace around parameter names and parameter data fields # are ignored by the application program. 
OTHERFAMILY Rhu Barber, Harry Barber pythran-0.10.0+ds2/pythran/tests/rosetta/read_conf.py000066400000000000000000000017561416264035500225620ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Read_a_configuration_file#Python #pythran export readconf(str) #runas readconf("pythran/tests/rosetta/read_conf.cfg") def readconf(fn): ret = {} fp = open(fn) for line in fp: # Assume whitespace is ignorable line = line.strip() if not line or line.startswith('#'): continue boolval = "True" # Assume leading ";" means a false boolean if line.startswith(';'): # Remove one or more leading semicolons line = line.lstrip(';') # If more than just one word, not a valid boolean if len(line.split()) != 1: continue boolval = "False" bits = line.split(None, 1) if len(bits) == 1: # Assume booleans are just one standalone word k = bits[0] v = boolval else: # Assume more than one word is a string value k, v = bits ret[k.lower()] = v fp.close() return ret pythran-0.10.0+ds2/pythran/tests/rosetta/read_file.py000066400000000000000000000002661416264035500225470ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Read_entire_file#Python #pythran export readfile() #runas readfile() def readfile(): return open("pythran/tests/rosetta/read_conf.cfg").read() pythran-0.10.0+ds2/pythran/tests/rosetta/read_line.py000066400000000000000000000003121416264035500225470ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Read_a_file_line_by_line#Python #pythran export readfile() #runas readfile() def readfile(): return [line for line in open("pythran/tests/rosetta/read_conf.cfg")] pythran-0.10.0+ds2/pythran/tests/rosetta/read_specific_line.py000066400000000000000000000005771416264035500244310ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Read_a_specific_line_from_a_file#Python #pythran export readline(int) #runas readline(6) def readline(n): from itertools import islice f = open('pythran/tests/rosetta/read_conf.cfg') linelist = list(islice(f, n, n + 1)) assert 
linelist != [], 'Not ' + str(n) + ' lines in file' line = linelist[0] f.close() return line pythran-0.10.0+ds2/pythran/tests/rosetta/real_const.py000066400000000000000000000012341416264035500227620ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Real_constants_and_functions#Python #pythran export test() #runas test() def test(): import math x = 3.5 y = -0.2 print(math.e) # e print(math.pi) # pi print(math.sqrt(x)) # square root (Also commonly seen as x ** 0.5 to obviate importing the math module) print(math.log(x)) # natural logarithm print(math.log10(x)) # base 10 logarithm print(math.exp(x)) # e raised to the power of x print(abs(x)) # absolute value print(math.floor(x)) # floor print(math.ceil(x)) # ceiling print(x ** y) # exponentiation print(pow(x, y)) pythran-0.10.0+ds2/pythran/tests/rosetta/reduce_row_echelon.py000066400000000000000000000016001416264035500244610ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Reduced_row_echelon_form#Python #pythran export ToReducedRowEchelonForm(int list list) #runas mtx = [ [ 1, 2, -1, -4], [ 2, 3, -1, -11], [-2, 0, -3, 22],]; ToReducedRowEchelonForm(mtx) def ToReducedRowEchelonForm( M): if not M: return lead = 0 rowCount = len(M) columnCount = len(M[0]) for r in range(rowCount): if lead >= columnCount: return i = r while M[i][lead] == 0: i += 1 if i == rowCount: i = r lead += 1 if columnCount == lead: return M[i],M[r] = M[r],M[i] lv = M[r][lead] M[r] = [ mrx / lv for mrx in M[r]] for i in range(rowCount): if i != r: lv = M[i][lead] M[i] = [ iv - lv*rv for rv,iv in zip(M[r],M[i])] lead += 1 pythran-0.10.0+ds2/pythran/tests/rosetta/remove_duplicate.py000066400000000000000000000002661416264035500241640ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Remove_duplicate_elements#Python #pythran export unique(int list) #runas unique([1, 2, 3, 2, 3, 4]) def unique(items): return list(set(items)) 
pythran-0.10.0+ds2/pythran/tests/rosetta/rep_string.py000066400000000000000000000013231416264035500230040ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Rep-string#Python #pythran export is_repeated(str) #pythran export reps(str) #runas matchstr ="1001110011 1110111011 0010010010 1010101010 1111111111 0100101101 0100100 101 11 00 1"; [reps(i) for i in matchstr.split()] #runas matchstr ="1001110011 1110111011 0010010010 1010101010 1111111111 0100101101 0100100 101 11 00 1"; [is_repeated(i) for i in matchstr.split()] def is_repeated(text): 'check if the first part of the string is repeated throughout the string' for x in range(len(text)//2, 0, -1): if text.startswith(text[x:]): return x return 0 def reps(text): return [text[:x] for x in range(1, 1 + len(text) // 2) if text.startswith(text[x:])] pythran-0.10.0+ds2/pythran/tests/rosetta/repeat_string.py000066400000000000000000000002071416264035500234760ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Repeat_a_string #pythran export test(int) #runas test(5) def test(n): return "ha" * n, n * "ha" pythran-0.10.0+ds2/pythran/tests/rosetta/return_multiple.py000066400000000000000000000002331416264035500240610ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Return_multiple_values #pythran export addsub(int, int) #runas addsub(33, 12) def addsub(x, y): return x + y, x - y pythran-0.10.0+ds2/pythran/tests/rosetta/roman_decode.py000066400000000000000000000006601416264035500232520ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Roman_numerals/Decode#Python #pythran export decode(str) #runas decode('MCMXC') #runas decode('MMVIII') #runas decode('MDCLXVI') def decode(roman): _rdecode = dict(zip('MDCLXVI', (1000, 500, 100, 50, 10, 5, 1))) result = 0 for r, r1 in zip(roman, roman[1:]): rd, rd1 = _rdecode[r], _rdecode[r1] result += -rd if rd < rd1 else rd return result + _rdecode[roman[-1]] 
pythran-0.10.0+ds2/pythran/tests/rosetta/yin_and_yang.py000066400000000000000000000017131416264035500232720ustar00rootroot00000000000000# from http://rosettacode.org/wiki/Yin_and_yang#Python #pythran export yinyang(int) #runas yinyang(4) import math def yinyang(n=3): radii = [i * n for i in [1, 3, 6]] ranges = [list(range(-r, r+1)) for r in radii] squares = [[ (x,y) for x in rnge for y in rnge] for rnge in ranges] circles = [[ (x,y) for x,y in sqrpoints if math.hypot(x,y) <= radius ] for sqrpoints, radius in zip(squares, radii)] m = {(x,y):' ' for x,y in squares[-1]} for x,y in circles[-1]: m[x,y] = '*' for x,y in circles[-1]: if x>0: m[(x,y)] = '.' for x,y in circles[-2]: m[(x,y+3*n)] = '*' m[(x,y-3*n)] = '.' for x,y in circles[-3]: m[(x,y+3*n)] = '.' m[(x,y-3*n)] = '*' return '\n'.join(''.join(m[(x,y)] for x in reversed(ranges[-1])) for y in ranges[-1]) pythran-0.10.0+ds2/pythran/tests/rosetta/zeckendorf_number_representation.py000066400000000000000000000025761416264035500274670ustar00rootroot00000000000000#from http://rosettacode.org/wiki/Zeckendorf_number_representation#Python #pythran export test(int) #pythran export z(int) #pythran export zeckendorf(int) #runas test(20) #runas ['%3i: %8s' % (i, ''.join(str(d) for d in zeckendorf(i))) for i in range(21)] #runas ['%3i: %8s' % (i, ''.join(str(d) for d in z(i))) for i in range(21)] def fib(): memo = [1, 2] while True: memo.append(sum(memo)) yield memo.pop(0) def sequence_down_from_n(n, seq_generator): seq = [] for s in seq_generator(): seq.append(s) if s >= n: break return seq[::-1] def zeckendorf(n): if n == 0: return [0] seq = sequence_down_from_n(n, fib) digits, nleft = [], n for s in seq: if s <= nleft: digits.append(1) nleft -= s else: digits.append(0) assert nleft == 0, 'Check all of n is accounted for' assert sum(x*y for x,y in zip(digits, seq)) == n, 'Assert digits are correct' while digits[0] == 0: # Remove any zeroes padding L.H.S. 
digits.pop(0) return digits def z(n): if n == 0 : return [0] fib = [2,1] while fib[0] < n: fib[0:0] = [sum(fib[:2])] dig = [] for f in fib: if f <= n: dig, n = dig + [1], n - f else: dig += [0] return dig if dig[0] else dig[1:] def test(n): return sequence_down_from_n(n, fib) pythran-0.10.0+ds2/pythran/tests/scikit-image/000077500000000000000000000000001416264035500211445ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/scikit-image/_colormixer.py000066400000000000000000000246221416264035500240460ustar00rootroot00000000000000"""Color Mixer NumPy does not do overflow checking when adding or multiplying integers, so currently the only way to clip results efficiently (without making copies of the data) is with an extension such as this one. """ import numpy as np # pythran export add(uint8[:,:,:], uint8[:,:,:], int, int) # pythran export multiply(uint8[:,:,:], uint8[:,:,:], int, float) # pythran export brightness(uint8[:,:,:], uint8[:,:,:], float, int) # pythran export sigmoid_gamma(uint8[:,:,:], uint8[:,:,:], float, float) # pythran export gamma(uint8[:,:,:], uint8[:,:,:], float) # pythran export py_hsv_2_rgb(float, float, float) # pythran export py_rgb_2_hsv(int, int, int) # pythran export hsv_add (uint8[:,:,:], uint8[:,:,:], float, float, float) def add(img, stateimg, channel, amount): """Add a given amount to a color channel of `stateimg`, and store the result in `img`. Overflow is clipped. Parameters ---------- img : (M, N, 3) ndarray of uint8 Output image. stateimg : (M, N, 3) ndarray of uint8 Input image. channel : int Channel (0 for "red", 1 for "green", 2 for "blue"). amount : int Value to add. 
""" height, width, _ = img.shape k = channel n = amount lut = [np.uint8()] * 256 for l in range(256): op_result = l + n if op_result > 255: op_result = 255 elif op_result < 0: op_result = 0 lut[l] = np.uint8(op_result) for i in range(height): for j in range(width): img[i, j, k] = lut[stateimg[i, j, k]] def multiply(img, stateimg, channel, amount): """Multiply a color channel of `stateimg` by a certain amount, and store the result in `img`. Overflow is clipped. Parameters ---------- img : (M, N, 3) ndarray of uint8 Output image. stateimg : (M, N, 3) ndarray of uint8 Input image. channel : int Channel (0 for "red", 1 for "green", 2 for "blue"). amount : float Multiplication factor. """ height, width, _ = img.shape k = channel n = amount lut = [np.uint8()] * 256 for l in range(256): op_result = l * n if op_result > 255: op_result = 255 elif op_result < 0: op_result = 0 lut[l] = np.uint8(op_result) for i in range(height): for j in range(width): img[i, j, k] = lut[stateimg[i, j, k]] def brightness(img, stateimg, factor, offset): """Modify the brightness of an image. 'factor' is multiplied to all channels, which are then added by 'amount'. Overflow is clipped. Parameters ---------- img : (M, N, 3) ndarray of uint8 Output image. stateimg : (M, N, 3) ndarray of uint8 Input image. factor : float Multiplication factor. offset : int Ammount to add to each channel. 
""" height, width, _ = img.shape lut = [np.uint8()] * 256 for k in range(256): op_result = k * factor + offset if op_result > 255: op_result = 255 elif op_result < 0: op_result = 0 lut[k] = np.uint8(op_result) for i in range(height): for j in range(width): img[i, j, 0] = lut[stateimg[i, j, 0]] img[i, j, 1] = lut[stateimg[i, j, 1]] img[i, j, 2] = lut[stateimg[i, j, 2]] def sigmoid_gamma(img, stateimg, alpha, beta): height, width, _ = img.shape c1 = 1 / (1 + np.exp(beta)) c2 = 1 / (1 + np.exp(beta - alpha)) - c1 lut = [np.uint8()] * 256 # compute the lut for k in range(256): lut[k] = np.uint8(((1 / (1 + np.exp(beta - (k / 255.) * alpha))) - c1) * 255 / c2) for i in range(height): for j in range(width): img[i, j, 0] = lut[stateimg[i, j, 0]] img[i, j, 1] = lut[stateimg[i, j, 1]] img[i, j, 2] = lut[stateimg[i, j, 2]] def gamma(img, stateimg, gamma): height, width, _ = img.shape lut = [np.uint8()] * 256 if gamma == 0: gamma = 0.00000000000000000001 gamma = 1./gamma # compute the lut for k in range(256): lut[k] = np.uint8((pow((k / 255.), gamma) * 255)) for i in range(height): for j in range(width): img[i, j, 0] = lut[stateimg[i, j, 0]] img[i, j, 1] = lut[stateimg[i, j, 1]] img[i, j, 2] = lut[stateimg[i, j, 2]] def rgb_2_hsv(RGB, HSV): R, G, B = RGB if R > 255: R = 255 elif R < 0: R = 0 if G > 255: G = 255 elif G < 0: G = 0 if B > 255: B = 255 elif B < 0: B = 0 if R < G: MIN = R MAX = G else: MIN = G MAX = R if B < MIN: MIN = B elif B > MAX: MAX = B else: pass V = MAX / 255. if MAX == MIN: H = 0. elif MAX == R: H = (60 * (G - B) / (MAX - MIN) + 360) % 360 elif MAX == G: H = 60 * (B - R) / (MAX - MIN) + 120 else: H = 60 * (R - G) / (MAX - MIN) + 240 if MAX == 0: S = 0 else: S = 1 - MIN / MAX HSV[0] = H HSV[1] = S HSV[2] = V def hsv_2_rgb(HSV, RGB): H, S, V = HSV if H > 360: H = H % 360 elif H < 0: H = 360 - ((-1 * H) % 360) else: pass if S > 1: S = 1 elif S < 0: S = 0 else: pass if V > 1: V = 1 elif V < 0: V = 0 else: pass hi = int(H / 60.) % 6 f = (H / 60.) - int(H / 60.) 
p = V * (1 - S) q = V * (1 - f * S) t = V * (1 - (1 - f) * S) if hi == 0: r = V g = t b = p elif hi == 1: r = q g = V b = p elif hi == 2: r = p g = V b = t elif hi == 3: r = p g = q b = V elif hi == 4: r = t g = p b = V else: r = V g = p b = q RGB[0] = r RGB[1] = g RGB[2] = b def py_hsv_2_rgb(H, S, V): '''Convert an HSV value to RGB. Automatic clipping. Parameters ---------- H : float From 0. - 360. S : float From 0. - 1. V : float From 0. - 1. Returns ------- out : (R, G, B) ints Each from 0 - 255 conversion convention from here: http://en.wikipedia.org/wiki/HSL_and_HSV ''' HSV = [H, S, V] RGB = [0] * 3 hsv_2_rgb(HSV, RGB) R = int(RGB[0] * 255) G = int(RGB[1] * 255) B = int(RGB[2] * 255) return R, G, B def py_rgb_2_hsv(R, G, B): '''Convert an HSV value to RGB. Automatic clipping. Parameters ---------- R : int From 0. - 255. G : int From 0. - 255. B : int From 0. - 255. Returns ------- out : (H, S, V) floats Ranges (0...360), (0...1), (0...1) conversion convention from here: http://en.wikipedia.org/wiki/HSL_and_HSV ''' RGB = [float(R), float(G), float(B)] HSV = [0.] * 3 rgb_2_hsv(RGB, HSV) return HSV def hsv_add(img, stateimg, h_amt, s_amt, v_amt): """Modify the image color by specifying additive HSV Values. Since the underlying images are RGB, all three values HSV must be specified at the same time. The RGB triplet in the image is converted to HSV, the operation is applied, and then the HSV triplet is converted back to RGB HSV values are scaled to H(0. - 360.), S(0. - 1.), V(0. - 1.) then the operation is performed and any overflow is clipped, then the reverse transform is performed. Those are the ranges to keep in mind, when passing in values. Parameters ---------- img : (M, N, 3) ndarray of uint8 Output image. stateimg : (M, N, 3) ndarray of uint8 Input image. h_amt : float Ammount to add to H channel. s_amt : float Ammount to add to S channel. v_amt : float Ammount to add to V channel. """ height, width, _ = img.shape HSV = [0.] * 3 RGB = [0.] 
* 3 for i in range(height): for j in range(width): RGB[0] = stateimg[i, j, 0] RGB[1] = stateimg[i, j, 1] RGB[2] = stateimg[i, j, 2] rgb_2_hsv(RGB, HSV) # Add operation HSV[0] += h_amt HSV[1] += s_amt HSV[2] += v_amt hsv_2_rgb(HSV, RGB) RGB[0] *= 255 RGB[1] *= 255 RGB[2] *= 255 img[i, j, 0] = RGB[0] img[i, j, 1] = RGB[1] img[i, j, 2] = RGB[2] # pythran export hsv_multiply(uint8[:,:,:], uint8[:,:,:], float, float, float) def hsv_multiply(img, stateimg, h_amt, s_amt, v_amt): """Modify the image color by specifying multiplicative HSV Values. Since the underlying images are RGB, all three values HSV must be specified at the same time. The RGB triplet in the image is converted to HSV, the operation is applied, and then the HSV triplet is converted back to RGB HSV values are scaled to H(0. - 360.), S(0. - 1.), V(0. - 1.) then the operation is performed and any overflow is clipped, then the reverse transform is performed. Those are the ranges to keep in mind, when passing in values. Note that since hue is in degrees, it makes no sense to multiply that channel, thus an add operation is performed on the hue. And the values given for h_amt, should be the same as for hsv_add Parameters ---------- img : (M, N, 3) ndarray of uint8 Output image. stateimg : (M, N, 3) ndarray of uint8 Input image. h_amt : float Ammount to add to H channel. s_amt : float Ammount by which to multiply S channel. v_amt : float Ammount by which to multiply V channel. """ height, width, _ = img.shape HSV = [0.] * 3 RGB = [0.] 
* 3 for i in range(height): for j in range(width): RGB[0] = stateimg[i, j, 0] RGB[1] = stateimg[i, j, 1] RGB[2] = stateimg[i, j, 2] rgb_2_hsv(RGB, HSV) # Multiply operation HSV[0] += h_amt HSV[1] *= s_amt HSV[2] *= v_amt hsv_2_rgb(HSV, RGB) RGB[0] *= 255 RGB[1] *= 255 RGB[2] *= 255 img[i, j, 0] = RGB[0] img[i, j, 1] = RGB[1] img[i, j, 2] = RGB[2] pythran-0.10.0+ds2/pythran/tests/scikit-image/_convex_hull.py000066400000000000000000000030611416264035500242030ustar00rootroot00000000000000import numpy as np # pythran export possible_hull(uint8[:,:]) def possible_hull(img): """Return positions of pixels that possibly belong to the convex hull. Parameters ---------- img : ndarray of bool Binary input image. Returns ------- coords : ndarray (cols, 2) The ``(row, column)`` coordinates of all pixels that possibly belong to the convex hull. """ rows, cols = img.shape # Output: rows storage slots for left boundary pixels # cols storage slots for top boundary pixels # rows storage slots for right boundary pixels # cols storage slots for bottom boundary pixels coords = np.ones((2 * (rows + cols), 2), dtype=np.intp) coords *= -1 rows_cols = rows + cols rows_2_cols = 2 * rows + cols for r in range(rows): rows_cols_r = rows_cols + r for c in range(cols): if img[r, c] != 0: rows_c = rows + c rows_2_cols_c = rows_2_cols + c # Left check if coords[r, 1] == -1: coords[r, 0] = r coords[r, 1] = c # Right check elif coords[rows_cols_r, 1] < c: coords[rows_cols_r] = r, c # Top check if coords[rows_c, 1] == -1: coords[rows_c] = r, c # Bottom check elif coords[rows_2_cols_c, 0] < r: coords[rows_2_cols_c] = r, c return coords[coords[:, 0] != -1] pythran-0.10.0+ds2/pythran/tests/scikit-image/_greyreconstruct.py000066400000000000000000000065151416264035500251260ustar00rootroot00000000000000""" `reconstruction_loop` originally part of CellProfiler, code licensed under both GPL and BSD licenses. 
Website: http://www.cellprofiler.org Copyright (c) 2003-2009 Massachusetts Institute of Technology Copyright (c) 2009-2011 Broad Institute All rights reserved. Original author: Lee Kamentsky """ # pythran export reconstruction_loop(uint32[], int32[], int32[], int32[], # intp, intp) def reconstruction_loop(ranks, prev, next, strides, current_idx, image_stride): """The inner loop for reconstruction. This algorithm uses the rank-order of pixels. If low intensity pixels have a low rank and high intensity pixels have a high rank, then this loop performs reconstruction by dilation. If this ranking is reversed, the result is reconstruction by erosion. For each pixel in the seed image, check its neighbors. If its neighbor's rank is below that of the current pixel, replace the neighbor's rank with the rank of the current pixel. This dilation is limited by the mask, i.e. the rank at each pixel cannot exceed the mask as that pixel. Parameters ---------- ranks : array The rank order of the flattened seed and mask images. prev, next: arrays Indices of previous and next pixels in rank sorted order. strides : array Strides to neighbors of the current pixel. current_idx : int Index of highest-ranked pixel used as starting point in loop. image_stride : int Stride between seed image and mask image in `aranks`. 
""" nstrides = strides.shape[0] while current_idx != -1: if current_idx < image_stride: current_rank = ranks[current_idx] if current_rank == 0: break for i in range(nstrides): neighbor_idx = current_idx + strides[i] neighbor_rank = ranks[neighbor_idx] # Only propagate neighbors ranked below the current rank if neighbor_rank < current_rank: mask_rank = ranks[neighbor_idx + image_stride] # Only propagate neighbors ranked below the mask rank if neighbor_rank < mask_rank: # Raise the neighbor to the mask rank if # the mask ranked below the current rank if mask_rank < current_rank: current_link = neighbor_idx + image_stride ranks[neighbor_idx] = mask_rank else: current_link = current_idx ranks[neighbor_idx] = current_rank # unlink the neighbor nprev = prev[neighbor_idx] nnext = next[neighbor_idx] next[nprev] = nnext if nnext != -1: prev[nnext] = nprev # link to the neighbor after the current link nnext = next[current_link] next[neighbor_idx] = nnext prev[neighbor_idx] = current_link if nnext >= 0: prev[nnext] = neighbor_idx next[current_link] = neighbor_idx current_idx = next[current_idx] pythran-0.10.0+ds2/pythran/tests/scikit-image/_hessian_det_appx.py000066400000000000000000000072201416264035500251740ustar00rootroot00000000000000import numpy as np def _clip(x, low, high): """Clips coordinate between high and low. This method was created so that `hessian_det_appx` does not have to make a Python call. Parameters ---------- x : int Coordinate to be clipped. low : int The lower bound. high : int The higher bound. Returns ------- x : int `x` clipped between `high` and `low`. """ if x > high: return high if x < low: return low return x def _integ(img, r, c, rl, cl): """Integrate over the integral image in the given window This method was created so that `hessian_det_appx` does not have to make a Python call. Parameters ---------- img : array The integral image over which to integrate. r : int The row number of the top left corner. 
c : int The column number of the top left corner. rl : int The number of rows over which to integrate. cl : int The number of columns over which to integrate. Returns ------- ans : int The integral over the given window. """ r = _clip(r, 0, img.shape[0] - 1) c = _clip(c, 0, img.shape[1] - 1) r2 = _clip(r + rl, 0, img.shape[0] - 1) c2 = _clip(c + cl, 0, img.shape[1] - 1) ans = img[r, c] + img[r2, c2] - img[r, c2] - img[r2, c] return max(0, ans) # pythran export _hessian_matrix_det(float64[:,:], float64) def _hessian_matrix_det(img, sigma): """Computes the approximate Hessian Determinant over an image. This method uses box filters over integral images to compute the approximate Hessian Determinant as described in [1]_. Parameters ---------- img : array The integral image over which to compute Hessian Determinant. sigma : float Standard deviation used for the Gaussian kernel, used for the Hessian matrix Returns ------- out : array The array of the Determinant of Hessians. References ---------- .. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool, "SURF: Speeded Up Robust Features" ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf Notes ----- The running time of this method only depends on size of the image. It is independent of `sigma` as one would expect. The downside is that the result for `sigma` less than `3` is not accurate, i.e., not similar to the result obtained if someone computed the Hessian and took it's determinant. 
""" size = int(3 * sigma) height, width = img.shape s2 = (size - 1) // 2 s3 = size // 3 w = size out = np.zeros_like(img, dtype=np.double) w_i = 1.0 / size / size if size % 2 == 0: size += 1 for r in range(height): for c in range(width): tl = _integ(img, r - s3, c - s3, s3, s3) # top left br = _integ(img, r + 1, c + 1, s3, s3) # bottom right bl = _integ(img, r - s3, c + 1, s3, s3) # bottom left tr = _integ(img, r + 1, c - s3, s3, s3) # top right dxy = bl + tr - tl - br dxy = -dxy * w_i # middle box mid = _integ(img, r - s3 + 1, c - s2, 2 * s3 - 1, w) # sides side = _integ(img, r - s3 + 1, c - s3 // 2, 2 * s3 - 1, s3) dxx = mid - 3 * side dxx = -dxx * w_i mid = _integ(img, r - s2, c - s3 + 1, w, 2 * s3 - 1) side = _integ(img, r - s3 // 2, c - s3 + 1, s3, 2 * s3 - 1) dyy = mid - 3 * side dyy = -dyy * w_i out[r, c] = (dxx * dyy - 0.81 * (dxy * dxy)) return out pythran-0.10.0+ds2/pythran/tests/scikit-image/_integ.py000066400000000000000000000007571416264035500227740ustar00rootroot00000000000000#pythran export _integ(uint8[:,:], int, int, int, int) import numpy as np def _clip(x, low, high): assert 0 <= low <= high if x > high: return high if x < low: return low return x def _integ(img, r, c, rl, cl): r = _clip(r, 0, img.shape[0] - 1) c = _clip(c, 0, img.shape[1] - 1) r2 = _clip(r + rl, 0, img.shape[0] - 1) c2 = _clip(c + cl, 0, img.shape[1] - 1) ans = img[r, c] + img[r2, c2] - img[r, c2] - img[r2, c] return max(0, ans) pythran-0.10.0+ds2/pythran/tests/scikit-image/_moments_cy.py000066400000000000000000000011671416264035500240370ustar00rootroot00000000000000import numpy as np # pythran export moments_hu(float64[:,:]) def moments_hu(nu): hu = np.zeros((7, ), dtype=np.double) t0 = nu[3, 0] + nu[1, 2] t1 = nu[2, 1] + nu[0, 3] q0 = t0 * t0 q1 = t1 * t1 n4 = 4 * nu[1, 1] s = nu[2, 0] + nu[0, 2] d = nu[2, 0] - nu[0, 2] hu[0] = s hu[1] = d * d + n4 * nu[1, 1] hu[3] = q0 + q1 hu[5] = d * (q0 - q1) + n4 * t0 * t1 t0 *= q0 - 3 * q1 t1 *= 3 * q0 - q1 q0 = nu[3, 0] - 3 * nu[1, 2] 
q1 = 3 * nu[2, 1] - nu[0, 3] hu[2] = q0 * q0 + q1 * q1 hu[4] = q0 * t0 + q1 * t1 hu[6] = q1 * t0 - q0 * t1 return np.asarray(hu) pythran-0.10.0+ds2/pythran/tests/scikit-image/_radon_transform.py000066400000000000000000000153431416264035500250610ustar00rootroot00000000000000import numpy as np from numpy import cos, sin, floor, ceil, sqrt def bilinear_ray_sum(image, theta, ray_position): """ Compute the projection of an image along a ray. Parameters ---------- image : 2D array, dtype=float Image to project. theta : float Angle of the projection ray_position : float Position of the ray within the projection Returns ------- projected_value : float Ray sum along the projection norm_of_weights : A measure of how long the ray's path through the reconstruction circle was """ theta = theta / 180. * np.pi radius = image.shape[0] // 2 - 1 projection_center = image.shape[0] // 2 rotation_center = image.shape[0] // 2 # (s, t) is the (x, y) system rotated by theta t = ray_position - projection_center # s0 is the half-length of the ray's path in the reconstruction circle s0 = sqrt(radius * radius - t * t) if radius*radius >= t*t else 0. Ns = 2 * int(ceil(2 * s0)) # number of steps along the ray ray_sum = 0. weight_norm = 0. if Ns > 0: # step length between samples ds = 2 * s0 / Ns dx = -ds * cos(theta) dy = -ds * sin(theta) # point of entry of the ray into the reconstruction circle x0 = s0 * cos(theta) - t * sin(theta) y0 = s0 * sin(theta) + t * cos(theta) for k in range(Ns + 1): x = x0 + k * dx y = y0 + k * dy index_i = x + rotation_center index_j = y + rotation_center i = int(floor(index_i)) j = int(floor(index_j)) di = index_i - floor(index_i) dj = index_j - floor(index_j) # Use linear interpolation between values # Where values fall outside the array, assume zero if i > 0 and j > 0: weight = (1. - di) * (1. - dj) * ds ray_sum += weight * image[i, j] weight_norm += weight * weight if i > 0 and j < image.shape[1] - 1: weight = (1. 
- di) * dj * ds ray_sum += weight * image[i, j+1] weight_norm += weight * weight if i < image.shape[0] - 1 and j > 0: weight = di * (1 - dj) * ds ray_sum += weight * image[i+1, j] weight_norm += weight * weight if i < image.shape[0] - 1 and j < image.shape[1] - 1: weight = di * dj * ds ray_sum += weight * image[i+1, j+1] weight_norm += weight * weight return ray_sum, weight_norm def bilinear_ray_update(image, image_update, theta, ray_position, projected_value): """Compute the update along a ray using bilinear interpolation. Parameters ---------- image : 2D array, dtype=float Current reconstruction estimate. image_update : 2D array, dtype=float Array of same shape as ``image``. Updates will be added to this array. theta : float Angle of the projection. ray_position : float Position of the ray within the projection. projected_value : float Projected value (from the sinogram). Returns ------- deviation : Deviation before updating the image. """ ray_sum, weight_norm = bilinear_ray_sum(image, theta, ray_position) if weight_norm > 0.: deviation = -(ray_sum - projected_value) / weight_norm else: deviation = 0. theta = theta / 180. * np.pi radius = image.shape[0] // 2 - 1 projection_center = image.shape[0] // 2 rotation_center = image.shape[0] // 2 # (s, t) is the (x, y) system rotated by theta t = ray_position - projection_center # s0 is the half-length of the ray's path in the reconstruction circle s0 = sqrt(radius*radius - t*t) if radius*radius >= t*t else 0. 
Ns = 2 * int(ceil(2 * s0)) # beta for equiripple Hamming window hamming_beta = 0.46164 if Ns > 0: # Step length between samples ds = 2 * s0 / Ns dx = -ds * cos(theta) dy = -ds * sin(theta) # Point of entry of the ray into the reconstruction circle x0 = s0 * cos(theta) - t * sin(theta) y0 = s0 * sin(theta) + t * cos(theta) for k in range(Ns + 1): x = x0 + k * dx y = y0 + k * dy index_i = x + rotation_center index_j = y + rotation_center i = int(floor(index_i)) j = int(floor(index_j)) di = index_i - floor(index_i) dj = index_j - floor(index_j) hamming_window = ((1 - hamming_beta) - hamming_beta * cos(2 * np.pi * k / (Ns - 1))) if i > 0 and j > 0: image_update[i, j] += (deviation * (1. - di) * (1. - dj) * ds * hamming_window) if i > 0 and j < image.shape[1] - 1: image_update[i, j+1] += (deviation * (1. - di) * dj * ds * hamming_window) if i < image.shape[0] - 1 and j > 0: image_update[i+1, j] += (deviation * di * (1 - dj) * ds * hamming_window) if i < image.shape[0] - 1 and j < image.shape[1] - 1: image_update[i+1, j+1] += (deviation * di * dj * ds * hamming_window) return deviation # pythran export sart_projection_update(float64[:,:], float64, float64[:], # float64) # # pythran export sart_projection_update(float64[:,:], float64, float64[:]) def sart_projection_update(image, theta, projection, projection_shift=0.): """ Compute update to a reconstruction estimate from a single projection using bilinear interpolation. 
Parameters ---------- image : 2D array, dtype=float Current reconstruction estimate theta : float Angle of the projection projection : 1D array, dtype=float Projected values, taken from the sinogram projection_shift : float Shift the position of the projection by this many pixels before using it to compute an update to the reconstruction estimate Returns ------- image_update : 2D array, dtype=float Array of same shape as ``image`` containing updates that should be added to ``image`` to improve the reconstruction estimate """ image_update = np.zeros_like(image) for i in range(projection.shape[0]): ray_position = i + projection_shift bilinear_ray_update(image, image_update, theta, ray_position, projection[i]) return image_update pythran-0.10.0+ds2/pythran/tests/scikit-image/_unwrap_1d.py000066400000000000000000000007231416264035500235570ustar00rootroot00000000000000from numpy import pi # pythran export unwrap_1d(float64[:], float64[:]) def unwrap_1d(image, unwrapped_image): '''Phase unwrapping using the naive approach.''' unwrapped_image[0] = image[0] periods = 0 for i in range(1, image.shape[0]): difference = image[i] - image[i - 1] if difference > pi: periods -= 1 elif difference < -pi: periods += 1 unwrapped_image[i] = image[i] + 2 * pi * periods pythran-0.10.0+ds2/pythran/tests/scikit-image/brief_cy.py000066400000000000000000000007151416264035500233030ustar00rootroot00000000000000# pythran export _brief_loop(float64[:,:], uint8[:,:], # intp[:,:], int[:,:], int[:,:]) def _brief_loop(image, descriptors, keypoints, pos0, pos1): for k in range(len(keypoints)): kr, kc = keypoints[k] for p in range(len(pos0)): pr0, pc0 = pos0[p] pr1, pc1 = pos1[p] descriptors[k, p] = (image[kr + pr0, kc + pc0] < image[kr + pr1, kc + pc1]) pythran-0.10.0+ds2/pythran/tests/scikit-image/cmorph.py000066400000000000000000000105721416264035500230130ustar00rootroot00000000000000import numpy as np # pythran export _dilate(uint8[:, :], uint8[:, :], uint8[:, :], int8, int8) # pythran export 
_dilate(uint8[:, :], uint8[:, :], uint8[:, :], int8) # pythran export _dilate(uint8[:, :], uint8[:, :], uint8[:, :]) # pythran export _dilate(uint8[:, :], uint8[:, :]) def _dilate(image, selem, out=None, shift_x=0, shift_y=0): """Return greyscale morphological dilation of an image. Morphological dilation sets a pixel at (i,j) to the maximum over all pixels in the neighborhood centered at (i,j). Dilation enlarges bright regions and shrinks dark regions. Parameters ---------- image : ndarray Image array. selem : ndarray The neighborhood expressed as a 2-D array of 1's and 0's. out : ndarray The array to store the result of the morphology. If None, is passed, a new array will be allocated. shift_x, shift_y : bool shift structuring element about center point. This only affects eccentric structuring elements (i.e. selem with even numbered sides). Returns ------- dilated : uint8 array The result of the morphological dilation. """ rows = image.shape[0] cols = image.shape[1] srows = selem.shape[0] scols = selem.shape[1] centre_r = int(selem.shape[0] / 2) - shift_y centre_c = int(selem.shape[1] / 2) - shift_x image = np.ascontiguousarray(image) if out is None: out = np.zeros((rows, cols), dtype=np.uint8) selem_num = np.sum(np.asarray(selem) != 0) sr = np.empty(selem_num, dtype=np.intp) sc = np.empty(selem_num, dtype=np.intp) s = 0 for r in range(srows): for c in range(scols): if selem[r, c] != 0: sr[s] = r - centre_r sc[s] = c - centre_c s += 1 for r in range(rows): for c in range(cols): local_max = 0 for s in range(selem_num): rr = r + sr[s] cc = c + sc[s] if 0 <= rr < rows and 0 <= cc < cols: value = image[rr, cc] if value > local_max: local_max = value out[r, c] = local_max return np.asarray(out) # pythran export _erode(uint8[:, :], uint8[:, :], uint8[:, :], int8, int8) # pythran export _erode(uint8[:, :], uint8[:, :], uint8[:, :], int8) # pythran export _erode(uint8[:, :], uint8[:, :], uint8[:, :]) # pythran export _erode(uint8[:, :], uint8[:, :]) def _erode(image, 
selem, out=None, shift_x=0, shift_y=0): """Return greyscale morphological erosion of an image. Morphological erosion sets a pixel at (i,j) to the minimum over all pixels in the neighborhood centered at (i,j). Erosion shrinks bright regions and enlarges dark regions. Parameters ---------- image : ndarray Image array. selem : ndarray The neighborhood expressed as a 2-D array of 1's and 0's. out : ndarray The array to store the result of the morphology. If None is passed, a new array will be allocated. shift_x, shift_y : bool shift structuring element about center point. This only affects eccentric structuring elements (i.e. selem with even numbered sides). Returns ------- eroded : uint8 array The result of the morphological erosion. """ rows = image.shape[0] cols = image.shape[1] srows = selem.shape[0] scols = selem.shape[1] centre_r = int(selem.shape[0] / 2) - shift_y centre_c = int(selem.shape[1] / 2) - shift_x image = np.ascontiguousarray(image) if out is None: out = np.zeros((rows, cols), dtype=np.uint8) selem_num = np.sum(np.asarray(selem) != 0) sr = np.empty(selem_num, dtype=np.intp) sc = np.empty(selem_num, dtype=np.intp) s = 0 for r in range(srows): for c in range(scols): if selem[r, c] != 0: sr[s] = r - centre_r sc[s] = c - centre_c s += 1 for r in range(rows): for c in range(cols): local_min = 255 for s in range(selem_num): rr = r + sr[s] cc = c + sc[s] if 0 <= rr < rows and 0 <= cc < cols: value = image[rr, cc] if value < local_min: local_min = value out[r, c] = local_min return np.asarray(out) pythran-0.10.0+ds2/pythran/tests/scipy/000077500000000000000000000000001416264035500177255ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/scipy/_calc_binned_statistic.py000066400000000000000000000043451416264035500247540ustar00rootroot00000000000000import numpy as np import builtins #pythran export _create_binned_data(int64[:], int[:], float[:,:], int) #pythran export _create_binned_data(int64[:], int[:], int[:,:], int) #pythran export 
_create_binned_data(intc[:], int[:], float[:,:], int) #pythran export _create_binned_data(intc[:], int[:], int[:,:], int) #pythran export _create_binned_data(int[:], int[:], float[:,:], int) #pythran export _create_binned_data(int[:], int[:], int[:,:], int) def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv): """ Create hashmap of bin ids to values in bins key: bin number value: list of binned data """ bin_map = dict() for i in unique_bin_numbers: bin_map[i] = [] for i in builtins.range(len(bin_numbers)): bin_map[bin_numbers[i]].append(values[vv, i]) return bin_map #pythran export _calc_binned_statistic(int, int64[:], float[:,:], float[:,:], str) #pythran export _calc_binned_statistic(int, int64[:], float[:,:], int[:,:], str) #pythran export _calc_binned_statistic(int, intc[:], float[:,:], float[:,:], str) #pythran export _calc_binned_statistic(int, intc[:], float[:,:], int[:,:], str) #pythran export _calc_binned_statistic(int, int[:], float[:,:], float[:,:], str) #pythran export _calc_binned_statistic(int, int[:], float[:,:], int[:,:], str) def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func): unique_bin_numbers = np.unique(bin_numbers) for vv in builtins.range(Vdim): bin_map = _create_binned_data(bin_numbers, unique_bin_numbers, values, vv) for i in unique_bin_numbers: # if the stat_func is np.std, calc std only when binned data is 2 # or more for speed up. 
if stat_func == 'std': if len(bin_map) >= 2: result[vv, i] = np.std(np.array(bin_map[i])) elif stat_func == 'median': result[vv, i] = np.median(np.array(bin_map[i])) elif stat_func == 'min': result[vv, i] = np.min(np.array(bin_map[i])) elif stat_func == 'max': result[vv, i] = np.max(np.array(bin_map[i])) else: raise Exception('Exception: {stat_func} is not supported')pythran-0.10.0+ds2/pythran/tests/scipy/_hypotests.py000066400000000000000000000031741416264035500225050ustar00rootroot00000000000000#pythran export _Aij(float[:,:], int, int) #pythran export _Aij(int[:,:], int, int) def _Aij(A, i, j): """Sum of upper-left and lower right blocks of contingency table.""" # See `somersd` References [2] bottom of page 309 return A[:i, :j].sum() + A[i+1:, j+1:].sum() #pythran export _Dij(float[:,:], int, int) #pythran export _Dij(int[:,:], int, int) def _Dij(A, i, j): """Sum of lower-left and upper-right blocks of contingency table.""" # See `somersd` References [2] bottom of page 309 return A[i+1:, :j].sum() + A[:i, j+1:].sum() #pythran export _P(float[:,:]) #pythran export _P(int[:,:]) def _P(A): """Twice the number of concordant pairs, excluding ties.""" # See `somersd` References [2] bottom of page 309 m, n = A.shape count = 0 for i in range(m): for j in range(n): count += A[i, j]*_Aij(A, i, j) return count #pythran export _Q(float[:,:]) #pythran export _Q(int[:,:]) def _Q(A): """Twice the number of discordant pairs, excluding ties.""" # See `somersd` References [2] bottom of page 309 m, n = A.shape count = 0 for i in range(m): for j in range(n): count += A[i, j]*_Dij(A, i, j) return count #pythran export _a_ij_Aij_Dij2(float[:,:]) #pythran export _a_ij_Aij_Dij2(int[:,:]) def _a_ij_Aij_Dij2(A): """A term that appears in the ASE of Kendall's tau and Somers' D.""" # See `somersd` References [2] section 4: Modified ASEs to test the null hypothesis... 
m, n = A.shape count = 0 for i in range(m): for j in range(n): count += A[i, j]*(_Aij(A, i, j) - _Dij(A, i, j))**2 return countpythran-0.10.0+ds2/pythran/tests/scipy/_rbfinterp.py000066400000000000000000000141361416264035500224360ustar00rootroot00000000000000import numpy as np def linear(r): return -r def thin_plate_spline(r): if r == 0: return 0.0 else: return r**2*np.log(r) def cubic(r): return r**3 def quintic(r): return -r**5 def multiquadric(r): return -np.sqrt(r**2 + 1) def inverse_multiquadric(r): return 1/np.sqrt(r**2 + 1) def inverse_quadratic(r): return 1/(r**2 + 1) def gaussian(r): return np.exp(-r**2) NAME_TO_FUNC = { "linear": linear, "thin_plate_spline": thin_plate_spline, "cubic": cubic, "quintic": quintic, "multiquadric": multiquadric, "inverse_multiquadric": inverse_multiquadric, "inverse_quadratic": inverse_quadratic, "gaussian": gaussian } def kernel_vector(x, y, kernel_func, out): """Evaluate RBFs, with centers at `y`, at the point `x`.""" for i in range(y.shape[0]): out[i] = kernel_func(np.linalg.norm(x - y[i])) def polynomial_vector(x, powers, out): """Evaluate monomials, with exponents from `powers`, at the point `x`.""" for i in range(powers.shape[0]): out[i] = np.prod(x**powers[i]) def kernel_matrix(x, kernel_func, out): """Evaluate RBFs, with centers at `x`, at `x`.""" for i in range(x.shape[0]): for j in range(i+1): out[i, j] = kernel_func(np.linalg.norm(x[i] - x[j])) out[j, i] = out[i, j] def polynomial_matrix(x, powers, out): """Evaluate monomials, with exponents from `powers`, at `x`.""" for i in range(x.shape[0]): for j in range(powers.shape[0]): out[i, j] = np.prod(x[i]**powers[j]) # pythran export _kernel_matrix(float[:, :], str) def _kernel_matrix(x, kernel): """Return RBFs, with centers at `x`, evaluated at `x`.""" out = np.empty((x.shape[0], x.shape[0]), dtype=float) kernel_func = NAME_TO_FUNC[kernel] kernel_matrix(x, kernel_func, out) return out # pythran export _polynomial_matrix(float[:, :], int[:, :]) def 
_polynomial_matrix(x, powers): """Return monomials, with exponents from `powers`, evaluated at `x`.""" out = np.empty((x.shape[0], powers.shape[0]), dtype=float) polynomial_matrix(x, powers, out) return out # pythran export _build_system(float[:, :], # float[:, :], # float[:], # str, # float, # int[:, :]) def _build_system(y, d, smoothing, kernel, epsilon, powers): """Build the system used to solve for the RBF interpolant coefficients. Parameters ---------- y : (P, N) float ndarray Data point coordinates. d : (P, S) float ndarray Data values at `y`. smoothing : (P,) float ndarray Smoothing parameter for each data point. kernel : str Name of the RBF. epsilon : float Shape parameter. powers : (R, N) int ndarray The exponents for each monomial in the polynomial. Returns ------- lhs : (P + R, P + R) float ndarray Left-hand side matrix. rhs : (P + R, S) float ndarray Right-hand side matrix. shift : (N,) float ndarray Domain shift used to create the polynomial matrix. scale : (N,) float ndarray Domain scaling used to create the polynomial matrix. """ p = d.shape[0] s = d.shape[1] r = powers.shape[0] kernel_func = NAME_TO_FUNC[kernel] # Shift and scale the polynomial domain to be between -1 and 1 mins = np.min(y, axis=0) maxs = np.max(y, axis=0) shift = (maxs + mins)/2 scale = (maxs - mins)/2 # The scale may be zero if there is a single point or all the points have # the same value for some dimension. Avoid division by zero by replacing # zeros with ones. scale[scale == 0.0] = 1.0 yeps = y*epsilon yhat = (y - shift)/scale # Transpose to make the array fortran contiguous. This is required for # dgesv to not make a copy of lhs. lhs = np.empty((p + r, p + r), dtype=float).T kernel_matrix(yeps, kernel_func, lhs[:p, :p]) polynomial_matrix(yhat, powers, lhs[:p, p:]) lhs[p:, :p] = lhs[:p, p:].T lhs[p:, p:] = 0.0 for i in range(p): lhs[i, i] += smoothing[i] # Transpose to make the array fortran contiguous. 
rhs = np.empty((s, p + r), dtype=float).T rhs[:p] = d rhs[p:] = 0.0 return lhs, rhs, shift, scale # pythran export _evaluate(float[:, :], # float[:, :], # str, # float, # int[:, :], # float[:], # float[:], # float[:, :]) def _evaluate(x, y, kernel, epsilon, powers, shift, scale, coeffs): """Evaluate the RBF interpolant at `x`. Parameters ---------- x : (Q, N) float ndarray Interpolation point coordinates. y : (P, N) float ndarray Data point coordinates. kernel : str Name of the RBF. epsilon : float Shape parameter. powers : (R, N) int ndarray The exponents for each monomial in the polynomial. shift : (N,) float ndarray Shifts the polynomial domain for numerical stability. scale : (N,) float ndarray Scales the polynomial domain for numerical stability. coeffs : (P + R, S) float ndarray Coefficients for each RBF and monomial. Returns ------- (Q, S) float ndarray """ q = x.shape[0] p = y.shape[0] r = powers.shape[0] s = coeffs.shape[1] kernel_func = NAME_TO_FUNC[kernel] yeps = y*epsilon xeps = x*epsilon xhat = (x - shift)/scale out = np.zeros((q, s), dtype=float) vec = np.empty((p + r,), dtype=float) for i in range(q): kernel_vector(xeps[i], yeps, kernel_func, vec[:p]) polynomial_vector(xhat[i], powers, vec[p:]) # Compute the dot product between coeffs and vec. 
Do not use np.dot # because that introduces build complications with BLAS (see # https://github.com/serge-sans-paille/pythran/issues/1346) for j in range(s): for k in range(p + r): out[i, j] += coeffs[k, j]*vec[k] return out pythran-0.10.0+ds2/pythran/tests/scipy/_spectral.py000066400000000000000000000040271416264035500222560ustar00rootroot00000000000000# Author: Pim Schellart # 2010 - 2011 """Tools for spectral analysis of unequally sampled signals.""" import numpy as np #pythran export _lombscargle(float64[], float64[], float64[]) ##runas import numpy; x = numpy.arange(2., 12.); y = numpy.arange(1., 11.); z = numpy.arange(3., 13.); _lombscargle(x, y, z) def _lombscargle(x, y, freqs): """ _lombscargle(x, y, freqs) Computes the Lomb-Scargle periodogram. Parameters ---------- x : array_like Sample times. y : array_like Measurement values (must be registered so the mean is zero). freqs : array_like Angular frequencies for output periodogram. Returns ------- pgram : array_like Lomb-Scargle periodogram. Raises ------ ValueError If the input arrays `x` and `y` do not have the same shape. See also -------- lombscargle """ # Check input sizes if x.shape != y.shape: raise ValueError("Input arrays do not have the same size.") # Create empty array for output periodogram pgram = np.empty_like(freqs) c = np.empty_like(x) s = np.empty_like(x) for i in range(freqs.shape[0]): xc = 0. xs = 0. cc = 0. ss = 0. cs = 0. 
c[:] = np.cos(freqs[i] * x) s[:] = np.sin(freqs[i] * x) for j in range(x.shape[0]): xc += y[j] * c[j] xs += y[j] * s[j] cc += c[j] * c[j] ss += s[j] * s[j] cs += c[j] * s[j] if freqs[i] == 0: raise ZeroDivisionError() tau = np.arctan2(2 * cs, cc - ss) / (2 * freqs[i]) c_tau = np.cos(freqs[i] * tau) s_tau = np.sin(freqs[i] * tau) c_tau2 = c_tau * c_tau s_tau2 = s_tau * s_tau cs_tau = 2 * c_tau * s_tau pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \ (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \ ((c_tau * xs - s_tau * xc)**2 / \ (c_tau2 * ss - cs_tau * cs + s_tau2 * cc))) return pgram pythran-0.10.0+ds2/pythran/tests/scipy/_sum_abs_axis0.py000066400000000000000000000004651416264035500232000ustar00rootroot00000000000000import numpy as np #pythran export _sum_abs_axis0(float[:,:]) def _sum_abs_axis0(X): block_size = 2**20 r = None for j in range(0, X.shape[0], block_size): y = np.sum(np.abs(X[j:j+block_size]), axis=0) if r is None: r = y else: r += y return r pythran-0.10.0+ds2/pythran/tests/scipy/hausdorff.py000066400000000000000000000054421416264035500222650ustar00rootroot00000000000000# # Copyright (C) Tyler Reddy, Richard Gowers, and Max Linke, 2016 # # Distributed under the same BSD license as Scipy. 
# # adapted from scipy's cython version import numpy as np import numpy.random as random #pythran export directed_hausdorff(float64[:,:], float64[:,:], int) #pythran export directed_hausdorff_noshuffle(float64[:,:], float64[:,:]) #runas import numpy as np; x = np.arange((100 * 100.)).reshape(100,-1); y = np.ones((100,100)) * 3; directed_hausdorff_noshuffle(x, y) def directed_hausdorff(ar1, ar2, seed=0): N1, data_dims = ar1.shape N2 = ar2.shape[0] i_store = j_store = i_ret = j_ret = 0 # shuffling the points in each array generally increases the likelihood of # an advantageous break in the inner search loop and never decreases the # performance of the algorithm random.seed(seed) resort1 = np.arange(N1) resort2 = np.arange(N2) random.shuffle(resort1) random.shuffle(resort2) ar1 = np.asarray(ar1)[resort1] ar2 = np.asarray(ar2)[resort2] cmax = 0 for i in range(N1): cmin = np.inf for j in range(N2): d = np.sum((ar1[i] - ar2[j]) ** 2) # faster performance with square of distance # avoid sqrt until very end if d < cmax: # break out of `for j` loop break if d < cmin: # always true on first iteration of for-j loop cmin = d i_store = i j_store = j else: # always true on first iteration of for-j loop, after that only # if d >= cmax if cmin != np.inf and cmin > cmax: cmax = cmin i_ret = i_store j_ret = j_store return np.sqrt(cmax), resort1[i_ret], resort2[j_ret] def directed_hausdorff_noshuffle(ar1, ar2, seed=0): N1, data_dims = ar1.shape N2 = ar2.shape[0] i_store = j_store = i_ret = j_ret = 0 resort1 = np.arange(N1) resort2 = np.arange(N2) ar1 = np.asarray(ar1)[resort1] ar2 = np.asarray(ar2)[resort2] cmax = 0 for i in range(N1): cmin = np.inf for j in range(N2): d = np.sum((ar1[i] - ar2[j]) ** 2) # faster performance with square of distance # avoid sqrt until very end if d < cmax: # break out of `for j` loop break if d < cmin: # always true on first iteration of for-j loop cmin = d i_store = i j_store = j else: # always true on first iteration of for-j loop, after that only # 
if d >= cmax if cmin != np.inf and cmin > cmax: cmax = cmin i_ret = i_store j_ret = j_store return np.sqrt(cmax), resort1[i_ret], resort2[j_ret] pythran-0.10.0+ds2/pythran/tests/scipy/max_len_seq_inner.py000066400000000000000000000015051416264035500237660ustar00rootroot00000000000000import numpy as np #pythran export max_len_seq_inner(int64 [], int8[], int, int, int8[]) #runas import numpy as np; x = np.arange(10, dtype=np.int64); y = np.arange(10, dtype=np.int8); z = np.arange(10, dtype=np.int8); max_len_seq_inner(x, y, 10, 2, z) def max_len_seq_inner(taps, state, nbits, length, seq): # Here we compute MLS using a shift register, indexed using a ring buffer # technique (faster than using something like np.roll to shift) n_taps = taps.shape[0] idx = 0 for i in range(length): feedback = state[idx] seq[i] = feedback for tap in taps: feedback ^= state[(tap + idx) % nbits] state[idx] = feedback idx = (idx + 1) % nbits # state must be rolled s.t. next run, when idx==0, it's in the right place return np.roll(state, -idx, axis=0) pythran-0.10.0+ds2/pythran/tests/scipy/solve_toeplitz.py000066400000000000000000000067321416264035500233710ustar00rootroot00000000000000# Author: Robert T. McGibbon, December 2014 from numpy import zeros, asarray, complex128, float64, zeros_like #pythran export levinson(float64[], float64[]) #pythran export levinson(complex128[], complex128[]) #runas import numpy as np; x = np.arange(1, 4.)*7; y = np.arange(-11., -9.) def levinson(a, b): """Solve a linear Toeplitz system using Levinson recursion. Parameters ---------- a : array, dtype=double or complex128, shape=(2n-1,) The first column of the matrix in reverse order (without the diagonal) followed by the first (see below) b : array, dtype=double or complex128, shape=(n,) The right hand side vector. Both a and b must have the same type (double or complex128). 
Notes ----- For example, the 5x5 toeplitz matrix below should be represented as the linear array ``a`` on the right :: [ a0 a1 a2 a3 a4 ] [ a-1 a0 a1 a2 a3 ] [ a-2 a-1 a0 a1 a2 ] -> [a-4 a-3 a-2 a-1 a0 a1 a2 a3 a4] [ a-3 a-2 a-1 a0 a1 ] [ a-4 a-3 a-2 a-1 a0 ] Returns ------- x : arrray, shape=(n,) The solution vector reflection_coeff : array, shape=(n+1,) Toeplitz reflection coefficients. When a is symmetric Toeplitz and ``b`` is ``a[n:]``, as in the solution of autoregressive systems, then ``reflection_coeff`` also correspond to the partial autocorrelation function. """ # Adapted from toeplitz.f90 by Alan Miller, accessed at # http://jblevins.org/mirror/amiller/toeplitz.f90 # Released under a Public domain declaration. n = b.shape[0] x = zeros_like(b) # result g = zeros_like(b) # workspace h = zeros_like(b) # workspace reflection_coeff = zeros(n+1, dtype=b.dtype) # history assert len(a) == (2*n) - 1 if a[n-1] == 0: raise ValueError('Singular principal minor') x[0] = b[0] / a[n-1] reflection_coeff[0] = 1 reflection_coeff[1] = x[0] if (n == 1): return asarray(x), asarray(reflection_coeff) g[0] = a[n-2] / a[n-1] h[0] = a[n] / a[n-1] for m in range(1, n): # Compute numerator and denominator of x[m] x_num = -b[m] x_den = -a[n-1] for j in range(m): nmj = n + m - (j+1) x_num = x_num + a[nmj] * x[j] x_den = x_den + a[nmj] * g[m-j-1] if x_den == 0: raise ValueError('Singular principal minor') x[m] = x_num / x_den reflection_coeff[m+1] = x[m] # Compute x for j in range(m): x[j] = x[j] - x[m] * g[m-j-1] if m == n-1: return asarray(x), asarray(reflection_coeff) # Compute the numerator and denominator of g[m] and h[m] g_num = -a[n-m-2] h_num = -a[n+m] g_den = -a[n-1] for j in range(m): g_num = g_num + a[n+j-m-1] * g[j] h_num = h_num + a[n+m-j-1] * h[j] g_den = g_den + a[n+j-m-1] * h[m-j-1] if g_den == 0.0: raise ValueError("Singular principal minor") # Compute g and h g[m] = g_num / g_den h[m] = h_num / x_den k = m - 1 m2 = (m + 1) >> 1 c1 = g[m] c2 = h[m] for j in range(m2): 
gj = g[j] gk = g[k] hj = h[j] hk = h[k] g[j] = gj - (c1 * hk) g[k] = gk - (c1 * hj) h[j] = hj - (c2 * gk) h[k] = hk - (c2 * gj) k -= 1 pythran-0.10.0+ds2/pythran/tests/scipy/spectral.py000066400000000000000000000032351416264035500221170ustar00rootroot00000000000000# Author: Pim Schellart # 2010 - 2011 """Tools for spectral analysis of unequally sampled signals.""" import numpy as np #pythran export lombscargle(float64[], float64[], float64[]) #runas import numpy; x = numpy.arange(2., 12.); y = numpy.arange(1., 11.); z = numpy.arange(3., 13.); lombscargle(x, y, z) def lombscargle(x, y, freqs): """ _lombscargle(x, y, freqs) Computes the Lomb-Scargle periodogram. Parameters ---------- x : array_like Sample times. y : array_like Measurement values (must be registered so the mean is zero). freqs : array_like Angular frequencies for output periodogram. Returns ------- pgram : array_like Lomb-Scargle periodogram. Raises ------ ValueError If the input arrays `x` and `y` do not have the same shape. 
See also -------- lombscargle """ # Check input sizes if x.shape != y.shape: raise ValueError("Input arrays do not have the same size.") # Local variables c = np.cos(freqs[:, None] * x) s = np.sin(freqs[:, None] * x) xc = np.sum(y * c, axis=1) xs = np.sum(y * s, axis=1) cc = np.sum(c ** 2, axis=1) ss = np.sum(s * s, axis=1) cs = np.sum(c * s, axis=1) tau = np.arctan2(2 * cs, cc - ss) / (2 * freqs) c_tau = np.cos(freqs * tau) s_tau = np.sin(freqs * tau) c_tau2 = c_tau * c_tau s_tau2 = s_tau * s_tau cs_tau = 2 * c_tau * s_tau pgram = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \ (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \ ((c_tau * xs - s_tau * xc)**2 / \ (c_tau2 * ss - cs_tau * cs + s_tau2 * cc))) return pgram pythran-0.10.0+ds2/pythran/tests/test_advanced.py000066400000000000000000000405541416264035500217640ustar00rootroot00000000000000#encoding: utf8 from pythran.tests import TestEnv from unittest import skip, skipIf import numpy from pythran.typing import * class TestAdvanced(TestEnv): def test_generator_enumeration(self): code = ''' def dummy_generator(l): for i in l: yield i def generator_enumeration(begin, end): return [i for i in enumerate(dummy_generator(range(begin,end)))]''' self.run_test(code, 2, 10, generator_enumeration=[int, int]) def test_augassign_floordiv(self): self.run_test("def augassign_floordiv(i,j): k=i ; k//=j; return k", 2, 5, augassign_floordiv=[int, int]) def test_builtin_constructors(self): self.run_test("def builtin_constructors(l): return list(map(int,l))", [1.5, 2.5], builtin_constructors=[List[float]]) def test_tuple_sum(self): self.run_test("def tuple_sum(tpl): return sum(tpl)", (1, 2, 3.5), tuple_sum=[Tuple[int, int, float]]) def test_minus_unary_minus(self): self.run_test("def minus_unary_minus(a): return a - -1", 1, minus_unary_minus=[int]) def test_bool_op_casting(self): self.run_test(''' def bool_op_casting(): l=[] L=[1] M=[2] if (l and L) or M: return (l and L) or M else: return M''', bool_op_casting=[]) def 
test_map_on_generator(self): self.run_test('def map_on_generator(l): return list(map(float,(x*x for x in l)))', [1,2,3], map_on_generator=[List[int]]) def test_map2_on_generator(self): self.run_test('def map2_on_generator(l): return list(map(lambda x,y : x*y, l, (y for x in l for y in l if x < 1)))', [0,1,2,3], map2_on_generator=[List[int]]) def test_enumerate_on_generator(self): self.run_test("def enumerate_on_generator(n): return list(map(lambda z: z[0], enumerate((y for x in range(n) for y in range(x)))))", 5, enumerate_on_generator=[int]) def test_enumerate_iterate(self): self.run_test(""" def enumerate_iterate(n): s = 0 for x in enumerate(n): for y in x: s += y return s""", [5, 6], enumerate_iterate=[List[int]]) def test_max_interface_arity(self): self.run_test('def max_interface_arity({0}):pass'.format(', '.join('_'+str(i) for i in range(42))), *list(range(42)), max_interface_arity=[int]*42) def test_max_kwonly_key(self): self.run_test('def max_kwonly_key(x): return max(x, key=lambda x:-x)', list(range(42)), max_kwonly_key=[List[int]]) def test_multiple_max(self): self.run_test('def multiple_max(i,j,k): return max(i,j,k)', 1, 1.5, False, multiple_max=[int, float, bool]) def test_zip_on_generator(self): self.run_test('def zip_on_generator(n): return list(zip((i for i in range(n)), (i*2 for i in range(1,n+1))))', 5, zip_on_generator=[int]) def test_parallel_enumerate(self): self.run_test('def parallel_enumerate(l):\n k = [0]*(len(l) + 1)\n "omp parallel for"\n for i,j in enumerate(l):\n k[i+1] = j\n return k', list(range(1000)), parallel_enumerate=[List[int]]) def test_ultra_nested_functions(self): code = ''' def ultra_nested_function(n): def foo(y): def bar(t): return t return bar(y) return foo(n)''' self.run_test(code, 42, ultra_nested_function=[int]) def test_generator_sum(self): code = ''' def generator_sum(l0,l1): return sum(x*y for x,y in zip(l0,l1))''' self.run_test(code, list(range(10)), list(range(10)), generator_sum=[List[int],List[int]]) def 
test_tuple_to_list(self): self.run_test('def tuple_to_list(t): return list(t)', (1,2,3), tuple_to_list=[Tuple[int, int, int]]) def test_in_generator(self): self.run_test("def in_generator(n):return 1. in (i*i for i in range(n))", 5, in_generator=[int]) def test_tuple_unpacking_in_generator(self): code = ''' def foo(l): a, b = 1,0 yield a yield b def tuple_unpacking_in_generator(n): f = foo(range(n)) return 0 in f''' self.run_test(code, 10, tuple_unpacking_in_generator=[int]) def test_loop_tuple_unpacking_in_generator(self): code= ''' def foo(l): for i,j in enumerate(l): yield i,j def loop_tuple_unpacking_in_generator(n): f = foo(range(n)) return (0,0) in f''' self.run_test(code, 10, loop_tuple_unpacking_in_generator=[int]) def test_assign_in_except(self): code = ''' def assign_in_except(): try: a=1 except: a+=a return a''' self.run_test(code, assign_in_except=[]) def test_combiner_on_empty_list(self): code = ''' def b(l): l+=[1] return l def combiner_on_empty_list(): return b(list()) + b([])''' self.run_test(code, combiner_on_empty_list=[]) def test_dict_comprehension_with_tuple(self): self.run_test('def dict_comprehension_with_tuple(n): return { x:y for x,y in zip(range(n), range(1+n)) }', 10, dict_comprehension_with_tuple=[int]) def test_nested_comprehension_with_tuple(self): self.run_test('def nested_comprehension_with_tuple(l): return [[ x+y for x,y in sqrpoints ] for sqrpoints in l]', [[(x,x)]*5 for x in list(range(10))], nested_comprehension_with_tuple=[List[List[Tuple[int,int]]]]) def test_hashable_tuple(self): self.run_test('def hashable_tuple(): return { (1,"e", 2.5) : "r" }', hashable_tuple=[]) def test_conflicting_names(self): self.run_test('def map(): return 5', map=[]) def test_multiple_compares(self): self.run_test('def multiple_compares(x): return 1 < x < 2, 1 < x + 1 < 2', 0.5, multiple_compares=[float]) def test_default_arg0(self): self.run_test('def default_arg0(n=12): return n', default_arg0=[]) def test_default_arg1(self): self.run_test('def 
default_arg1(m,n=12): return m+n', 1, default_arg1=[int]) def test_default_arg2(self): self.run_test('def default_arg2(n=12): return n', 1, default_arg2=[int]) def test_default_arg3(self): self.run_test('def default_arg3(m,n=12): return m+n', 1, 2, default_arg3=[int,int]) def test_default_arg4(self): code = ''' import numpy as np def default_arg4(signal,sR): N = 30 F = 0. F2 = 22000 FF = 10 W = test2(sR, FF, N, F, F2) return W def test2(sr,N,M=128,F=0.0,F2=0,B=False,No=1): W = np.zeros(10) return W''' self.run_test(code, 1, 2, default_arg4=[int,int]) def test_default_arg5(self): self.run_test('import numpy\ndef default_arg5(m,n=-numpy.inf): return m, n', 1, default_arg5=[int]) def test_default_arg6(self): code = 'from numpy import empty\ndef default_arg6(x=empty(3)):pass' with self.assertRaises(SyntaxError): self.run_test(code, default_arg6=[]) @skip("lists as zeros parameter are not supported") def test_list_as_zeros_parameter(self): self.run_test('def list_as_zeros_parameter(n): from numpy import zeros ; return zeros([n,n])', 3, list_as_zeros_parameter=[int]) def test_add_arrays(self): self.run_test('def add_arrays(s): return (s,s) + (s,)', 1, add_arrays=[int]) def test_tuple_to_tuple(self): self.run_test('def tuple_to_tuple(t): return tuple((1, t))', '2', tuple_to_tuple=[str]) def test_array_to_tuple(self): self.run_test('def array_to_tuple(t): return tuple((1, t))', 2, array_to_tuple=[int]) def test_list_to_tuple(self): self.run_test('def list_to_tuple(t): return tuple([1, t])', 2, list_to_tuple=[int]) def test_tuple_to_shape(self): self.run_test('def tuple_to_shape(n): from numpy import zeros; return zeros((n,4))', 5, tuple_to_shape=[int]) def test_print_intrinsic(self): self.run_test('def print_intrinsic(): print(len)', print_intrinsic=[]) def test_function_redefinition(self): code = 'def function_redefinition(x):pass\ndef function_redefinition():pass' with self.assertRaises(SyntaxError): self.run_test(code, function_redefinition=[]) def 
test_global_redefinition(self): code = 'foo=0\nfoo=1\ndef global_redefinition(x):pass' with self.assertRaises(SyntaxError): self.run_test(code, global_redefinition=[]) def test_global_update(self): code = 'foo=[]\ndef global_update(x): x.append(1)' with self.assertRaises(SyntaxError): self.run_test(code, global_update=[]) def test_invalid_call0(self): code = 'def foo(x):pass\ndef invalid_call0(): return foo()' with self.assertRaises(SyntaxError): self.run_test(code, invalid_call0=[]) def test_invalid_call1(self): code = 'def foo(x=1):pass\ndef invalid_call1(l): return foo(l,l)' with self.assertRaises(SyntaxError): self.run_test(code, 1, invalid_call1=[int]) def test_invalid_call2(self): code = 'def foo(x):pass\ndef bar():pass\ndef invalid_call2(l): return (foo if l else bar)(l)' with self.assertRaises(SyntaxError): self.run_test(code, 1, invalid_call2=[int]) def test_ellipsis(self): code = 'def ellipsis_(x): return x[...,1]' with self.assertRaises(SyntaxError): self.run_test(code, numpy.ones((3,3)), ellipsis=[NDArray[float,:,:]]) def test_multiple_lambda(self): code = ''' def multiple_lambda(x): g = lambda : x return foo(g) def foo(t): g = lambda : 1 return t() + g() ''' self.run_test(code, 1, multiple_lambda=[int]) def test_function_with_non_ascii_docstring(self): code = ''' def function_with_non_ascii_docstring(): 'éàea' ''' self.run_test(code, function_with_non_ascii_docstring=[]) def test_matmul_operator(self): code = 'def matmul_operator(x, y): return x @ y' self.run_test( code, numpy.array([[1., 1.], [2., 2.]]), numpy.array([[0., 2.], [1., 3.]]), matmul_operator=[NDArray[float, :,:], NDArray[float, :,:]]) def test_generator_handler_name(self): code = ''' def foo(x): for i in range(x): if i > 1: break yield i def generator_handler_name(n): return list(foo(n))''' self.run_test(code, 3, generator_handler_name=[int]) def test_generator_handler_name2(self): code = ''' def foo(x): for i in ["1"] * x: if len(i) == 1: break yield i def generator_handler_name2(n): 
return list(foo(n))''' self.run_test(code, 3, generator_handler_name2=[int]) def test_builtin_slices(self): code = ''' def builtin_slices(x): s = slice(2, None, None) return (s.start, s.stop, s.step, s, x[s], x[slice(3)], x[slice(1,2)], x[slice(1,10,2)], x[slice(3, None)], x[slice(None,4)], x[slice(None,4, None)])''' self.run_test(code, numpy.arange(15), builtin_slices=[NDArray[int,:]]) def test_slicing_tuple(self): code = ''' def testFunc(): x=2 y=3 z=4 return x,y,z def slicing_tuple(n): x,y = testFunc()[0:n] return x,y''' self.run_test(code, 2, slicing_tuple=[int]) def test_static_list0(self): code = ''' def static_list0(n): s = list(n) s[1] = 1 return tuple(s)''' self.run_test(code, (2, 2), static_list0=[Tuple[int, int]]) def test_static_list1(self): code = ''' def foo(x, y): return len(y) + x def static_list1(n): s = list(n) s[1] = foo(len(s), s) return tuple(s)''' self.run_test(code, (2, 2), static_list1=[Tuple[int, int]]) def test_static_list2(self): code = ''' def static_list2(t0, t1): s = [slice(x, y) for x,y in zip(t0, t1)] return tuple(s)''' self.run_test(code, (2, 2), (3,3), static_list2=[Tuple[int, int], Tuple[int, int]]) def test_static_list3(self): code = ''' import numpy as np def StridedSlice(x,begins, ends, strides): slices = tuple([slice(b, e if e else None, s) for b, e, s in zip(begins,ends, strides)]) return x[slices] def static_list3(x): return StridedSlice(x,[0,2,3], [5,0,7], [1,1,1])''' self.run_test(code, numpy.arange(1000).reshape(10,10,10), static_list3=[NDArray[int, :,:,:]]) def test_static_list4(self): code = ''' import numpy as np def StridedSlice(x,begins, ends, strides): slices = tuple([slice(b, e if e else None, s) for b, e, s in zip(begins,ends, strides)]) return x[slices] def static_list4(x): return StridedSlice(x,np.array([0,2,3]), np.array([5,0,7]), [1,1,1])''' self.run_test(code, numpy.arange(1000).reshape(10,10,10), static_list4=[NDArray[int, :,:,:]]) def test_tuple_slicing0(self): code = ''' def tuple_slicing0(n): X = 
tuple([[1,2,3,4],[1,2,3,4]]) B = X[0:1] B[0][3]=n return B, X''' self.run_test(code, 20, tuple_slicing0=[int]) def test_tuple_slicing1(self): code = ''' def tuple_slicing1(n): X = tuple([[1,2,3,4],[1,2,3,4], [5,6,7,8]]) B = X[0::2] B[0][3]=n return B, X''' self.run_test(code, 20, tuple_slicing1=[int]) def test_reserved_identifier0(self): code = ''' def reserved_identifier0(x): if x == 1: case = 1 else: case = 2 return case''' self.run_test(code, 3, reserved_identifier0=[int]) def test_global_effects_partial0(self): code = ''' g = [1, 2] def return_partial(x): def partial(_): return x return partial def call_partial(fct): return return_partial(fct) all_commands = call_partial(g) def global_effects_partial0(l): return all_commands(l)''' self.run_test(code, 3, global_effects_partial0=[int]) def test_dynamic_tuple_compare(self): code = ''' def dynamic_tuple_compare(x, y): y = tuple(y) x = tuple(x) return x < y, x <= y, x > y, x >= y''' self.run_test(code, [1, 2], [1, 3], dynamic_tuple_compare=[List[int], List[int]]) def test_annotations(self): code = ''' import numpy as np def annotations(x: np.ndarray) -> np.ndarray: return x''' self.run_test(code, numpy.ones(1), annotations=[NDArray[float, :]]) def test_tuple_indexable_container(self): code = """ import numpy as np def A_I(): s = np.array([0.70817816, 0.68863678], dtype=np.float64) m = np.array([-1.11312199, -0.99629629], dtype=np.float64) self = tuple([100, s, m]) return self def A_F1(self, Input_x): Input_x = (Input_x - self[2]) / self[1] return Input_x def B_I(): self = tuple([0.0, 0]) return self def C_I(sRate): a = A_I() b = B_I() self = tuple([0, sRate, 0, a, b]) return self def C_Test(self): F = np.zeros((1, 1, 1)) ret = A_F1(self[3], F.astype(np.float32)) return ret def tuple_indexable_container(n): TD = C_I(n) return C_Test(TD)""" self.run_test(code, 44100, tuple_indexable_container=[int]) 
pythran-0.10.0+ds2/pythran/tests/test_base.py000066400000000000000000000724511416264035500211320ustar00rootroot00000000000000import numpy import pytest import sys import unittest from pythran.tests import TestEnv from pythran.typing import * class TestBase(TestEnv): def test_pass(self): self.run_test("def pass_(a):pass", 1, pass_=[int]) def test_empty_return(self): self.run_test("def empty_return(a,b,c):return", 1,1.,True, empty_return=[int,float,bool]) def test_identity(self): self.run_test("def identity(a): return a", 1.5, identity=[float]) def test_compare(self): self.run_test("def compare(a,b,c):\n if a < b < c: return a\n else: return b != c", 1,2,3, compare=[int, int, int]) def test_arithmetic(self): self.run_test("def arithmetic(a,b,c): return a+b*c", 1,2,3.3, arithmetic=[int,int, float]) def test_boolop(self): self.run_test("def boolop(a,b,c): return a and b or c", True, True, False, boolop=[bool,bool, bool]) def test_operator(self): self.run_test("def operator_(a,b,c): return (a+b-b*a//(a%b)**(a<>b|b^a&a//b))/c",1,2,3., operator_=[int,int, float]) def test_unaryop(self): self.run_test("def unaryop(a): return not(~(+(-a)))", 1, unaryop=[int]) def test_expression(self): self.run_test("def expression(a,b,c): a+b*c", 1,2,3.3, expression=[int,int, float]) def test_recursion1(self): code=""" def fibo(n): return n if n <2 else fibo(n-1) + fibo(n-2) def fibo2(n): return fibo2(n-1) + fibo2(n-2) if n > 1 else n """ self.run_test(code, 4, fibo=[int]) def test_recursion2(self): code=""" def fibo(n): return n if n <2 else fibo(n-1) + fibo(n-2) def fibo2(n): return fibo2(n-1) + fibo2(n-2) if n > 1 else n """ self.run_test(code, 4., fibo2=[float]) def test_manual_list_comprehension(self): self.run_test("def f(l):\n ll=list()\n for k in l:\n ll+=[k]\n return ll\ndef manual_list_comprehension(l): return f(l)", [1,2,3], manual_list_comprehension=[List[int]]) def test_list_comprehension(self): self.run_test("def list_comprehension(l): return [ x*x for x in l ]", [1,2,3], 
list_comprehension=[List[int]]) def test_dict_comprehension(self): self.run_test("def dict_comprehension(l): return { i: 1 for i in l if len(i)>1 }", ["1","12","123"], dict_comprehension=[List[str]]) def test_filtered_list_comprehension(self): self.run_test("def filtered_list_comprehension(l): return [ x*x for x in l if x > 1 if x <10]", [1,2,3], filtered_list_comprehension=[List[int]]) def test_multilist_comprehension(self): self.run_test("def multilist_comprehension(l): return [ x*y for x in l for y in l]", [1,2,3], multilist_comprehension=[List[int]]) def test_zipped_list_comprehension(self): self.run_test("def zipped_list_comprehension(l): return [ x*y for x,y in zip(l,l) ]", [1,2,3], zipped_list_comprehension=[List[int]]) def test_zip(self): self.run_test("def zip_(l0,l1): return list(zip(l0,l1))", [1,2,3],["one", "two", "three"], zip_=[List[int], List[str]]) def test_multizip(self): self.run_test("def multizip(l0,l1): return list(zip(l0,zip(l0,l1)))", [1,2,3],["one", "two", "three"], multizip=[List[int], List[str]]) def test_reduce(self): self.run_test("def reduce_(l): from functools import reduce; return reduce(lambda x,y:x+y, l)", [0.,1.1,2.2,3.3], reduce_=[List[float]]) def test_another_reduce(self): code = ''' def another_reduce(l0,l1): from functools import reduce return reduce(lambda x,y:x+y[0]+y[1], zip(l0, l1),0) ''' self.run_test(code, [0.4,1.4,2.4,3.4], [0.,1.1,2.2,3.3], another_reduce=[List[float], List[float]]) def test_sum(self): self.run_test("def sum_(l): return sum(l)", [0.,1.1,2.2,3.3], sum_=[List[float]]) def test_multisum(self): self.run_test("def multisum(l0, l1): return sum(l0) + sum(l1)", [0.,1.1,2.2,3.3],[1,2,3], multisum=[List[float], List[int]]) def test_max(self): self.run_test("def max_(l):return max(l)", [ 1.1, 2.2 ], max_=[List[float]]) def test_multimax(self): self.run_test("def multimax(l,v):return max(v,max(l))", [ 1.1, 2.2 ], 1, multimax=[List[float],int]) def test_min(self): self.run_test("def min_(l):return min(l)", [ 1.1, 
2.2 ], min_=[List[float]]) def test_multimin(self): self.run_test("def multimin(l,v):return min(v,min(l))", [ 1.1, 2.2 ], 3, multimin=[List[float],int]) def test_map(self): self.run_test("def map_(l0, l1, v): return list(map(lambda x,y:x*v+y, l0, l1))", [0,1,2], [0.,1.1,2.2], 2, map_=[List[int], List[float], int]) def test_multimap(self): self.run_test("def multimap(l0, l1, v): return list(map(lambda x,y:x*v+y, l0, map(lambda z:z+1,l1)))", [0,1,2], [0.,1.1,2.2], 2, multimap=[List[int], List[float], int]) def test_intrinsic_map(self): self.run_test("def intrinsic_map(l): return list(map(max,l))",[[0,1,2],[2,0,1]], intrinsic_map=[List[List[int]]]) def test_range1(self): self.run_test("def range1_(e): return list(range(e))", 3, range1_=[int]) def test_range2(self): self.run_test("def range2_(b,e): return list(range(b,e))", 1, 3, range2_=[int,int]) def test_range3(self): self.run_test("def range3_(b,e,s): return list(range(b,e,s))", 8,3,-2, range3_=[int,int,int]) def test_range4(self): self.run_test("def range4_(b,e,s): return list(range(b,e,s))", 8,2,-2, range4_=[int,int,int]) def test_range5(self): self.run_test("def range5_(b,e,s): return list(range(b,e,s))", 3,8,1, range5_=[int,int,int]) def test_range6(self): self.run_test("def range6_(b,e,s): return list(range(b,e,s))", 3,8,3, range6_=[int,int,int]) def test_range7(self): self.run_test("def range7_(b,e,s): return list(range(b,e,s))", 3,9,3, range7_=[int,int,int]) def test_rrange1(self): self.run_test("def rrange1_(e): return list(reversed(range(e)))", 3, rrange1_=[int]) def test_rrange2(self): self.run_test("def rrange2_(b,e): return set(reversed(range(b,e)))", 1, 3, rrange2_=[int,int]) def test_rrange3(self): self.run_test("def rrange3_(b,e,s): return list(reversed(range(b,e,s)))", 8,3,-2, rrange3_=[int,int,int]) def test_rrange4(self): self.run_test("def rrange4_(b,e,s): return set(reversed(range(b,e,s)))", 8,2,-2, rrange4_=[int,int,int]) def test_rrange5(self): self.run_test("def rrange5_(b,e,s): return 
list(reversed(range(b,e,s)))", 3,8,1, rrange5_=[int,int,int]) def test_rrange6(self): self.run_test("def rrange6_(b,e,s): return set(reversed(range(b,e,s)))", 3,8,3, rrange6_=[int,int,int]) def test_rrange7(self): self.run_test("def rrange7_(b,e,s): return list(reversed(range(b,e,s)))", 3,9,3, rrange7_=[int,int,int]) def test_multirange(self): self.run_test("def multirange(i): return list(map(lambda x,y:y*x//2, range(1,i), range(i,1,-1)))", 3, multirange=[int]) def test_xrange1(self): self.run_test("def xrange1_(e): return list(range(e))", 3, xrange1_=[int]) def test_xrange2(self): self.run_test("def xrange2_(b,e): return list(range(b,e))", 1, 3, xrange2_=[int,int]) def test_xrange3(self): self.run_test("def xrange3_(b,e,s): return list(range(b,e,s))", 8,3,-2, xrange3_=[int,int,int]) def test_xrange4(self): self.run_test("def xrange4_(b,e,s): return list(range(b,e,s))", 3,8,1, xrange4_=[int,int,int]) def test_xrange5(self): self.run_test("def xrange5_(e): return max(range(e))", 3, xrange5_=[int]) def test_multixrange(self): self.run_test("def multixrange(i): return list(map(lambda x,y:y*x//2, range(1,i), range(i,1,-1)))", 3, multixrange=[int]) def test_print(self): self.run_test("def print_(a,b,c,d): print(a,b,c,d,'e',1.5)", [1.,2.,3.1],3,True, "d", print_=[List[float], int, bool, str]) def test_print_tuple(self): self.run_test("def print_tuple(a,b,c,d): t = (a,b,c,d,'e',1.5,); print(t)", [1.,2.,3.1],3,True, "d", print_tuple=[List[float], int, bool, str]) def test_fstring_noarg(self): self.run_test("def fstring_noarg(): return f'hello'", fstring_noarg=[]) def test_fstring_1arg(self): self.run_test("def fstring_1arg(a): return f'{a:s}'", "a", fstring_1arg=[str]) def test_fstring(self): self.run_test("def fstring(a,b,c): return f'a: {a: 4d}; b: {b:.2f}; c: {c:s}'", 2, 6.28, "c", fstring=[int, float, str]) def test_assign(self): self.run_test("def assign(a): b=2*a ; return b", 1, assign=[int]) def test_multiassign(self): self.run_test("def multiassign(a):\n c=b=a\n 
return c", [1], multiassign=[List[int]]) def test_divmax(self): self.run_test("def divmax_(a): b=4.*a;c=b/2;return max(c,b)", 1, divmax_=[int]) @unittest.skip("impossible to handle max(int, float) without conversion") def test_divmax_float(self): self.run_test("def divmax_float_(a): b=2*a;c=b/2;return max(c,b)", 1, divmax_float_=[int]) def test_if(self): self.run_test("def if_(a,b):\n if a>b: return a\n else: return b", 1, 1.1, if_=[int, float]) def test_while(self): self.run_test("def while_(a):\n while(a>0): a-=1\n return a", 8, while_=[int]) def test_for(self): self.run_test("def for_(l):\n s=0\n for i in l:\n s+=i\n return s", [0,1,2], for_=[List[int]]) def test_declarations(self): code = """ def declarations(): if True: a=0 while a <3: b = 1 a = b + a else: a=1 return a + b """ self.run_test(code, declarations=[]) def test_lambda(self): code = """ def lambda_(): l=lambda x,y: x+y return l(1,2) + l(1.2,2) """ self.run_test(code, lambda_=[]) def test_multidef1(self): self.run_test("def def10(): pass\ndef def11(): def10()", def11=[]) def test_multidef2(self): self.run_test("def def21(): def20()\ndef def20(): pass", def21=[]) def test_multidef3(self): self.run_test("def def31(): return 1\ndef def30(): return def31()", def31=[]) def test_multidef4(self): self.run_test("def def41(): return def40()\ndef def40(): return 1", def41=[]) def test_tuple(self): self.run_test("def tuple_(t): return t[0]+t[1]", (0,1), tuple_=[Tuple[int, int]]) def test_nested_list_comprehension(self): self.run_test("def nested_list_comprehension(): return [ [ x+y for x in range(10) ] for y in range(20) ]", nested_list_comprehension=[]) def test_delete(self): self.run_test("def delete_(v): del v", 1, delete_=[int]) def test_continue(self): self.run_test("def continue_():\n for i in range(3):continue\n return i", continue_=[]) def test_break(self): self.run_test("def break_():\n for i in range(3):break\n return i", break_=[]) def test_assert(self): with self.assertRaises(AssertionError): 
self.run_test("def assert_(i): assert i > 0", -1, assert_=[int]) def test_assert_with_msg(self): self.run_test("def assert_with_msg(i): assert i > 0, 'hell yeah'", 1, assert_with_msg=[int]) def test_import_from(self): self.run_test("def import_from(): from math import cos ; return cos(1.)", import_from=[]) def test_len(self): self.run_test("def len_(i,j,k): return len(i)+len(j)+len(k)", "youpi", [1,2],[], len_=[str,List[int], List[float]]) def test_in_string(self): self.run_test("def in_string(i,j): return i in j", "yo", "youpi", in_string=[str,str]) def test_not_in_string(self): self.run_test("def not_in_string(i,j): return i not in j", "yo", "youpi", not_in_string=[str,str]) def test_in_list(self): self.run_test("def in_list(i,j): return i in j", 1, [1,2,3], in_list=[int,List[int]]) def test_not_in_list(self): self.run_test("def not_in_list(i,j): return i not in j", False, [True, True, True], not_in_list=[bool,List[bool]]) def test_subscript(self): self.run_test("def subscript(l,i): l[0]=l[0]+l[i]", [1], 0, subscript=[List[int], int]) def test_nested_lists(self): self.run_test("def nested_lists(l,i): return l[0][i]", [[1]], 0, nested_lists=[List[List[int]],int]) def test_nested_tuples(self): self.run_test("def nested_tuples(l,i): return l[i][1]", [(0.1,1,)], 0, nested_tuples=[List[Tuple[float,int]],int]) def test_return_empty_list(self): self.run_test("def return_empty_list(): return list()", return_empty_list=[]) def test_empty_list(self): self.run_test("def empty_list(): a=[]", empty_list=[]) def test_multi_list(self): self.run_test("def multi_list(): return [[[2.0],[1,2,3]],[[2.0],[1,2,3]]]", multi_list=[]) def test_empty_tuple(self): self.run_test("def empty_tuple(): a=()", empty_tuple=[]) def test_multi_tuple(self): self.run_test("def multi_tuple(): return (1,('e',2.0),[1,2,3])", multi_tuple=[]) def test_augmented_assign0(self): self.run_test("def augmented_assign0(a):\n a+=1.5\n return a", 12, augmented_assign0=[int]) def test_augmented_assign1(self): 
self.run_test("def augmented_assign1(a):\n a-=1.5\n return a", 12, augmented_assign1=[int]) def test_augmented_assign2(self): self.run_test("def augmented_assign2(a):\n a*=1.5\n return a", 12, augmented_assign2=[int]) def test_augmented_assign3(self): self.run_test("def augmented_assign3(a):\n a/=1.5\n return a", 12, augmented_assign3=[int]) def test_augmented_assign4(self): self.run_test("def augmented_assign4(a):\n a %= 5\n return a", 12, augmented_assign4=[int]) def test_augmented_assign5(self): self.run_test("def augmented_assign5(a):\n a//=2\n return a", 12, augmented_assign5=[int]) def test_augmented_assign6(self): self.run_test("def augmented_assign6(a):\n a**=5\n return a", 12, augmented_assign6=[int]) def test_augmented_assign7(self): self.run_test("def augmented_assign7(a):\n a<<=1\n return a", 12, augmented_assign7=[int]) def test_augmented_assign8(self): self.run_test("def augmented_assign8(a):\n a>>=1\n return a", 12, augmented_assign8=[int]) def test_augmented_assign9(self): self.run_test("def augmented_assign9(a):\n a^=1\n return a", 12, augmented_assign9=[int]) def test_augmented_assignA(self): self.run_test("def augmented_assignA(a):\n a|=1\n return a", 12, augmented_assignA=[int]) def test_augmented_assignB(self): self.run_test("def augmented_assignB(a):\n a&=1\n return a", 12, augmented_assignB=[int]) def test_augmented_list_assign(self): self.run_test("def augmented_list_assign(l):\n a=list()\n a+=l\n return a", [1,2], augmented_list_assign=[List[int]]) def test_initialization_list(self): self.run_test("def initialization_list(): return [1, 2.3]", initialization_list=[]) def test_multiple_assign(self): self.run_test("def multiple_assign():\n a=0 ; b = a\n a=1.5\n return a, b", multiple_assign=[]) def test_multiple_return1(self): self.run_test("def multiple_return1(a):\n if True:return 1\n else:\n return a", 2, multiple_return1=[int]) def test_multiple_return2(self): self.run_test("def multiple_return2(a):\n if True:return 1\n else:\n b=a\n 
return b", 2, multiple_return2=[int]) def test_multiple_return3(self): self.run_test("def multiple_return3(a):\n if True:return 1\n else:\n b=a\n return a+b", 2, multiple_return3=[int]) def test_id(self): self.run_test("def id_(a):\n c=a\n return id(a)==id(c)", [1,2,3], id_=[List[int]]) def test_delayed_max(self): self.run_test("def delayed_max(a,b,c):\n m=max\n return m(a,b) + m(b,c)", 1, 2, 3.5, delayed_max=[int, int, float]) def test_slicing(self): self.run_test("def slicing(l): return l[0:1] + l[:-1]",[1,2,3,4], slicing=[List[int]]) def test_not_so_deep_recursive_calls(self): code=""" def a(i): return b(i) def b(i): return b(a(i-1)) if i else i def not_so_deep_recursive_calls(i):return b(i)""" self.run_test(code,3, not_so_deep_recursive_calls=[int]) def test_deep_recursive_calls(self): code=""" def a(i): return a(i-1) + b(i) if i else i def b(i): return b(i-1)+a(i-1) if i else c(i-1) if i+1 else i def c(i): return c(i-1) if i>0 else 1 def deep_recursive_calls(i):a(i)+b(i) +c(i)""" self.run_test(code,3, deep_recursive_calls=[int]) def test_dummy_nested_def(self): code=""" def dummy_nested_def(a): def the_dummy_nested_def(b):return b return the_dummy_nested_def(a)""" self.run_test(code,3, dummy_nested_def=[int]) def test_nested_def(self): code=""" def nested_def(a): def the_nested_def(b):return a+b return the_nested_def(3)""" self.run_test(code,3, nested_def=[int]) def test_none(self): self.run_test("def none_(l):\n if len(l)==0: return\n else: return l", [], none_=[List[int]]) def test_import(self): self.run_test("import math\ndef import_(): return math.cos(1)", import_=[]) def test_local_import(self): self.run_test("def local_import_(): import math;return math.cos(1)", local_import_=[]) def test_abs(self): """ Check builtins.abs behavior with float. """ self.run_test(""" def abs_(a): return abs(a)""", -1.3, abs_=[float]) def test_npabs(self): """ Check builtins.abs behavior with numpy.array. 
""" self.run_test(""" def npabs_(a): return abs(a)""", numpy.array([-1.3, 2.3, -4]), npabs_=[NDArray[float, :]]) def test_all(self): self.run_test("def all_(a): return all(a)", [True, False, True], all_=[List[bool]]) def test_any(self): self.run_test("def any_(a): return any(a)", [0, 1, 2], any_=[List[int]]) def test_bin(self): self.run_test("def bin_(a): return bin(a)", 54321, bin_=[int]) def test_bin2(self): self.run_test("def bin2_(a): return bin(a)", -543, bin2_=[int]) @pytest.mark.skipif(sys.platform == "win32", reason="Windows uses long of 32-bit, this test assumes they are 64-bit") def test_bin3(self): self.run_test("def bin3_(a): return bin(a)", -sys.maxsize - 1, bin3_=[int]) def test_bin4(self): self.run_test("def bin4_(a): return bin(a)", -1, bin4_=[int]) def test_chr(self): self.run_test("def chr_(a): return chr(a)", 42, chr_=[int]) def test_complex(self): self.run_test("def complex_(a): return complex(a)", 1, complex_=[int]) def test_divmod(self): self.run_test("def divmod_(a,b): return divmod(a,b)", 5, 2, divmod_=[int,int]) def test_enumerate(self): self.run_test("def enumerate_(l): return [ x for x in enumerate(l) ]", ["a","b","c"], enumerate_=[List[str]]) def test_enumerat2(self): self.run_test("def enumerate2_(l): return [ x for x in enumerate(l, 3) ]", ["a","b","c"], enumerate2_=[List[str]]) def test_filter(self): self.run_test("def filter_(l): return list(filter(lambda x:x%2, l))", [1,2,3], filter_=[List[int]]) def test_hex(self): self.run_test("def hex_(a): return hex(a)", 18, hex_=[int]) def test_oct(self): self.run_test("def oct_(a): return oct(a)", 18, oct_=[int]) def test_pow(self): self.run_test("def pow_(a): return pow(a,5)", 18, pow_=[int]) def test_pow_op0(self): self.run_test("def pow_op0(a): return a ** 0, a ** 1, a **2, a ** 3, a ** 4, a ** 5, a ** 6, a** 7", 18, pow_op0=[int]) def test_pow_op1(self): self.run_test("def pow_op1(a): return a ** -0, a ** -1, a **-2, a ** -3, a ** -4, a ** -5, a ** -6, a** -7", 18, pow_op1=[int]) def 
test_pow_op2(self): self.run_test("def pow_op2(a): return int(a ** a), a ** -a", 7, pow_op2=[int]) def test_reversed(self): self.run_test("def reversed_(l): return [x for x in reversed(l)]", [1,2,3], reversed_=[List[int]]) def test_round(self): self.run_test("def round_(v): return round(v) + round(v,2)", 0.1234, round_=[float]) def test_sorted0(self): self.run_test("def sorted0(l): return [x for x in sorted(l)]", [4, 1,2,3], sorted0=[List[int]]) def test_sorted1(self): self.run_test("def sorted1(l): return [x for x in sorted(l, reverse=True)]", [4, 1,2,3], sorted1=[List[int]]) def test_sorted2(self): self.run_test("def sorted2(l): return [x for x in sorted(l, key=lambda x:-x)]", [4, 1,2,3], sorted2=[List[int]]) def test_sorted3(self): self.run_test("def sorted3(l): return [x for x in sorted(l,reverse=True,key=lambda x:-x)]", [4, 1,2,3], sorted3=[List[int]]) def test_str(self): self.run_test("def str_(l): return str(l)", [1,2,3], str_=[List[int]]) def test_append(self): self.run_test("def append(): l=[] ; l.append(1) ; return l", append=[]) def test_append_in_call(self): self.run_test("def call(l):l.append(1.)\ndef append_in_call(): l=[] ; call(l) ; l.append(1) ; return l", append_in_call=[]) def test_complex_append_in_call(self): code=""" def foo(a,b): i = 3*b if not i in a: a.append(i) def complex_append_in_call(l1,l2): b = [] for x in l1: if not x in l2: foo(b,x)""" self.run_test(code, [1,2,3],[2],complex_append_in_call=[List[int],List[int]]) def test_complex_number(self): code=""" def complex_number(): c=complex(0,1) return c.real + c.imag""" self.run_test(code, complex_number=[]) def test_raise(self): self.run_test("def raise_():\n raise RuntimeError('pof')", raise_=[], check_exception=True) def test_complex_number_serialization(self): self.run_test("def complex_number_serialization(l): return [x+y for x in l for y in l]", [complex(1,0), complex(1,0)], complex_number_serialization=[List[complex]]) def test_complex_conj(self): self.run_test("def 
complex_conjugate(c): return c.conjugate()", complex(0,1), complex_conjugate=[complex]) def test_cast(self): self.run_test("def cast(i,f): return float(i)+int(f)", 1,1.5, cast=[int, float]) def test_subscript_assignment(self): code=""" def foo(A): A[0]=1.5 def subscript_assignment (): a=list(range(1)) foo(a) return a[0]""" self.run_test(code,subscript_assignment=[]) def test_conflicting_keywords(self): code=""" def export(template): return [ new*new for new in template ]""" self.run_test(code, [1], export=[List[int]]) def test_forelse(self): code=""" def forelse(): l=0 for i in range(10): if i > 3:break for j in range(10): if j > 5:break l+=1 else: l*=2 else: l*=3 return l""" self.run_test(code, forelse=[]) def test_tuples(self): self.run_test("def tuples(n): return ((1,2.,'e') , [ x for x in tuple([1,2,n])] )", 1, tuples=[int]) def test_reversed_slice(self): self.run_test("def reversed_slice(l): return l[::-2]", [0,1,2,3,4], reversed_slice=[List[int]]) def test_shadow_parameters(self): code=""" def shadow_parameters(l): if False:l=None return l""" self.run_test(code, [1], shadow_parameters=[List[int]]) def test_yielder(self): code=""" def iyielder(i): for k in range(i+18): yield k return def yielder(): f=iyielder(1) b=next(f) return [i*i for i in f]""" self.run_test(code, yielder=[]) def test_yield_with_default_param(self): code=""" def foo(a=1000): for i in range(10): yield a def yield_param(): it = foo() return [i for i in it]""" self.run_test(code, yield_param=[]) def test_set(self): code=""" def set_(a,b): S=set() S.add(a) S.add(b) return len(S)""" self.run_test(code, 1,2,set_=[int, int]) def test_in_set(self): code=""" def in_set(a): S=set() S.add(a) return a in S""" self.run_test(code, 1.5, in_set=[float]) def test_return_set(self): self.run_test("def return_set(l): return set(l)", [1,2,3,3], return_set=[List[int]]) def test_import_set(self): self.run_test("def import_set(l): l.add(1) ; return l", {0,2}, import_set=[Set[int]]) def test_raw_set(self): 
self.run_test("def raw_set(): return { 1, 1., 2 }", raw_set=[]) def test_iter_set(self): self.run_test("def iter_set(s):\n l=0\n for k in s: l+=k\n return l", { 1, 2, 3 } , iter_set=[Set[int]]) def test_set_comprehension(self): self.run_test("def set_comprehension(l): return { i*i for i in l }", [1 , 2, 1, 3], set_comprehension=[List[int]]) def test_slicer(self): code=""" def slicer(l): l[2:5]=[1,2] return l""" self.run_test(code,[1,2,3,4,5,6,7,8,9], slicer=[List[int]]) def test_generator_expression(self): code=""" def generator_expression(l): return sum(x for x in l if x == 1)""" self.run_test(code,[1,1,1,2], generator_expression=[List[int]]) def test_default_parameters(self): code=""" def dp(b,a=1.2): return a def default_parameters(): a=1 c=dp(a) d=dp(5,"yeah") return str(c)+d""" self.run_test(code, default_parameters=[]) def test_import_as(self): code=""" from math import cos as COS def import_as(): x=.42 import math as MATH return MATH.sin(x)**2 + COS(x)**2""" self.run_test(code, import_as=[]) def test_tuple_unpacking(self): self.run_test("def tuple_unpacking(t): a,b = t ; return a, b", (1,"e"), tuple_unpacking=[Tuple[int, str]]) def test_list_unpacking(self): self.run_test("def list_unpacking(t): [a,b] = t ; return a, b", (1,2), list_unpacking=[Tuple[int, int]]) def test_recursive_attr(self): self.run_test("def recursive_attr(): return {1,2,3}.union({1,2}).union({5})", recursive_attr=[]) def test_range_negative_step(self): self.run_test("""def range_negative_step(n): o=[] for i in range(n, 0, -1): o.append(i) return o""", 10, range_negative_step=[int]) def test_reversed_range_negative_step(self): self.run_test("""def reversed_range_negative_step(n): o=[] for i in reversed(range(n, 0, -1)): o.append(i) return o""", 10, reversed_range_negative_step=[int]) def test_update_empty_list(self): self.run_test(''' def update_empty_list(l): p = list() return p + l[:1]''', list(range(5)), update_empty_list=[List[int]]) def test_update_list_with_slice(self): 
self.run_test(''' def update_list_with_slice(l): p = list() for i in range(10): p += l[:1] return p,i''', list(range(5)), update_list_with_slice=[List[int]]) def test_add_slice_to_list(self): self.run_test(''' def add_slice_to_list(l): p = list() for i in range(10): p = p + l[:1] return p,i''', list(range(5)), add_slice_to_list=[List[int]]) def test_bool_(self): self.run_test("def _bool(d): return bool(d)", 3, _bool=[int]) def test_complex_add(self): self.run_test("def complex_add(): a = 1j ; b = 2 ; return a + b", complex_add=[]) def test_complex_sub(self): self.run_test("def complex_sub(): a = 1j ; b = 2 ; return a - b", complex_sub=[]) def test_complex_mul(self): self.run_test("def complex_mul(): a = 1j ; b = 2 ; return a * b", complex_mul=[]) def test_complex_div(self): self.run_test("def complex_div(): a = 1j ; b = 2 ; return a / b", complex_div=[]) def test_modulo_int0(self): self.run_test("def modulo_int0(n): return n%3, (-n)%3", 5, modulo_int0=[int]) def test_modulo_int1(self): self.run_test("def modulo_int1(n): return n%3, (-n)%3", 3, modulo_int1=[int]) def test_modulo_float0(self): self.run_test("def modulo_float0(n): return n%3, (-n)%3", 5.4, modulo_float0=[float]) def test_modulo_float1(self): self.run_test("def modulo_float1(n): return n%3, (-n)%3", 3.5, modulo_float1=[float]) def test_floordiv_int0(self): self.run_test("def floordiv_int0(n): return n%3, (-n)%3", 5, floordiv_int0=[int]) def test_floordiv_int1(self): self.run_test("def floordiv_int1(n): return n//2, (-n)//2", 3, floordiv_int1=[int]) def test_floordiv_float0(self): self.run_test("def floordiv_float0(n): return n//2, (-n)//2", 5.4, floordiv_float0=[float]) def test_floordiv_float1(self): self.run_test("def floordiv_float1(n): return n//2, (-n)//2", 3.5, floordiv_float1=[float]) def test_int_base0(self): self.run_test("def int_base(x, y): return [int(x0, y0) for x0, y0 in zip(x, y)]", ["11", "11", "16", "FF"], [2, 4, 8, 16], int_base=[List[str], List[int]]) def test_int_base1(self): 
self.run_test("def int_base_lit(x, y): return int(x, 8), int('A', y)", "14", 16, int_base_lit=[str, int]) def test_slice0(self): self.run_test(""" def slice0(a, x): return a[x], x, slice(1,1,1)""", numpy.array([-1.3, 2.3, -4]), slice(0,2,1), slice0=[NDArray[float, :], slice]) pythran-0.10.0+ds2/pythran/tests/test_bisect.py000066400000000000000000000042321416264035500214610ustar00rootroot00000000000000import unittest from pythran.tests import TestEnv from pythran.typing import List @TestEnv.module class TestBisect(TestEnv): def test_bisect_left0(self): self.run_test("def bisect_left0(l,a): from bisect import bisect_left ; return bisect_left(l,a)", [0,1,2,3],2, bisect_left0=[List[int],int]) def test_bisect_left1(self): self.run_test("def bisect_left1(l,a): from bisect import bisect_left ; return bisect_left(l,a,1)", [0,1,2,3],2, bisect_left1=[List[int],int]) def test_bisect_left2(self): self.run_test("def bisect_left2(l,a): from bisect import bisect_left ; return bisect_left(l,a)", [1,1,1,1],1, bisect_left2=[List[int],int]) def test_bisect_left3(self): self.run_test("def bisect_left3(l,a): from bisect import bisect_left ; return bisect_left(l,a,1,2)", [0,1,1,3],2, bisect_left3=[List[int],int]) def test_bisect_left4(self): self.run_test("def bisect_left4(l,a): from bisect import bisect_left ; return bisect_left(l,a)", [1,1,1,1],2, bisect_left4=[List[int],int]) def test_bisect_right0(self): self.run_test("def bisect_right0(l,a): from bisect import bisect_right ; return bisect_right(l,a)", [0,1,2,3],2, bisect_right0=[List[int],int]) def test_bisect_right1(self): self.run_test("def bisect_right1(l,a): from bisect import bisect_right ; return bisect_right(l,a,1)", [0,1,2,3],2, bisect_right1=[List[int],int]) def test_bisect_right2(self): self.run_test("def bisect_right2(l,a): from bisect import bisect_right ; return bisect_right(l,a)", [1,1,1,1],1, bisect_right2=[List[int],int]) def test_bisect_right3(self): self.run_test("def bisect_right3(l,a): from bisect import 
bisect_right ; return bisect_right(l,a,1,2)", [0,1,1,3],2, bisect_right3=[List[int],int]) def test_bisect_right4(self): self.run_test("def bisect_right4(l,a): from bisect import bisect_right ; return bisect_right(l,a)", [1,1,1,1],2, bisect_right4=[List[int],int]) def test_bisect_raise0(self): with self.assertRaises(ValueError): self.run_test("def bisect_raise0(l): from bisect import bisect ; return bisect(l,1, -1)", [0,1,2,3],bisect_raise0=[List[int]]) pythran-0.10.0+ds2/pythran/tests/test_blas.py000066400000000000000000000010641416264035500211310ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import List class TestBlas(TestEnv): def test_naive_matrix_multiply(self): code=""" def matrix_multiply(m0, m1): new_matrix = [] for i in range(len(m0)): new_matrix.append([0]*len(m1[0])) for i in range(len(m0)): for j in range(len(m1[0])): for k in range(len(m1)): new_matrix[i][j] += m0[i][k]*m1[k][j] return new_matrix""" self.run_test(code, [[0,1],[1,0]], [[1,2],[2,1]], matrix_multiply=[List[List[int]],List[List[int]]]) pythran-0.10.0+ds2/pythran/tests/test_cases.py000066400000000000000000000013651416264035500213120ustar00rootroot00000000000000""" Tests for test cases directory. """ # TODO: check http://code.google.com/p/unladen-swallow/wiki/Benchmarks import os from distutils.version import LooseVersion import numpy import unittest from pythran.tests import TestFromDir class TestCases(TestFromDir): """ Class to check all tests in the cases directory. 
""" path = os.path.join(os.path.dirname(__file__), "cases") TestCases.populate(TestCases) if LooseVersion(numpy.__version__) >= '1.20': del TestCases.test_train_equalizer_norun0 del TestCases.test_train_eq_run0 del TestCases.test_train_eq_run1 # too template intensive for old g++ if os.environ.get('CXX', None) == 'g++-5': del TestCases.test_loopy_jacob_run0 if __name__ == '__main__': unittest.main() pythran-0.10.0+ds2/pythran/tests/test_complex.py000066400000000000000000000206261416264035500216640ustar00rootroot00000000000000import numpy as np from pythran.config import cfg from pythran.tests import TestEnv from pythran.typing import NDArray import unittest try: np.float128 has_float128 = True except AttributeError: has_float128 = False class TestComplex(TestEnv): """ Check complex support in Pythran. """ def test_complex_limited_range(self): """ Check complex computation is the same as numpy for corner case. """ # see -fcx-limited-range if cfg.getboolean('pythran', 'complex_hook'): self.run_test(""" def test_complex_limited_range(a, b): return a * b""", complex(-4, np.nan), complex(4, -np.inf), test_complex_limited_range=[complex, complex]) def test_complex128_to_complex64(self): self.run_test(""" import numpy as np def complex128_to_complex64(a): return np.complex64(a)""", complex(-4.4, 4.4), complex128_to_complex64=[complex]) def test_conjugate(self): """ Check complex conjugate. 
Checked for: * Method and numpy function call * conj and conjugate for each of them * complex and array (1 and 2 D) """ self.run_test(""" def test_conjugate(c, a, a2d): import numpy as np return (np.conj(c), np.conj(a), a2d.conj(), np.conjugate(c), np.conjugate(a), a2d.conjugate()) """, 3 + 2j, np.array([3 + 2j]), np.array([[3 + 2j]]), test_conjugate=[np.complex128, NDArray[np.complex128, :], NDArray[complex, :, :]]) def test_complex_array_abs(self): self.run_test('def test_complex_array_abs(a): import numpy as np ; return np.abs(a)', np.array([[3 + 2j]]), test_complex_array_abs=[NDArray[complex, :, :]]) def test_complex_floordiv(self): self.run_test('def complex_floordiv(x): import numpy as np; return np.floor_divide(x, 2 + 2j)', 3.5 - 3.5j, complex_floordiv=[complex]) def test_complex_array_sqr(self): self.run_test('def test_complex_array_sqr(a): return a ** 2', np.array([[3 + 2j]]), test_complex_array_sqr=[NDArray[complex, :, :]]) def test_complex_array_mul_i(self): self.run_test('def test_complex_array_mul_i(e): return e + 1j * e', np.array([[3.,2.,4.]]), test_complex_array_mul_i=[NDArray[float, :, :]]) def test_non_complex_array_real_imag(self): self.run_test('def test_non_complex_array_real_imag(e): return e.real + e.imag', np.array([[3.,2.,4.]]), test_non_complex_array_real_imag=[NDArray[float, :, :]]) def test_complex_array_real_imag(self): self.run_test('def test_complex_array_real_imag(e): return e.real + e.imag', np.array([[3.,2.,4.]], dtype=complex), test_complex_array_real_imag=[NDArray[complex, :, :]]) def test_complex_sum_different_types(self): self.run_test('def test_complex_different_types(a,b): return a + b', np.array([[3 + 2j]],dtype=np.complex64),np.array([[8 + 1j]],dtype=np.complex128), test_complex_different_types=[NDArray[np.complex64, :, :],NDArray[np.complex128, :, :]]) def test_complex_sum_same_types(self): self.run_test('def test_complex_same_types(a): return a + a', np.array([[3 + 2j]],dtype=np.complex64), 
test_complex_same_types=[NDArray[np.complex64, :, :]]) def test_complex_array_real_assign(self): self.run_test('def test_complex_array_real_assign(a): a.real = 1; return a', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_real_assign=[NDArray[np.complex64, :, :]]) def test_complex_array_gexpr_real_assign(self): self.run_test('def test_complex_array_gexpr_real_assign(a): a.real[1:] = 1; return a', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_gexpr_real_assign=[NDArray[np.complex64, :, :]]) def test_complex_array_iexpr_real_assign(self): self.run_test('def test_complex_array_iexpr_real_assign(a): a.real[1] = 1; return a', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_iexpr_real_assign=[NDArray[np.complex64, :, :]]) def test_complex_broadcast_scalar0(self): self.run_test('def complex_broadcast_scalar0(x): return x + 1.5, 1.3 +x, 3.1 - x, x - 3.7, x * 5.4, 7.6 * x', 5.1 + 3j, complex_broadcast_scalar0=[complex]) def test_complex_broadcast_scalar1(self): self.run_test('def complex_broadcast_scalar1(x): return x + 1.5, 1.3 +x, 3.1 - x, x - 3.7, x * 5.4, 7.6 * x', np.complex64(5.1 + 3j), complex_broadcast_scalar1=[np.complex64]) def test_complex_array_imag_assign(self): self.run_test('def test_complex_array_imag_assign(a): a.imag = 1; return a', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_imag_assign=[NDArray[np.complex64, :, :]]) def test_complex_array_gexpr_imag_assign(self): self.run_test('def test_complex_array_gexpr_imag_assign(a): a.imag[1:] = 1; return a', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_gexpr_imag_assign=[NDArray[np.complex64, :, :]]) def test_complex_array_iexpr_imag_assign(self): self.run_test('def test_complex_array_iexpr_imag_assign(a): a.imag[1] = 1; return a', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_iexpr_imag_assign=[NDArray[np.complex64, :, :]]) def 
test_complex_array_expr_imag(self): self.run_test('def test_complex_array_expr_imag(a): return (2.j*a).imag', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_expr_imag=[NDArray[np.complex64, :, :]]) def test_complex_array_expr_real(self): self.run_test('def test_complex_array_expr_real(a): return (2+a).real', np.array([[3 + 2j, 2, 1, 0]] * 3,dtype=np.complex64), test_complex_array_expr_real=[NDArray[np.complex64, :, :]]) @unittest.skipIf(not has_float128, "not float128") def test_complex256_array0(self): self.run_test('def complex256_array0(x): import numpy as np; return np.cos(x * 2j)', np.array([1.2,3.1], dtype=np.complex256) ** 6, complex256_array0=[NDArray[np.complex256, :]]) @unittest.skipIf(not has_float128, "not float128") def test_complex256_array1(self): self.run_test('def complex256_array1(x): import numpy as np; return (x * 2j)**2', np.array([1.2,3.1], dtype=np.complex256) ** 6, complex256_array1=[NDArray[np.complex256, :]]) @unittest.skipIf(not has_float128, "not float128") def test_complex256_array2(self): self.run_test('def complex256_array2(x): import numpy as np; return np.ones(x, dtype=np.complex256)', 10, complex256_array2=[int]) @unittest.skipIf(not has_float128, "not float128") def test_complex256_array3(self): self.run_test('def complex256_array3(x): return x.real, x.imag', np.array([2j, 2], dtype=np.complex256)** 5, complex256_array3=[NDArray[np.complex256, :]]) @unittest.skipIf(not has_float128, "not float128") def test_complex256_array4(self): self.run_test('def complex256_array4(x): return x.conj(), x.sum()', np.array([2j, 2], dtype=np.complex256)** 7, complex256_array4=[NDArray[np.complex256, :]]) @unittest.skipIf(not has_float128, "not float128") def test_complex256_array5(self): self.run_test('def complex256_array5(x): return x', np.complex256(1 + 1j), complex256_array5=[np.complex256]) 
pythran-0.10.0+ds2/pythran/tests/test_conversion.py000066400000000000000000000247411416264035500224040ustar00rootroot00000000000000import numpy as np import unittest from pythran.typing import * from pythran.tests import TestEnv try: np.float128 has_float128 = True except AttributeError: has_float128 = False class TestConversion(TestEnv): def test_list_of_uint16(self): self.run_test('def list_of_uint16(l): return l', [np.uint16(1),np.uint16(2)], list_of_uint16=[List[np.uint16]]) def test_set_of_int32(self): self.run_test('def set_of_int32(l): return l', {np.int32(1),np.int32(-4)}, set_of_int32=[Set[np.int32]]) def test_dict_of_int64_and_int8(self): self.run_test('def dict_of_int64_and_int8(l): return l', {np.int64(1):np.int8(1), np.int64(2):np.int8(3), np.int64(3):np.int8(4), np.int64(-4):np.int8(-5)}, dict_of_int64_and_int8=[Dict[np.int64,np.int8]]) def test_tuple_of_uint8_and_int16(self): self.run_test('def tuple_of_uint8_and_int16(l): return l', (np.uint8(5), np.int16(-146)), tuple_of_uint8_and_int16=[Tuple[np.uint8, np.int16]]) def test_array_of_uint32(self): self.run_test('def array_of_uint32(l): return l', np.ones(2,dtype=np.uint32), array_of_uint32=[NDArray[np.uint32, :]]) def test_array_of_uint64_to_uint32(self): self.run_test('def array_of_uint64_to_uint32(l): import numpy ; return l, numpy.array(l, numpy.uint32)', np.ones(2,dtype=np.uint64), array_of_uint64_to_uint32=[NDArray[np.uint64, :]]) def test_list_of_float64(self): self.run_test('def list_of_float64(l): return [2. * _ for _ in l]', [1.,2.], list_of_float64=[List[np.float64]]) @unittest.skipIf(not has_float128, "not float128") def test_list_of_float128(self): self.run_test('def list_of_float128(l): return [2. 
* _ for _ in l]', [np.float128(1.),np.float128(2.)], list_of_float128=[List[np.float128]]) @unittest.skipIf(not has_float128, "not float128") def test_array_of_float128(self): self.run_test('def array_of_float128(l): return l + 1', np.array([1.,2.], dtype=np.float128), array_of_float128=[NDArray[np.float128, :]]) def test_set_of_float32(self): """ Check np.float32 conversion. """ code = """ def set_of_float32(l): return { _ / 2 for _ in l}""" self.run_test(code, {np.float32(1), np.float32(2)}, set_of_float32=[Set[np.float32]]) def test_dict_of_complex64_and_complex_128(self): """ Check numpy complex type conversion. """ code = """ def dict_of_complex64_and_complex_128(l): return list(l.keys()), list(l.values())""" interface = [Dict[np.complex64, np.complex128]] self.run_test(code, {np.complex64(3.1 + 1.1j): 4.5 + 5.5j}, dict_of_complex64_and_complex_128=interface) def test_ndarray_bad_dimension(self): code = 'def ndarray_bad_dimension(a): return a' with self.assertRaises(BaseException): self.run_test(code, np.ones((10,10)), ndarray_bad_dimension=[NDArray[float, :]]) def test_ndarray_bad_dtype(self): code = 'def ndarray_bad_dtype(a): return a' with self.assertRaises(BaseException): self.run_test(code, np.ones((10,10)), ndarray_bad_dtype=[NDArray[np.uint8, :, :]]) def test_ndarray_bad_stride_type(self): """ Check an error is raised when pythran input is strided. 
""" code = 'def ndarray_bad_stride_type(a): return a' with self.assertRaises(BaseException): self.run_test(code, np.ones((10, 10), dtype=np.uint8)[::2], ndarray_bad_stride_type=[NDArray[np.uint8, :, :]]) def test_ndarray_with_stride_type(self): code = 'def ndarray_with_stride_type(a): return a' self.run_test(code, np.arange((10), dtype=np.uint8)[::2], ndarray_with_stride_type=[NDArray[np.uint8, ::-1]]) def test_ndarray_with_stride_and_offset(self): code = 'def ndarray_with_stride_and_offset(a): return a' self.run_test(code, np.arange((10), dtype=np.uint8)[1::2], ndarray_with_stride_and_offset=[NDArray[np.uint8, ::-1]]) def test_ndarray_with_negative_stride(self): code = 'def ndarray_with_negative_stride(a): return a' with self.assertRaises(BaseException): self.run_test(code, np.arange((10), dtype=np.uint8)[::-2], ndarray_with_negative_stride=[NDArray[np.uint8, ::-1]]) def iexpr_with_strides_and_offsets(self): code = 'def iexpr_with_strides_and_offsets(a): return a' self.run_test(code, np.array(np.arange((160), dtype=np.uint8).reshape((4, 5, 8)))[1][1::][:-1], ndarray_with_strides_and_offsets=[NDArray[np.uint8, :, ::-1]]) def test_ndarray_with_strides_and_offsets(self): code = 'def ndarray_with_strides_and_offsets(a): return a' self.run_test(code, np.array(np.arange((128), dtype=np.uint8).reshape((16,8)))[1::3,2::2], ndarray_with_strides_and_offsets=[NDArray[np.uint8, :, ::-1]]) def test_ndarray_with_stride_and_offset_and_end(self): code = 'def ndarray_with_stride_and_offset_and_end(a): return a' self.run_test(code, np.arange((10), dtype=np.uint16)[1:6:2], ndarray_with_stride_and_offset_and_end=[NDArray[np.uint16, ::-1]]) def test_ndarray_with_multi_strides(self): code = 'def ndarray_with_multi_strides(a): return a' self.run_test(code, np.array(np.arange((128), dtype=np.uint8).reshape((16,8)))[:,1::3], ndarray_with_multi_strides=[NDArray[np.uint8, :, ::-1]]) def test_ndarray_unsupported_reshaped_array_with_stride(self): code = 'def 
ndarray_unsupported_reshaped_array_with_stride(a): return a' with self.assertRaises(BaseException): self.run_test(code, np.arange((128), dtype=np.uint8).reshape((16,8))[1::3,2::2], ndarray_unsupported_reshaped_array_with_stride=[NDArray[np.uint8, :, ::-1]]) def test_transposed_arg0(self): self.run_test("def np_transposed_arg0(a): return a", np.arange(9).reshape(3,3).T, np_transposed_arg0=[NDArray[int, -1::, :]]) def test_transposed_arg1(self): self.run_test("def np_transposed_arg1(a): return a", np.arange(12).reshape(3,4).T, np_transposed_arg1=[NDArray[int, -1::, :]]) def test_transposed_arg2(self): self.run_test("def np_transposed_arg2(a): return a", np.arange(12, dtype=complex).reshape(3,4).T, np_transposed_arg2=[NDArray[complex, -1::, :]]) def test_transposed_targ0(self): self.run_test("def np_transposed_targ0(a): return a.T", np.arange(9).reshape(3,3).T, np_transposed_targ0=[NDArray[int, -1::, :]]) def test_transposed_targ1(self): self.run_test("def np_transposed_targ1(a): return a.T", np.arange(12).reshape(3,4).T, np_transposed_targ1=[NDArray[int, -1::, :]]) def test_transposed_targ2(self): self.run_test("def np_transposed_targ2(a): return a.T", np.arange(12, dtype=complex).reshape(3,4).T, np_transposed_targ2=[NDArray[complex, -1::, :]]) def test_transposed_argt0(self): self.run_test("def np_transposed_argt0(a): return a.T", np.arange(9).reshape(3,3), np_transposed_argt0=[NDArray[int, :, :]]) def test_transposed_argt1(self): self.run_test("def np_transposed_argt1(a): return a.T", np.arange(12).reshape(3,4), np_transposed_argt1=[NDArray[int, :, :]]) def test_transposed_argt2(self): self.run_test("def np_transposed_argt2(a): return a.T", np.arange(12, dtype=complex).reshape(3,4), np_transposed_argt2=[NDArray[complex, :, :]]) def test_broadcasted_int8(self): self.run_test('def broadcasted_int8(l): return l + 4', np.ones(10,dtype=np.int8).reshape(5,2), broadcasted_int8=[NDArray[np.int8, :, :]]) def test_broadcasted_uint8(self): self.run_test('def 
broadcasted_uint8(l): return l - 4', np.ones(10,dtype=np.uint8).reshape(5,2), broadcasted_uint8=[NDArray[np.uint8, :, :]]) def test_broadcasted_int16(self): self.run_test('def broadcasted_int16(l): return l * 4', np.ones(10,dtype=np.int16).reshape(5,2), broadcasted_int16=[NDArray[np.int16, :, :]]) def test_broadcasted_uint16(self): self.run_test('def broadcasted_uint16(l): return l / 4', np.ones(10,dtype=np.uint16).reshape(5,2), broadcasted_uint16=[NDArray[np.uint16, :, :]]) @unittest.skip("no dynamic type promotion in pythran :-/") def test_broadcasted_large_int8(self): self.run_test('def broadcasted_large_int8(l): return l + 400', np.ones(10,dtype=np.int8).reshape(5,2), broadcasted_large_int8=[NDArray[np.int8, :, :]]) def test_builtin_type0(self): self.run_test("def builtin_type0(x): return type(x)(x)", 1, builtin_type0=[int]) def test_builtin_type1(self): self.run_test("def builtin_type1(x): return type(x)(x)", True, builtin_type1=[bool]) def test_builtin_type2(self): self.run_test("def builtin_type2(x): return type(x)(x)", 1., builtin_type2=[float]) def test_builtin_type3(self): self.run_test("def builtin_type3(x): return type(x)(x)", (1,), builtin_type3=[Tuple[int]]) def test_builtin_type3b(self): self.run_test("def builtin_type3b(x): return type(x)(x)", (1,1.), builtin_type3b=[Tuple[int, float]]) def test_builtin_type4(self): self.run_test("def builtin_type4(x): return type(x)(x)", [1], builtin_type4=[List[int]]) def test_builtin_type5(self): self.run_test("def builtin_type5(x): return type(x)(x)", {1}, builtin_type5=[Set[int]]) def test_builtin_type6(self): self.run_test("def builtin_type6(x): return type(x)(x)", {1:1}, builtin_type6=[Dict[int, int]]) def test_builtin_type7(self): self.run_test("def builtin_type7(x): s = type(x)([1]); s[0] = 9; return s", np.array([2, 4, 8]), builtin_type7=[NDArray[int, :]]) def test_builtin_type8(self): self.run_test("def builtin_type8(x): return type(x)(x)", "1", builtin_type8=[str]) def test_builtin_type9(self): npt = 
("numpy.int8", "numpy.uint8", "numpy.int16", "numpy.uint16", "numpy.int32", "numpy.uint32", "numpy.int64", "numpy.uint64", "numpy.intp", "numpy.uintp",) for i, t in enumerate(npt): kwargs = {"builtin_type9p{}".format(i): [int]} self.run_test("def builtin_type9p{}(x): import numpy; x = {}(x); return type(x)(x)".format(i, t), 1, **kwargs) pythran-0.10.0+ds2/pythran/tests/test_copperhead.py000066400000000000000000000146451416264035500223330ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import * class TestCopperhead(TestEnv): # from copperhead test suite # https://github.com/copperhead def test_saxpy(self): self.run_test( "def saxpy(a, x, y): return list(map(lambda xi, yi: a * xi + yi, x, y))", 1.5, [1,2,3], [0.,2.,4.], saxpy=[float, List[int], List[float]]) def test_saxpy2(self): self.run_test( "def saxpy2(a, x, y): return [a*xi+yi for xi,yi in zip(x,y)]", 1.5, [1,2,3], [0.,2.,4.], saxpy2=[float,List[int], List[float]]) def test_saxpy3(self): code=""" def saxpy3(a, x, y): def triad(xi, yi): return a * xi + yi return list(map(triad, x, y)) """ self.run_test( code, 1.5, [1,2,3], [0.,2.,4.], saxpy3=[float,List[int], List[float]]) def test_saxpy4(self): code=""" def saxpy4(a, x, y): return manual(y,x,a) def manual(y,x,a): __list=list() for __tuple in zip(y,x): __list.append(__tuple[0]*a+__tuple[1]) return __list """ self.run_test( code, 1.5, [1,2,3], [0.,2.,4.], saxpy4=[float,List[int], List[float]]) def test_sxpy(self): code=""" def sxpy(x, y): def duad(xi, yi): return xi + yi return list(map(duad, x, y)) """ self.run_test( code, [1,2,3], [0.,2.,4.], sxpy=[List[int], List[float]]) def test_incr(self): self.run_test( "def incr(x): return list(map(lambda xi: xi + 1, x))", [0., 0., 0.], incr=[List[float]]) def test_as_ones(self): self.run_test( "def as_ones(x): return list(map(lambda xi: 1, x))", [0., 0., 0.], as_ones=[List[float]]) def test_idm(self): self.run_test( "def idm(x): return list(map(lambda b: b, x))", [1, 2, 3], 
idm=[List[int]]) def test_incr_list(self): self.run_test( "def incr_list(x): return [xi + 1 for xi in x]", [1., 2., 3.], incr_list=[List[float]]) def test_idx(self): code=""" def idx(x): def id(xi): return xi return list(map(id, x))""" self.run_test(code, [1,2,3], idx=[List[int]]) def test_rbf(self): code=""" from math import exp def norm2_diff(x, y): def el(xi, yi): diff = xi - yi return diff * diff return sum(map(el, x, y)) def rbf(ngamma, x, y): return exp(ngamma * norm2_diff(x,y))""" self.run_test( code, 2.3, [1,2,3], [1.1,1.2,1.3], rbf=[float, List[int], List[float]]) # from copperhead-new/copperhead/prelude.py def test_indices(self): self.run_test( "def indices(A):return list(range(len(A)))", [1,2], indices=[List[int]]) def test_gather(self): self.run_test( "def gather(x, indices): return [x[i] for i in indices]", [1,2,3,4,5], [0,2,4], gather=[List[int], List[int]]) def test_scatter(self): code=""" def indices(x): return list(range(len(x))) def scatter(src, indices_, dst): assert len(src)==len(indices_) result = list(dst) for i in range(len(src)): result[indices_[i]] = src[i] return result """ self.run_test( code, [0.0,1.0,2.,3.,4.,5.,6.,7.,8.,9.],[5,6,7,8,9,0,1,2,3,4],[0,0,0,0,0,0,0,0,0,0,18], scatter=[List[float], List[int], List[int]]) def test_scan(self): code=""" def prefix(A): return scan(lambda x,y:x+y, A) def scan(f, A): B = list(A) for i in range(1, len(B)): B[i] = f(B[i-1], B[i]) return B """ self.run_test(code, [1.,2.,3.], prefix=[List[float]]) # from Copperhead: Compiling an Embedded Data Parallel Language # by Bryan Catanzaro, Michael Garland and Kurt Keutzer # http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-124.html def test_spvv_csr(self): code=""" def spvv_csr(x, cols, y): def gather(x, indices): return [x[i] for i in indices] z = gather(y, cols) return sum(map(lambda a, b: a * b, x, z)) """ self.run_test(code, [1,2,3],[0,1,2],[5.5,6.6,7.7], spvv_csr=[List[int], List[int], List[float]]) def test_spmv_csr(self): code=""" def 
spvv_csr(x, cols, y): def gather(x, indices): return [x[i] for i in indices] z = gather(y, cols) return sum(map(lambda a, b: a * b, x, z)) def spmv_csr(Ax, Aj, x): return list(map(lambda y, cols: spvv_csr(y, cols, x), Ax, Aj)) """ self.run_test(code, [[0,1,2],[0,1,2],[0,1,2]],[[0,1,2],[0,1,2],[0,1,2]],[0,1,2], spmv_csr=[List[List[int]], List[List[int]], List[int]]) def test_spmv_ell(self): code=""" def indices(x): return range(len(x)) def spmv_ell(data, idx, x): def kernel(i): return sum(map(lambda Aj, J: Aj[i] * x[J[i]], data, idx)) return list(map(kernel, indices(x))) """ self.run_test(code, [[0,1,2],[0,1,2],[0,1,2]],[[0,1,2],[0,1,2],[0,1,2]],[0,1,2], spmv_ell=[List[List[int]], List[List[int]], List[int]]) def test_vadd(self): self.run_test("def vadd(x, y): return list(map(lambda a, b: a + b, x, y))", [0.,1.,2.],[5.,6.,7.], vadd=[List[float], List[float]]) def test_vmul(self): self.run_test("def vmul(x, y): return list(map(lambda a, b: a * b, x, y))", [0.,1.,2.],[5.,6.,7.], vmul=[List[float], List[float]]) def test_form_preconditioner(self): code=""" def vadd(x, y): return list(map(lambda a, b: a + b, x, y)) def vmul(x, y): return list(map(lambda a, b: a * b, x, y)) def form_preconditioner(a, b, c): def det_inverse(ai, bi, ci): return 1.0/(ai * ci - bi * bi) indets = list(map(det_inverse, a, b, c)) p_a = vmul(indets, c) p_b = list(map(lambda a, b: -a * b, indets, b)) p_c = vmul(indets, a) return p_a, p_b, p_c """ self.run_test(code, [1,2,3],[0,1,2],[5.5,6.6,7.7],form_preconditioner=[List[int], List[int], List[float]]) def test_precondition(self): code=""" def precondition(u, v, p_a, p_b, p_c): def vadd(x, y): return map(lambda a, b: a + b, x, y) def vmul(x, y): return map(lambda a, b: a * b, x, y) e = vadd(vmul(p_a, u), vmul(p_b, v)) f = vadd(vmul(p_b, u), vmul(p_c, v)) return list(e), list(f) """ self.run_test(code, [1,2,3], [5.5,6.6,7.7],[1,2,3], [5.5,6.6,7.7],[8.8,9.9,10.10], precondition=[List[int], List[float], List[int], List[float], List[float]]) 
pythran-0.10.0+ds2/pythran/tests/test_cython.py000066400000000000000000000017411416264035500215160ustar00rootroot00000000000000import os import glob import sys import unittest class TestCython(unittest.TestCase): pass def add_test(name, runner, target): setattr(TestCython, "test_" + name, lambda s: runner(s, target)) for intermediate in glob.glob(os.path.join(os.path.dirname(__file__), "cython", "*.cpp")): os.remove(intermediate) try: import Cython targets = glob.glob(os.path.join(os.path.dirname(__file__), "cython", "setup_*.py")) sys.path.append(os.path.join(os.path.dirname(__file__), "cython")) for target in targets: def runner(self, target): cwd = os.getcwd() try: os.chdir(os.path.dirname(target)) exec(open(os.path.basename(target)).read()) except: raise finally: os.chdir(cwd) name, _ = os.path.splitext(os.path.basename(target)) add_test(name, runner, target) except ImportError: pass pythran-0.10.0+ds2/pythran/tests/test_dict.py000066400000000000000000000122621416264035500211350ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import Dict, List class TestDict(TestEnv): def test_dict_(self): self.run_test("def dict_(): a=dict()", dict_=[]) def test_assigned_dict(self): self.run_test("def assigned_dict(k):\n a=dict() ; a[k]=18", "yeah", assigned_dict=[str]) def test_print_empty_dict(self): self.run_test("def print_empty_dict():\n print(dict())", print_empty_dict=[]) def test_print_dict(self): self.run_test("def print_dict(k):\n a= dict() ; a[k]='youpi'\n print(a)", 5, print_dict=[int]) def test_empty_dict(self): self.run_test("def empty_dict(): return {}", empty_dict=[]) def test_initialized_dict(self): self.run_test("def initialized_dict(): return {1:'e', 5.2:'f'}", initialized_dict=[]) def test_dict_contains(self): self.run_test("def dict_contains(v): return v in { 'a':1, 'e': 2 }", "e", dict_contains=[str]) def test_emptydict_contains(self): self.run_test("def emptydict_contains(v): return v in dict()", "e", 
emptydict_contains=[str]) def test_dict_get_item(self): self.run_test("def dict_get_item(a): return a['e']", {'e':1, 'f':2}, dict_get_item=[Dict[str, int]]) def test_dict_len(self): self.run_test("def dict_len(d): return len(d)", {1:'e', 2:'f'}, dict_len=[Dict[int, str]]) def test_dict_set_item(self): self.run_test("def dict_set_item():\n a= dict() ; a[1.5]='s'\n return a", dict_set_item=[]) def test_dict_set_item_bis(self): self.run_test("def dict_set_item_bis():\n a= dict() ; a[1]='s'\n return a", dict_set_item_bis=[]) def test_dict_clear(self): self.run_test("def dict_clear(a):\n a.clear()\n return a", {'e':'E' }, dict_clear=[Dict[str, str]]) def test_dict_copy(self): code=""" def dict_copy(a): b = a.copy() c = a a.clear() return c,b""" self.run_test(code, {1:2 }, dict_copy=[Dict[int, int]]) def test_dict_from_keys(self): return self.run_test("def dict_from_keys(a): return dict.fromkeys(a), dict.fromkeys(a,1)", [1.5,2.5,3.5], dict_from_keys=[List[float]]) def test_dict_get(self): return self.run_test("def dict_get(a): return a.get(1.5), a.get(2, 18) + 1", {1.5:2 }, dict_get=[Dict[float, int]]) def test_dict_get_none(self): return self.run_test("def dict_get_none(a): return a.get(1)", {1.5:2 }, dict_get_none=[Dict[float, int]]) def test_dict_items(self): return self.run_test("def dict_items(a): return sorted(a.items())", { 'a':1, 'e': 2 }, dict_items=[Dict[str, int]]) def test_dict_for(self): return self.run_test("def dict_for(a): return sorted([x for x in a])", { 'a':1, 'e': 2 }, dict_for=[Dict[str, int]]) def test_dict_keys(self): return self.run_test("def dict_keys(a): return sorted([ x*2 for x in a.keys()])", { 1:'a', 2:'b' }, dict_keys=[Dict[int, str]]) def test_dict_values(self): return self.run_test("def dict_values(a): return sorted([ x*2 for x in a.values()])", { 1:'a', 2:'b' }, dict_values=[Dict[int, str]]) def test_dict_pop(self): return self.run_test("def dict_pop(a): return a.pop(1), a.pop(3,'e'), a", { 1:'a', 2:'b' }, dict_pop=[Dict[int, str]]) def 
test_dict_popitem0(self): return self.run_test( "def dict_popitem0(a): return a.popitem(), a", {1:'a' }, dict_popitem0=[Dict[int, str]]) def test_dict_popitem1(self): return self.run_test( "def dict_popitem1(a): return a.popitem(), a", { 1: 2 }, dict_popitem1=[Dict[int, int]]) def test_dict_setdefault(self): return self.run_test("def dict_setdefault():\n a={1.5:2 }\n return a.setdefault(1.5) + a.setdefault(2, 18)", dict_setdefault=[]) def test_dict_update(self): return self.run_test("def dict_update(a):\n a.update([(1,'e')])\n a.update({2:'c'})\n return a", { 1:'a', 2:'b' }, dict_update=[Dict[int, str]]) def test_dict_items_contains(self): return self.run_test("def dict_viewitems_contains(a):\n d=a.items()\n return (1,'a') in d, (2,'e') in d", { 1:'a', 2:'b' }, dict_viewitems_contains=[Dict[int, str]]) def test_dict_keys_contains(self): return self.run_test("def dict_viewkeys_contains(a):\n d=a.keys()\n return 1 in d, 3 in d", { 1:'a', 2:'b' }, dict_viewkeys_contains=[Dict[int, str]]) def test_dict_values_contains(self): return self.run_test("def dict_viewvalues_contains(a):\n d=a.values()\n return 'a' in d, 'e' in d", { 1:'a', 2:'b' }, dict_viewvalues_contains=[Dict[int, str]]) def test_dict_update_combiner(self): return self.run_test("def dict_update_combiner():\n a=dict()\n a.update({1:'e'})\n return a", dict_update_combiner=[]) def test_dict_setdefault_combiner(self): return self.run_test("def dict_setdefault_combiner():\n a=dict()\n a.setdefault(1,'e')\n return a", dict_setdefault_combiner=[]) def test_dict_iterate_item(self): return self.run_test( """def dict_iterate_item(d): s = 0 for kv in d.items(): for e in kv: s += e return s""", {1:2,3:4}, dict_iterate_item=[Dict[int, int]]) pythran-0.10.0+ds2/pythran/tests/test_distutils.py000066400000000000000000000155411416264035500222410ustar00rootroot00000000000000from subprocess import check_call import os import re import shutil import sys import sysconfig import unittest cwd = os.path.dirname(__file__) 
python_version = "python{}.{}".format(sys.version_info.major, sys.version_info.minor) def find_so(name, path): for root, dirs, files in os.walk(path): for filename in files: if re.match(name, filename): return os.path.join(root, filename) class TestDistutils(unittest.TestCase): def test_setup_build(self): check_call(['python', 'setup.py', 'build'], cwd=os.path.join(cwd, 'test_distutils')) check_call(['python', 'setup.py', 'install', '--prefix=demo_install'], cwd=os.path.join(cwd, 'test_distutils')) base = os.path.join(cwd, 'test_distutils', 'demo_install',) libdir = os.path.join(base, 'lib') if not os.path.isdir(libdir): libdir = os.path.join(base, 'lib64') check_call(['python', '-c', 'import demo'], cwd=os.path.join(libdir, python_version, 'site-packages')) check_call(['python', 'setup.py', 'clean'], cwd=os.path.join(cwd, 'test_distutils')) shutil.rmtree(os.path.join(cwd, 'test_distutils', 'demo_install')) shutil.rmtree(os.path.join(cwd, 'test_distutils', 'build')) def test_setup_sdist_install(self): check_call(['python', 'setup.py', 'sdist', "--dist-dir=sdist"], cwd=os.path.join(cwd, 'test_distutils')) check_call(['tar', 'xzf', 'demo-1.0.tar.gz'], cwd=os.path.join(cwd, 'test_distutils', 'sdist')) check_call(['python', 'setup.py', 'install', '--prefix=demo_install'], cwd=os.path.join(cwd, 'test_distutils', 'sdist', 'demo-1.0')) shutil.rmtree(os.path.join(cwd, 'test_distutils', 'sdist')) def test_setup_bdist_install(self): check_call(['python', 'setup.py', 'bdist', "--dist-dir=bdist"], cwd=os.path.join(cwd, 'test_distutils')) dist_path = os.path.join(cwd, 'test_distutils', 'bdist') tgz = [f for f in os.listdir(dist_path) if f.endswith(".tar.gz")][0] check_call(['tar', 'xzf', tgz], cwd=dist_path) demo_so = find_so(r"demo.*\.so", dist_path) self.assertIsNotNone(demo_so) shutil.rmtree(dist_path) def test_setup_wheel_install(self): check_call(['python', 'setup.py', 'bdist_wheel', "--dist-dir=bdist_wheel"], cwd=os.path.join(cwd, 'test_distutils_setuptools')) dist_path = 
os.path.join(cwd, 'test_distutils_setuptools', 'bdist_wheel') wheel_dir = 'wheeeeeeel' whl = [f for f in os.listdir(dist_path) if f.endswith(".whl")][0] check_call(['unzip', whl, '-d', wheel_dir], cwd=dist_path) demo_so = find_so(r"demo.*\.so", os.path.join(dist_path, wheel_dir)) self.assertIsNotNone(demo_so) shutil.rmtree(dist_path) def test_setup_build2(self): check_call(['python', 'setup.py', 'build'], cwd=os.path.join(cwd, 'test_distutils_packaged')) check_call(['python', 'setup.py', 'install', '--prefix=demo_install2'], cwd=os.path.join(cwd, 'test_distutils_packaged')) base = os.path.join(cwd, 'test_distutils_packaged', 'demo_install2',) libdir = os.path.join(base, 'lib') if not os.path.isdir(libdir): libdir = os.path.join(base, 'lib64') check_call(['python', '-c', 'import demo2.a'], cwd=os.path.join(libdir, python_version, 'site-packages')) check_call(['python', 'setup.py', 'clean'], cwd=os.path.join(cwd, 'test_distutils_packaged')) shutil.rmtree(os.path.join(cwd, 'test_distutils_packaged', 'demo_install2')) shutil.rmtree(os.path.join(cwd, 'test_distutils_packaged', 'build')) def test_setup_sdist_install2(self): check_call(['python', 'setup.py', 'sdist', "--dist-dir=sdist2"], cwd=os.path.join(cwd, 'test_distutils_packaged')) check_call(['tar', 'xzf', 'demo2-1.0.tar.gz'], cwd=os.path.join(cwd, 'test_distutils_packaged', 'sdist2')) check_call(['python', 'setup.py', 'install', '--prefix=demo_install2'], cwd=os.path.join(cwd, 'test_distutils_packaged', 'sdist2', 'demo2-1.0')) shutil.rmtree(os.path.join(cwd, 'test_distutils_packaged', 'sdist2')) def test_setup_bdist_install2(self): check_call(['python', 'setup.py', 'bdist', "--dist-dir=bdist"], cwd=os.path.join(cwd, 'test_distutils_packaged')) dist_path = os.path.join(cwd, 'test_distutils_packaged', 'bdist') tgz = [f for f in os.listdir(dist_path) if f.endswith(".tar.gz")][0] check_call(['tar', 'xzf', tgz], cwd=dist_path) demo_so = find_so(r"a.*\.so", dist_path) self.assertIsNotNone(demo_so) 
shutil.rmtree(dist_path) def test_setup_build3(self): check_call(['python', 'setup.py', 'build'], cwd=os.path.join(cwd, 'test_distutils_numpy')) check_call(['python', 'setup.py', 'install', '--prefix=demo_install3'], cwd=os.path.join(cwd, 'test_distutils_numpy')) base = os.path.join(cwd, 'test_distutils_numpy', 'demo_install3',) libdir = os.path.join(base, 'lib') if not os.path.isdir(libdir): libdir = os.path.join(base, 'lib64') check_call(['python', '-c', 'import a'], cwd=os.path.join(libdir, python_version, 'site-packages', 'demo3')) check_call(['python', 'setup.py', 'clean'], cwd=os.path.join(cwd, 'test_distutils_numpy')) shutil.rmtree(os.path.join(cwd, 'test_distutils_numpy', 'demo_install3')) shutil.rmtree(os.path.join(cwd, 'test_distutils_numpy', 'build')) def test_setup_sdist_install3(self): check_call(['python', 'setup.py', 'sdist', "--dist-dir=sdist3"], cwd=os.path.join(cwd, 'test_distutils_numpy')) check_call(['tar', 'xzf', 'demo3-1.0.tar.gz'], cwd=os.path.join(cwd, 'test_distutils_numpy', 'sdist3')) check_call(['python', 'setup.py', 'install', '--prefix=demo_install3'], cwd=os.path.join(cwd, 'test_distutils_numpy', 'sdist3', 'demo3-1.0')) shutil.rmtree(os.path.join(cwd, 'test_distutils_numpy', 'sdist3')) def test_setup_bdist_install3(self): check_call(['python', 'setup.py', 'bdist', "--dist-dir=bdist"], cwd=os.path.join(cwd, 'test_distutils_numpy')) dist_path = os.path.join(cwd, 'test_distutils_numpy', 'bdist') tgz = [f for f in os.listdir(dist_path) if f.endswith(".tar.gz")][0] check_call(['tar', 'xzf', tgz], cwd=dist_path) demo_so = find_so(r"a.*\.so", dist_path) self.assertIsNotNone(demo_so) shutil.rmtree(dist_path) pythran-0.10.0+ds2/pythran/tests/test_distutils/000077500000000000000000000000001416264035500216615ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/test_distutils/a.py000066400000000000000000000000771416264035500224570ustar00rootroot00000000000000#pythran export a() import numpy def a(): return numpy.ones(1) 
pythran-0.10.0+ds2/pythran/tests/test_distutils/setup.py000066400000000000000000000004751416264035500234010ustar00rootroot00000000000000from distutils.core import setup from pythran.dist import PythranExtension, PythranBuildExt module1 = PythranExtension('demo', sources = ['a.py']) setup(name = 'demo', version = '1.0', description = 'This is a demo package', cmdclass={"build_ext": PythranBuildExt}, ext_modules = [module1]) pythran-0.10.0+ds2/pythran/tests/test_distutils_numpy/000077500000000000000000000000001416264035500231115ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/test_distutils_numpy/demo3/000077500000000000000000000000001416264035500241205ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/test_distutils_numpy/demo3/a.py000066400000000000000000000000771416264035500247160ustar00rootroot00000000000000#pythran export a() import numpy def a(): return numpy.ones(1) pythran-0.10.0+ds2/pythran/tests/test_distutils_numpy/setup.py000066400000000000000000000006451416264035500246300ustar00rootroot00000000000000from numpy.distutils.core import setup from numpy.distutils.command.build_ext import build_ext as npy_build_ext from pythran.dist import PythranExtension, PythranBuildExt module1 = PythranExtension('demo3.a', sources = ['demo3/a.py']) setup(name = 'demo3', version = '1.0', description = 'This is a demo package', cmdclass={"build_ext": PythranBuildExt[npy_build_ext]}, ext_modules = [module1]) pythran-0.10.0+ds2/pythran/tests/test_distutils_packaged/000077500000000000000000000000001416264035500235005ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/test_distutils_packaged/a.py000066400000000000000000000000461416264035500242720ustar00rootroot00000000000000#pythran export a() def a(): return 1 
pythran-0.10.0+ds2/pythran/tests/test_distutils_packaged/demo2/000077500000000000000000000000001416264035500245065ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/test_distutils_packaged/demo2/__init__.py000066400000000000000000000000001416264035500266050ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/test_distutils_packaged/setup.py000066400000000000000000000005331416264035500252130ustar00rootroot00000000000000from distutils.core import setup, Extension from pythran.dist import PythranExtension, PythranBuildExt setup(name = 'demo2', version = '1.0', description = 'This is another demo package', packages = ['demo2'], cmdclass={"build_ext": PythranBuildExt}, ext_modules = [PythranExtension('demo2.a', sources = ['a.py'])]) pythran-0.10.0+ds2/pythran/tests/test_distutils_setuptools/000077500000000000000000000000001416264035500241625ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/test_distutils_setuptools/a.py000066400000000000000000000000771416264035500247600ustar00rootroot00000000000000#pythran export a() import numpy def a(): return numpy.ones(1) pythran-0.10.0+ds2/pythran/tests/test_distutils_setuptools/setup.py000066400000000000000000000004711416264035500256760ustar00rootroot00000000000000from setuptools import setup from pythran.dist import PythranExtension, PythranBuildExt module1 = PythranExtension('demo', sources = ['a.py']) setup(name = 'demo', version = '1.0', description = 'This is a demo package', cmdclass={"build_ext": PythranBuildExt}, ext_modules = [module1]) pythran-0.10.0+ds2/pythran/tests/test_euler.py000066400000000000000000000004531416264035500213250ustar00rootroot00000000000000import unittest from pythran.tests import TestFromDir import os import glob class TestEuler(TestFromDir): path = os.path.join(os.path.dirname(__file__),"euler") files = glob.glob(os.path.join(path,"euler*.py")) TestEuler.populate(TestEuler) if __name__ == '__main__': unittest.main() 
pythran-0.10.0+ds2/pythran/tests/test_exception.py000066400000000000000000000370251416264035500222140ustar00rootroot00000000000000from pythran.tests import TestEnv import builtins import unittest exceptions = [i for i in dir(builtins) if (isinstance(getattr(builtins, i), type) and issubclass(getattr(builtins, i), builtins.BaseException))] exception_args = { "BaseException": "('abc')", "SystemExit": "('a','b','c')", "KeyboardInterrupt": "('a','b','c')", "GeneratorExit": "('a','b','c')", "Exception": "('a','b','c')", "StopIteration": "('a','b','c')", "Warning": "('a','b','c')", "BytesWarning": "('a','b','c')", "UnicodeWarning": "('a','b','c')", "ImportWarning": "('a','b','c')", "FutureWarning": "('a','b','c')", "UserWarning": "('a','b','c')", "SyntaxWarning": "('a','b','c')", "RuntimeWarning": "('a','b','c')", "PendingDeprecationWarning": "('a','b','c')", "DeprecationWarning": "('a','b','c')", "BufferError": "('a','b','c')", "ArithmeticError": "('a','b','c')", "AssertionError": "('a','b','c')", "AttributeError": "('a','b','c')", "EnvironmentError": "('a','b','c','d')", "EOFError": "('a','b','c')", "ImportError": "('a','b','c')", "LookupError": "('a','b','c')", "MemoryError": "('a','b','c')", "NameError": "('a','b','c')", "ReferenceError": "('a','b','c')", "RuntimeError": "('a','b','c')", "SyntaxError": "('a','b','c')", "SystemError": "('a','b','c')", "TypeError": "('a','b','c')", "ValueError": "('a','b','c')", "FloatingPointError": "('a','b','c')", "OverflowError": "('a','b','c')", "ZeroDivisionError": "('a','b','c')", "IOError": "('a','b','c')", "OSError": "('a','b','c')", "IndexError": "('a','b','c')", "KeyError": "('a','b','c')", "UnboundLocalError": "('a','b','c')", "NotImplementedError": "('a','b','c')", "IndentationError": "('a','b','c')", "TabError": "('a','b','c')", "UnicodeError": "('a','b','c')", } class TestException(TestEnv): def test_BaseException(self): self.run_test("def BaseException_():\n try: raise BaseException('a','b','c')\n except BaseException 
as e: return e.args", BaseException_=[]) def test_SystemExit(self): self.run_test("def SystemExit_():\n try: raise SystemExit('a','b','c')\n except SystemExit as e: return e.args", SystemExit_=[]) def test_KeyboardInterrupt(self): self.run_test("def KeyboardInterrupt_():\n try: raise KeyboardInterrupt('a','b','c')\n except KeyboardInterrupt as e: return e.args", KeyboardInterrupt_=[]) def test_GeneratorExit(self): self.run_test("def GeneratorExit_():\n try: raise GeneratorExit('a','b','c')\n except GeneratorExit as e: return e.args", GeneratorExit_=[]) def test_Exception(self): self.run_test("def Exception_():\n try: raise Exception('a','b','c')\n except Exception as e: return e.args", Exception_=[]) def test_StopIteration(self): self.run_test("def StopIteration_():\n try: raise StopIteration('a','b','c')\n except StopIteration as e: return e.args", StopIteration_=[]) def test_Warning(self): self.run_test("def Warning_():\n try: raise Warning('a','b','c')\n except Warning as e: return e.args", Warning_=[]) def test_BytesWarning(self): self.run_test("def BytesWarning_():\n try: raise BytesWarning('a','b','c')\n except BytesWarning as e: return e.args", BytesWarning_=[]) def test_UnicodeWarning(self): self.run_test("def UnicodeWarning_():\n try: raise UnicodeWarning('a','b','c')\n except UnicodeWarning as e: return e.args", UnicodeWarning_=[]) def test_ImportWarning(self): self.run_test("def ImportWarning_():\n try: raise ImportWarning('a','b','c')\n except ImportWarning as e: return e.args", ImportWarning_=[]) def test_FutureWarning(self): self.run_test("def FutureWarning_():\n try: raise FutureWarning('a','b','c')\n except FutureWarning as e: return e.args", FutureWarning_=[]) def test_UserWarning(self): self.run_test("def UserWarning_():\n try: raise UserWarning('a','b','c')\n except UserWarning as e: return e.args", UserWarning_=[]) def test_SyntaxWarning(self): self.run_test("def SyntaxWarning_():\n try: raise SyntaxWarning('a','b','c')\n except SyntaxWarning as 
e: return e.args", SyntaxWarning_=[]) def test_RuntimeWarning(self): self.run_test("def RuntimeWarning_():\n try: raise RuntimeWarning('a','b','c')\n except RuntimeWarning as e: return e.args", RuntimeWarning_=[]) def test_PendingDeprecationWarning(self): self.run_test("def PendingDeprecationWarning_():\n try: raise PendingDeprecationWarning('a','b','c')\n except PendingDeprecationWarning as e: return e.args", PendingDeprecationWarning_=[]) def test_DeprecationWarning(self): self.run_test("def DeprecationWarning_():\n try: raise DeprecationWarning('a','b','c')\n except DeprecationWarning as e: return e.args", DeprecationWarning_=[]) def test_BufferError(self): self.run_test("def BufferError_():\n try: raise BufferError('a','b','c')\n except BufferError as e: return e.args", BufferError_=[]) def test_ArithmeticError(self): self.run_test("def ArithmeticError_():\n try: raise ArithmeticError('a','b','c')\n except ArithmeticError as e: return e.args", ArithmeticError_=[]) @unittest.skip("incompatible with py.test") def test_AssertionError(self): self.run_test("def AssertionError_():\n try: raise AssertionError('a','b','c')\n except AssertionError as e: return e.args", AssertionError_=[]) def test_AttributeError(self): self.run_test("def AttributeError_():\n try: raise AttributeError('a','b','c')\n except AttributeError as e: return e.args", AttributeError_=[]) def test_EnvironmentError2(self): self.run_test("def EnvironmentError2_():\n try: raise EnvironmentError('a','b')\n except EnvironmentError as e: return e.args", EnvironmentError2_=[]) def test_EnvironmentError1(self): self.run_test("def EnvironmentError1_():\n try: raise EnvironmentError('a')\n except EnvironmentError as e: return e.args", EnvironmentError1_=[]) def test_EOFError(self): self.run_test("def EOFError_():\n try: raise EOFError('a','b','c')\n except EOFError as e: return e.args", EOFError_=[]) def test_ImportError(self): self.run_test("def ImportError_():\n try: raise ImportError('a','b','c')\n 
except ImportError as e: return e.args", ImportError_=[]) def test_LookupError(self): self.run_test("def LookupError_():\n try: raise LookupError('a','b','c')\n except LookupError as e: return e.args", LookupError_=[]) def test_MemoryError(self): self.run_test("def MemoryError_():\n try: raise MemoryError('a','b','c')\n except MemoryError as e: return e.args", MemoryError_=[]) def test_NameError(self): self.run_test("def NameError_():\n try: raise NameError('a','b','c')\n except NameError as e: return e.args", NameError_=[]) def test_ReferenceError(self): self.run_test("def ReferenceError_():\n try: raise ReferenceError('a','b','c')\n except ReferenceError as e: return e.args", ReferenceError_=[]) def test_RuntimeError(self): self.run_test("def RuntimeError_():\n try: raise RuntimeError('a','b','c')\n except RuntimeError as e: return e.args", RuntimeError_=[]) def test_SyntaxError(self): self.run_test("def SyntaxError_():\n try: raise SyntaxError('a','b','c')\n except SyntaxError as e: return e.args", SyntaxError_=[]) def test_SystemError(self): self.run_test("def SystemError_():\n try: raise SystemError('a','b','c')\n except SystemError as e: return e.args", SystemError_=[]) def test_TypeError(self): self.run_test("def TypeError_():\n try: raise TypeError('a','b','c')\n except TypeError as e: return e.args", TypeError_=[]) def test_ValueError(self): self.run_test("def ValueError_():\n try: raise ValueError('a','b','c')\n except ValueError as e: return e.args", ValueError_=[]) def test_FloatingPointError(self): self.run_test("def FloatingPointError_():\n try: raise FloatingPointError('a','b','c')\n except FloatingPointError as e: return e.args", FloatingPointError_=[]) def test_OverflowError(self): self.run_test("def OverflowError_():\n try: raise OverflowError('a','b','c')\n except OverflowError as e: return e.args", OverflowError_=[]) def test_ZeroDivisionError(self): self.run_test("def ZeroDivisionError_():\n try: raise ZeroDivisionError('a','b','c')\n except 
ZeroDivisionError as e:\n return e.args", ZeroDivisionError_=[]) def test_IOError(self): self.run_test("def IOError_():\n try: raise IOError('a','b','c')\n except IOError as e: return e.args", IOError_=[]) def test_OSError(self): self.run_test("def OSError_():\n try: raise OSError('a','b','c')\n except OSError as e: return e.args", OSError_=[]) def test_IndexError(self): self.run_test("def IndexError_():\n try: raise IndexError('a','b','c')\n except IndexError as e: return e.args", IndexError_=[]) def test_KeyError(self): self.run_test("def KeyError_():\n try: raise KeyError('a','b','c')\n except KeyError as e: return e.args", KeyError_=[]) def test_UnboundLocalError(self): self.run_test("def UnboundLocalError_():\n try: raise UnboundLocalError('a','b','c')\n except UnboundLocalError as e: return e.args", UnboundLocalError_=[]) def test_NotImplementedError(self): self.run_test("def NotImplementedError_():\n try: raise NotImplementedError('a','b','c')\n except NotImplementedError as e: return e.args", NotImplementedError_=[]) def test_IndentationError(self): self.run_test("def IndentationError_():\n try: raise IndentationError('a','b','c')\n except IndentationError as e: return e.args", IndentationError_=[]) def test_TabError(self): self.run_test("def TabError_():\n try: raise TabError('a','b','c')\n except TabError as e: return e.args", TabError_=[]) def test_UnicodeError(self): self.run_test("def UnicodeError_():\n try: raise UnicodeError('a','b','c')\n except UnicodeError as e: return e.args", UnicodeError_=[]) def test_multiple_exception(self): self.run_test("def multiple_exception_():\n try:\n raise OverflowError('a','b','c')\n except IOError as e:\n a=2 ; print(a) ; return e.args\n except OverflowError as e:\n return e.args", multiple_exception_=[]) def test_multiple_tuple_exception(self): self.run_test("def multiple_tuple_exception_():\n try:\n raise OverflowError('a','b','c')\n except (IOError, OSError):\n a=3;print(a)\n except OverflowError as e:\n return 
e.args", multiple_tuple_exception_=[]) def test_reraise_exception(self): self.run_test("def reraise_exception_():\n try:\n raise OverflowError('a','b','c')\n except IOError:\n raise\n except: return 'ok'", reraise_exception_=[]) def test_else2_exception(self): self.run_test("def else2_exception_():\n try:\n raise 1\n return 0,'bad'\n except:\n a=2\n else:\n return 0,'bad2'\n return a,'ok'", else2_exception_=[]) def test_else_exception(self): self.run_test("def else_exception_():\n try:\n a=2\n except:\n return 0,'bad'\n else:\n return a,'ok'\n return 0,'bad2'", else_exception_=[]) def test_enverror_exception(self): self.run_test("def enverror_exception_():\n try:\n raise EnvironmentError('a','b','c')\n except EnvironmentError as e:\n return (e.errno,e.strerror,e.filename)", enverror_exception_=[]) def test_finally_exception(self): self.run_test("def finally_exception_():\n try:\n a=2\n except:\n return 0,'bad'\n finally:\n return a,'good'", finally_exception_=[]) def test_finally2_exception(self): self.run_test("def finally2_exception_():\n try:\n a=1\n except:\n a=2\n finally:\n return a,'good'", finally2_exception_=[]) def test_str1_exception(self): self.run_test("def str1_exception_():\n try:\n raise EnvironmentError('a')\n except EnvironmentError as e:\n return str(e)", str1_exception_=[]) def test_str2_exception(self): self.run_test("def str2_exception_():\n try:\n raise EnvironmentError('a','b')\n except EnvironmentError as e:\n return str(e)", str2_exception_=[]) def test_str3_exception(self): self.run_test("def str3_exception_():\n try:\n raise EnvironmentError('a','b','c')\n except EnvironmentError as e:\n return str(e)", str3_exception_=[]) def test_no_msg_exception(self): self.run_test("def no_msg_exception_():\n try: raise IndexError()\n except IndexError as e: return e.args", no_msg_exception_=[]) # test if exception translators are registered in pythran def test_EnvironmentError3_register(self): self.run_test("def EnvironmentError3_register():\n raise 
EnvironmentError('a','b','c')", EnvironmentError3_register=[], check_exception=True) def test_EnvironmentError2_register(self): self.run_test("def EnvironmentError2_register():\n raise EnvironmentError('a','b')", EnvironmentError2_register=[], check_exception=True) def test_EnvironmentError1_register(self): self.run_test("def EnvironmentError1_register():\n raise EnvironmentError('a')", EnvironmentError1_register=[], check_exception=True) def test_multiple_exception_register(self): self.run_test("def multiple_exception_register():\n raise OverflowError('a','b','c')", multiple_exception_register=[], check_exception=True) def test_multiple_tuple_exception_register(self): self.run_test("def multiple_tuple_exception_register():\n raise OverflowError('a','b','c')", multiple_tuple_exception_register=[], check_exception=True) def test_reraise_exception_register(self): self.run_test("def reraise_exception_register():\n raise OverflowError('a','b','c')", reraise_exception_register=[], check_exception=True) def test_enverror_exception_register(self): self.run_test("def enverror_exception_register():\n raise EnvironmentError('a','b','c')", enverror_exception_register=[], check_exception=True) def test_str1_exception_register(self): self.run_test("def str1_exception_register():\n raise EnvironmentError('a')", str1_exception_register=[], check_exception=True) def test_str2_exception_register(self): self.run_test("def str2_exception_register():\n raise EnvironmentError('a','b')", str2_exception_register=[], check_exception=True) def test_str3_exception_register(self): self.run_test("def str3_exception_register():\n raise EnvironmentError('a','b','c')", str3_exception_register=[], check_exception=True) def test_str4_exception_register(self): self.run_test("def str4_exception_register():\n raise EnvironmentError('a','b','c','d')", str4_exception_register=[], check_exception=True) def test_str5_exception_register(self): self.run_test("def str5_exception_register():\n raise 
EnvironmentError('a','b','c','d','e')", str5_exception_register=[], check_exception=True) def test_no_msg_exception_register(self): self.run_test("def no_msg_exception_register():\n raise IndexError()", no_msg_exception_register=[], check_exception=True) for exception in exceptions: # This one is not compatible with pytest if str(exception) in ("AssertionError", "UnicodeDecodeError", "UnicodeEncodeError", "UnicodeTranslateError"): continue if exception not in exception_args: continue args = exception_args[exception] code = 'def {exception}_register(): raise {exception}{args}'.format(**locals()) setattr(TestException, 'test_' + str(exception) + "_register", eval("""lambda self: self.run_test('''{0}''', {1}_register=[], check_exception=True)""".format(code, exception))) pythran-0.10.0+ds2/pythran/tests/test_file.py000066400000000000000000000244471416264035500211410ustar00rootroot00000000000000from tempfile import mkstemp from pythran.tests import TestEnv import unittest from pythran.typing import List class TestFile(TestEnv): def __init__(self, *args, **kwargs): super(TestFile, self).__init__(*args, **kwargs) self.file_content = """azerty\nqwerty\n\n""" def tempfile(self): filename=mkstemp()[1] with open(filename,"w") as f: f.write(self.file_content) self.filename = filename return filename def reinit_file(self): with open(self.filename,"w") as f: f.write(self.file_content) return self.filename def test_filename_only_constructor(self): filename=mkstemp()[1] self.run_test("def filename_only_constructor(filename):\n open(filename)", filename, filename_only_constructor=[str]) def test_open(self): filename=mkstemp()[1] self.run_test("def _open(filename):\n open(filename)", filename, _open=[str]) def test_open_write(self): filename=mkstemp()[1] self.run_test("""def _open_write(filename):\n f=open(filename,"w+")\n f.write("azert")""", filename, _open_write=[str]) self.assertEqual(open(filename).read(), "azert") def test_open_append(self): filename=mkstemp()[1] 
self.run_test("""def _open_append(filename):\n f=open(filename,"a")\n f.write("azert")""", filename, _open_append=[str]) self.assertEqual(open(filename).read(), "azert"*2) def test_writing_mode_constructor(self): # Expecting file to be erased. # But python execution of test will erase it before pythran can :s self.tempfile() self.run_test("""def writing_mode_constructor(filename):\n f=open(filename, "w")\n f.close()""", self.filename,prelude=self.reinit_file, writing_mode_constructor=[str]) self.assertEqual(open(self.filename).read(), "") #TODO : tester le differents modes du constructeur def test_write(self): self.filename=mkstemp()[1] content="""q2\naze23\n""" self.run_test("""def _write(filename):\n f=open(filename,'a+')\n n = f.write("""+str('str("""q2\naze23\n""")')+""")\n f.close()\n return n""", self.filename, _write=[str]) self.assertEqual(open(self.filename).read(), content*2) def test_writelines(self): self.filename=mkstemp()[1] content=["""azerty""", "qsdfgh", "12345524"] self.run_test("""def _writelines(filename,_content):\n f=open(filename,'a+')\n f.writelines(_content)\n f.close()""", self.filename, content, _writelines=[str, List[str]]) self.assertEqual(open(self.filename).read(), str().join(content)*2) def test_close(self): filename=mkstemp()[1] self.run_test(""" def file_close(filename): f=open(filename,'w') f.close() try: f.write("q") except:pass""", filename, file_close=[str]) def test_truncate(self): self.tempfile() self.run_test("def _truncate(filename):\n f=open(filename)\n f.truncate(3); return f.read()", self.filename, _truncate=[str]) def test_read(self): self.tempfile() self.run_test("def _read(filename):\n f=open(filename)\n return f.read()", self.filename, _read=[str]) def test_read_size(self): self.tempfile() self.run_test("def _read_size(filename, size):\n f=open(filename)\n return f.read(size)", self.filename, 10, _read_size=[str, int]) def test_read_oversize(self): self.tempfile() self.run_test("def _read_oversize(filename, size):\n 
f=open(filename)\n return f.read(size)", self.filename, len(self.file_content)+5, _read_oversize=[str, int]) def test_readline(self): self.tempfile() self.run_test("def _readline(filename):\n f=open(filename)\n return [f.readline(),f.readline(), f.readline(),f.readline(),f.readline()]", self.filename, _readline=[str]) def test_readline_size(self): self.tempfile() self.run_test("def _readline_size(filename):\n f=open(filename)\n return [f.readline(7),f.readline(3),f.readline(4),f.readline(),f.readline(10)]", self.filename, _readline_size=[str]) def test_readline_size_bis(self): self.tempfile() self.run_test("def _readline_size_bis(filename):\n f=open(filename)\n return [f.readline(4),f.readline(3),f.readline(10),f.readline(),f.readline(5)]", self.filename, _readline_size_bis=[str]) def test_readlines(self): self.tempfile() self.run_test("def _readlines(filename):\n f=open(filename)\n return f.readlines()", self.filename, _readlines=[str]) def test_offset_read(self): self.tempfile() self.run_test("""def _offset_read(filename):\n f=open(filename)\n f.seek(5)\n return f.read()""", self.filename, _offset_read=[str]) def test_offset_write(self): self.tempfile() self.run_test("""def _offset_write(filename):\n f=open(filename, "a")\n f.seek(5)\n f.write("aze")\n f.close()\n return open(filename,"r").read()""", self.filename, prelude = self.reinit_file, _offset_write=[str]) def test_next(self): self.tempfile() self.run_test("""def _next(filename):\n f=open(filename)\n return [next(f), next(f)]""", self.filename, _next=[str]) def test_iter(self): self.tempfile() self.run_test("""def _iter(filename):\n f=open(filename)\n return [l for l in f]""", self.filename, _iter=[str]) def test_fileno(self): self.tempfile() # Useless to check if same fileno, just checking if fct can be called self.run_test("""def _fileno(filename):\n f=open(filename)\n a=f.fileno()\n return a!= 0""", self.filename, _fileno=[str]) def test_isatty(self): self.tempfile() self.run_test("""def 
_isatty(filename):\n f=open(filename)\n return f.isatty()""", self.filename, _isatty=[str]) def test_truncate(self): self.tempfile() self.run_test("""def _truncate(filename):\n f=open(filename, 'a')\n f.seek(3)\n f.truncate()\n f.close()\n return open(filename).read()""", self.filename, _truncate=[str]) def test_truncate_size(self): self.tempfile() self.run_test("""def _truncate_size(filename):\n f=open(filename, 'a')\n f.truncate(4)\n f.close()\n return open(filename).read()""", self.filename, _truncate_size=[str]) def test_flush(self): self.tempfile() # Don't know how to check properly, just checking fct call. self.run_test("""def _flush(filename):\n f=open(filename, 'a')\n f.flush()""", self.filename, _flush=[str]) def test_tell(self): self.tempfile() self.run_test("""def _tell(filename):\n f=open(filename)\n f.read(3)\n return f.tell()""", self.filename, _tell=[str]) def test_seek(self): self.tempfile() self.run_test("""def _seek(filename):\n f=open(filename, 'a')\n f.seek(3)\n return f.tell()""", self.filename, _seek=[str]) def test_attribute_closed(self): self.tempfile() self.run_test("""def _attribute_closed(filename):\n f=open(filename, 'a')\n return f.closed""", self.filename, _attribute_closed=[str]) def test_attribute_name(self): self.tempfile() self.run_test("""def _attribute_name(filename):\n return open(filename, 'a').name""", self.filename, _attribute_name=[str]) def test_attribute_mode(self): self.tempfile() self.run_test("""def _attribute_mode(filename):\n return open(filename, 'a').mode""", self.filename, _attribute_mode=[str]) def test_attribute_newlines(self): self.tempfile() self.run_test("""def _attribute_newlines(filename):\n return open(filename, 'a').newlines""", self.filename, _attribute_newlines=[str]) def test_map_iter(self): self.tempfile() self.run_test("""def _map_iter(filename):\n f=open(filename)\n return list(map(lambda s: len(s), f))""", self.filename, _map_iter=[str]) # The following tests insures the PROXY compatibility with 
rvalues def test_rvalue_write(self): self.filename=mkstemp()[1] self.run_test("""def _rvalue_write(filename):\n open(filename,'a+').write("aze")""", self.filename, _rvalue_write=[str]) def test_rvalue_writelines(self): self.filename=mkstemp()[1] self.run_test("""def _rvalue_writelines(filename):\n open(filename,'a+').writelines(["azerty", "qsdfgh", "12345524"])""", self.filename, _rvalue_writelines=[str]) def test_rvalue_close(self): filename=mkstemp()[1] self.run_test(""" def _rvalue_close(filename): open(filename,'w').close()""", filename, _rvalue_close=[str]) def test_rvalue_read(self): self.tempfile() self.run_test("def _rvalue_read(filename):\n return open(filename).read()", self.filename, _rvalue_read=[str]) def test_rvalue_readline(self): self.tempfile() self.run_test("def _rvalue_readline(filename):\n return open(filename).readline()", self.filename, _rvalue_readline=[str]) def test_rvalue_readlines(self): self.tempfile() self.run_test("def _rvalue_readlines(filename):\n return open(filename).readlines()", self.filename, _rvalue_readlines=[str]) def test_rvalue_next(self): self.tempfile() self.run_test("""def _rvalue_next(filename):\n return next(open(filename))""", self.filename, _rvalue_next=[str]) def test_rvalue_fileno(self): self.tempfile() # Useless to check if same fileno, just checking if fct can be called self.run_test("""def _rvalue_fileno(filename):\n open(filename).fileno()""", self.filename, _rvalue_fileno=[str]) def test_rvalue_isatty(self): self.tempfile() self.run_test("""def _rvalue_isatty(filename):\n return open(filename).isatty()""", self.filename, _rvalue_isatty=[str]) def test_rvalue_truncate(self): self.tempfile() self.run_test("""def _rvalue_truncate(filename):\n open(filename, 'a').truncate(3)""", self.filename, _rvalue_truncate=[str]) def test_rvalue_flush(self): self.tempfile() self.run_test("""def _rvalue_flush(filename):\n open(filename, 'a').flush()""", self.filename, _rvalue_flush=[str]) def test_rvalue_tell(self): 
self.tempfile() self.run_test("""def _rvalue_tell(filename):\n return open(filename, 'a').tell()""", self.filename, _rvalue_tell=[str]) def test_rvalue_seek(self): self.tempfile() self.run_test("""def _rvalue_seek(filename):\n open(filename, 'a').seek(3)""", self.filename, _rvalue_seek=[str]) pythran-0.10.0+ds2/pythran/tests/test_format.py000066400000000000000000000017341416264035500215040ustar00rootroot00000000000000"""Module to check coding style.""" import glob import os import subprocess import unittest excluded_files = ['pdqsort.hpp'] def test_clang_format(): """Check coding style for cpp files.""" clang_format = 'clang-format-3.6' try: subprocess.check_call([clang_format, '--version']) except OSError: raise unittest.SkipTest(clang_format + " not available") pythran_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) for root, _, _ in os.walk(os.path.join(pythran_dir, "pythonic")): for file_ in glob.glob(os.path.join(root, "*.hpp")): if os.path.basename(file_) in excluded_files: continue cmd = "{cmd} {file} | diff -u {file} -".format( cmd=clang_format, file=os.path.join(root, file_)) process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True) assert not process.wait(), process.stdout.read() pythran-0.10.0+ds2/pythran/tests/test_gwebb.py000066400000000000000000000003551416264035500213000ustar00rootroot00000000000000import unittest from pythran.tests import TestFromDir import os class TestGWebb(TestFromDir): path = os.path.join(os.path.dirname(__file__),"g webb") TestGWebb.populate(TestGWebb) if __name__ == '__main__': unittest.main() pythran-0.10.0+ds2/pythran/tests/test_import_all.py000066400000000000000000000055501416264035500223560ustar00rootroot00000000000000from pythran.tests import TestEnv from textwrap import dedent import pythran class TestImportAll(TestEnv): def test_import_all(self): self.run_test("from math import *\ndef import_all(l): return cos(l)", 3.3, import_all=[float]) def 
test_import_cmath_all(self): self.run_test("from cmath import *\ndef import_cmath_all(l): return cos(l)", 2.2, import_cmath_all=[float]) def test_import_all_cos(self): self.run_test("from math import *\nfrom math import cos\ndef import_all_cos(l): return cos(l)", 1.1, import_all_cos=[float]) def test_import_all_twice(self): self.run_test("from math import *\nfrom math import *\ndef import_all_twice(l): return cos(l)", 0.1, import_all_twice=[float]) def test_import_same_name(self): self.run_test("from math import *\ndef cos(l): return 100", 0.1, cos=[float]) def test_import_collections(self): """ Check correct error is returned for incorrect module import. Check is done for module as .py file. """ code = """ import collections def unsupported_module(): return collections.Counter()""" with self.assertRaises(pythran.syntax.PythranSyntaxError) as ex: pythran.compile_pythrancode("import_collections", dedent(code), pyonly=True) self.assertIn("Module 'collections' not found.", str(ex.exception)) def test_complex_import_manipulation0(self): """ Check correct error is returned for incorrect module manipulation. 
""" code = """ import math def unsupported_module(): return math""" with self.assertRaises(pythran.syntax.PythranSyntaxError) as ex: pythran.compile_pythrancode("complex_import_manipulation0", dedent(code), pyonly=True) def test_complex_import_manipulation1(self): code = """ import bisect def unsupported_module(): return bisect()""" with self.assertRaises(pythran.syntax.PythranSyntaxError) as ex: pythran.compile_pythrancode("complex_import_manipulation1", dedent(code), pyonly=True) def test_complex_import_manipulation2(self): code = """ from bisect import bisect_right def unsupported_module(): return bisect()""" with self.assertRaises(pythran.syntax.PythranSyntaxError) as ex: pythran.compile_pythrancode("complex_import_manipulation2", dedent(code), pyonly=True) def test_complex_import_manipulation3(self): code = """ from numpy import random def unsupported_module(): return random.i_do_not_exist()""" with self.assertRaises(pythran.syntax.PythranSyntaxError) as ex: pythran.compile_pythrancode("complex_import_manipulation3", dedent(code), pyonly=True) pythran-0.10.0+ds2/pythran/tests/test_ipython.py000066400000000000000000000010451416264035500217010ustar00rootroot00000000000000import os import subprocess from unittest import TestCase class TestIpythonMagic(TestCase): def test_loadext_and_run(self): subprocess.check_call(['ipython', os.path.join(os.path.dirname(__file__), 'ipython_script.ipy')]) def test_loadext_and_run_timeit_twice(self): subprocess.check_call(['ipython', os.path.join(os.path.dirname(__file__), 'ipython_script_timeit.ipy')]) pythran-0.10.0+ds2/pythran/tests/test_itertools.py000066400000000000000000000236321416264035500222410ustar00rootroot00000000000000from pythran.tests import TestEnv import unittest from pythran.typing import List @TestEnv.module class TestItertools(TestEnv): def test_imap(self): self.run_test("def imap_(l0,v): return sum(map(lambda x:x*v, l0))", [0,1,2], 2, imap_=[List[int], int]) def test_imap_on_generator(self): 
self.run_test("def imap_on_generator(l,v): return sum(map(lambda x:x*v, (y for x in l for y in range(x))))", [2,3,5], 1, imap_on_generator=[List[int], int]) def test_imap2(self): self.run_test("def imap2_(l0, l1,v): return sum(map(lambda x,y:x*v+y, l0, l1))", [0,1,2], [0.,1.1,2.2], 1, imap2_=[List[int], List[float], int]) def test_imap2_ineq_size(self): """ Check imap with different size for the two list operand. """ self.run_test(""" def imap2_ineq_size(l0, l1, v): return sum(map(lambda x, y : x * v + y, l0, l1))""", [0, 1, 2, 3], [0., 1.1, 2.2], 1, imap2_ineq_size=[List[int], List[float], int]) def test_imap2_on_generator(self): self.run_test("def imap2_on_generator(l0,l1,v): return sum(map(lambda x,y:x*v+y, (z*z for x in l0 for z in range(x)), (z*2 for y in l1 for z in range(y))))", [0,1,2,3], [3,2,1,0], 2, imap2_on_generator=[List[int], List[int], int]) def test_ifilter_init(self): self.run_test("def ifilter_init(l0): return list(filter(lambda x: x > 2 , l0))", [0,1,2,3,4,5], ifilter_init=[List[int]]) def test_ifilter_final(self): self.run_test("def ifilter_final(l0): return list(filter(lambda x: x < 2, l0))", [0,1,2,3,4,5], ifilter_final=[List[int]]) def test_ifilter_on_generator(self): self.run_test("def ifilterg_(l0): return list(filter(lambda x: (x % 2) == 1, (y for x in l0 for y in range(x))))", [0,1,2,3,4,5], ifilterg_=[List[int]]) def test_product(self): self.run_test("def product_(l0,l1): from itertools import product; return sum(map(lambda t : t[0]*t[1], product(l0,l1)))", [0,1,2,3,4,5], [10,11], product_=[List[int],List[int]]) def test_product_on_generator(self): self.run_test("def product_g(l0,l1): from itertools import product; return sum(map(lambda t : t[0]*t[1], product((y for x in l0 for y in range(x)),(y for x in l1 for y in range(x)))))", [0,1,2,3,4], [4,3,2,1,0], product_g=[List[int],List[int]]) def test_itertools(self): self.run_test("def test_it(l0,l1): import itertools; return sum(map(lambda t : t[0]*t[1], itertools.product(filter(lambda x 
: x > 2, l0), filter(lambda x : x < 12, l1))))", [0,1,2,3,4,5], [10,11,12,13,14,15], test_it=[List[int],List[int]]) def test_izip(self): self.run_test("def izip_(l0,l1): return sum(map(lambda t : t[0]*t[1], zip(l0,l1)))", [0,1,2], [10,11,12], izip_=[List[int],List[int]]) def test_izip_on_generator(self): self.run_test("def izipg_(l0,l1): return sum(map(lambda t : t[0]*t[1], zip((z for x in l0 for z in range(x)),(z for x in l1 for z in range(x)))))", [0,1,2,3], [3,2,1,0], izipg_=[List[int],List[int]]) def test_zip_iter(self): self.run_test(""" def zip_iter(l0): s = 0 for x in zip(l0, l0): for y in x: s += y return s""", [0,1,2], zip_iter=[List[int]]) def test_islice0(self): self.run_test("def islice0(l): from itertools import islice ; return [x for x in islice(l, 1,30,3)]", list(range(100)), islice0=[List[int]]) def test_islice1(self): self.run_test("def islice1(l): from itertools import islice ; return [x for x in islice(l, 16)]", list(range(100)), islice1=[List[int]]) def test_count0(self): self.run_test("def count0(): from itertools import count ; c = count() ; next(c); next(c); return next(c)", count0=[]) def test_count1(self): self.run_test("def count1(n): from itertools import count ; c = count(n) ; next(c); next(c); return next(c)", 100, count1=[int]) def test_count2(self): self.run_test("def count2(n): from itertools import count ; c = count(n,3.2) ; next(c); next(c); return next(c)", 100, count2=[int]) def test_count3(self): self.run_test("def count3(n):\n from itertools import count\n j = 1\n for i in count(n):\n if i == 10: return j\n else: j +=1", 1, count3=[int]) def test_next_enumerate(self): self.run_test("def next_enumerate(n): x = enumerate(n) ; next(x) ; return list(map(str, x))", list(range(5)), next_enumerate=[List[int]]) def test_next_generator(self): self.run_test("def next_generator(n): x = (i for i in range(n) for j in range(i)) ; next(x) ; return list(map(str, x))", 5, next_generator=[int]) def test_next_imap(self): self.run_test("def 
next_imap(n): x = map(abs,n) ; next(x) ; return list(map(str, x))", list(range(-5,5)), next_imap=[List[int]]) def test_next_imap_none(self): self.run_test("def next_imap_none(n): x = map(str,n) ; next(x) ; return list(map(str, x))", list(range(-5,5)), next_imap_none=[List[int]]) def test_next_ifilter(self): self.run_test("def next_ifilter(n): x = filter(abs,n) ; next(x) ; return list(map(str, x))", list(range(-5,5)), next_ifilter=[List[int]]) def test_product_next(self): self.run_test("def next_product(n): from itertools import product ; x = product(n,n) ; next(x) ; return list(map(str, x))", list(range(-5,5)), next_product=[List[int]]) def test_product_iter(self): self.run_test(""" def product_iter(n): from itertools import product s = 0 for x in product(n,n): for y in x: s += y return s""", list(range(-5,5)), product_iter=[List[int]]) def test_next_izip(self): self.run_test("def next_izip(n): x = zip(n,n) ; next(x) ; return list(map(str, x))", list(range(-5,5)), next_izip=[List[int]]) def test_next_islice(self): self.run_test("def next_islice(n): from itertools import islice ; x = islice(n,8) ; next(x) ; return list(map(str, x))", list(range(-5,5)), next_islice=[List[int]]) def test_next_count(self): self.run_test("def next_count(n): from itertools import count ; x = count(n) ; next(x) ; return next(x)", 5, next_count=[int]) def test_iter(self): self.run_test("def iter_(n): r = iter(range(5,n)) ; next(r) ; return next(r)", 12, iter_=[int]) def test_ifilter_with_nested_lambdas(self): code = ''' def ifilter_with_nested_lambdas(N): perf = lambda n: n == sum(i for i in range(1, n) if n % i == 0) return list(map(perf, range(20)))''' self.run_test(code, 10, ifilter_with_nested_lambdas=[int]) def test_combinations_on_generator(self): self.run_test("def combinations_g(l0,a): from itertools import combinations; return sum(map(lambda t : t[0]*t[1], combinations((y for x in l0 for y in range(x)),a)))", [0,1,2], 2, combinations_g=[List[int],int]) def 
test_next_combinations(self): self.run_test("def next_combinations(n): from itertools import combinations ; x = combinations(n,2) ; next(x) ; return list(map(lambda y:y, x))", list(range(5)), next_combinations=[List[int]]) def test_combinations(self): self.run_test("def combinations_(l0,a): from itertools import combinations; return sum(map(lambda t : t[0]*t[1], combinations(l0,a)))", [0,1,2,3,4,5], 2, combinations_=[List[int],int]) def test_permutations_on_generator(self): self.run_test("def permutations_g(l0,a): from itertools import permutations; return sum(map(lambda t : t[0]*t[1], permutations((y for x in l0 for y in range(x)),a)))", [0,1,2], 2, permutations_g=[List[int],int]) def test_next_permutations(self): self.run_test("def next_permutations(n):" " from itertools import permutations ;" " x = permutations(n,2) ;" " next(x) ;" " return list(map(str, x))", list(range(5)), next_permutations=[List[int]]) def test_permutations(self): '''Test permutation without second arg''' self.run_test("def permutations_2_(l0): " " from itertools import permutations;" " return list(permutations(l0))", [0, 1, 2, 3], permutations_2_=[List[int]]) def test_permutations_with_prefix(self): self.run_test("def permutations_(l0,a):" " from itertools import permutations;" " return list(permutations(l0,a))", [0,1,2,3,4,5], 2, permutations_=[List[int],int]) def test_imap_over_array(self): self.run_test("def imap_over_array(l):" " from numpy import arange ;" " t = tuple(map(lambda x: 1, (l,l))) ;" " return arange(10).reshape((5,2))[t]", 3, imap_over_array=[int]) def test_imap_over_several_arrays(self): self.run_test("def imap_over_several_arrays(l):" " from numpy import arange ;" " t = tuple(map(lambda x,y: 1, (l,l), (l, l, l))) ;" " return arange(10).reshape((5,2))[t]", 3, imap_over_several_arrays=[int]) def test_itertools_repeat0(self): code = 'def itertools_repeat0(n): import itertools; return list(itertools.repeat(n, n))' self.run_test(code, 3, itertools_repeat0=[int]) def 
test_itertools_repeat1(self): code = ''' def itertools_repeat1(n): import itertools s = [] i = 0 for l in itertools.repeat([n]): s.append(l) i += 1 if i < n: break return s''' self.run_test(code, 3, itertools_repeat1=[int]) pythran-0.10.0+ds2/pythran/tests/test_list.py000066400000000000000000000155141416264035500211700ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import List, NDArray import numpy as np class TestList(TestEnv): def test_contains_(self): self.run_test("def contains_(a):\n b=[1,2,3,8,7,4]\n return a in b", 8, contains_=[int]) def test_contains_slice(self): self.run_test("def contains_slice(a):\n b=[1,2,3,8,7,4]\n return a in b[1:a//2]", 8, contains_slice=[int]) def test_extend_(self): self.run_test("def extend_(a):\n b=[1,2,3]\n b.extend(a)\n return b", [1.2], extend_=[List[float]]) def test_remove_(self): self.run_test("def remove_(a):\n b=[1,2,3]\n b.remove(a)\n return b", 2, remove_=[int]) def test_index_(self): self.run_test("def index_(a):\n b=[1,2,3,8,7,4]\n return b.index(a)", 8, index_=[int]) def test_index_tuple(self): self.run_test("def index_tuple(a):\n b=[1,2,3,8,7,4]\n return tuple(b).index(a)", 1, index_tuple=[int]) def test_pop_(self): self.run_test("def pop_(a):\n b=[1,3,4,5,6,7]\n return b.pop(a)", 2, pop_=[int]) def test_popnegatif_(self): self.run_test("def popnegatif_(a):\n b=[1,3,4,5,6,7]\n return b.pop(a)", -2, popnegatif_=[int]) def test_popempty_(self): self.run_test("def popempty_():\n b=[1,3,4,5,6,7]\n return b.pop()", popempty_=[]) def test_count_(self): self.run_test("def count_(a):\n b=[1,3,4,5,3,7]\n return b.count(a)",3, count_=[int]) def test_count_slice(self): self.run_test("def count_slice(a):\n b=[1,3,4,5,3,7]\n return b[:a].count(a)",3, count_slice=[int]) def test_reverse_(self): self.run_test("def reverse_():\n b=[1,2,3]\n b.reverse()\n return b", reverse_=[]) def test_sort_(self): self.run_test("def sort_():\n b=[1,3,5,4,2]\n b.sort()\n return b", sort_=[]) def 
test_sort_key(self): self.run_test("def sort_key(n):\n b=[(1,3),(5,4),(2,n)]\n b.sort(key=lambda x: x[1])\n return b", 1, sort_key=[int]) def test_insert_(self): self.run_test("def insert_(a,b):\n c=[1,3,5,4,2]\n c.insert(a,b)\n return c",2,5, insert_=[int,int]) def test_mult_0(self): self.run_test("def mult_0(a):\n b=[1,2,3,8,7,4]\n b*=a\n return b", 8, mult_0=[int]) def test_mult_1(self): self.run_test("def mult_1(a):\n b=[1,2,3,8,7,4] * a\n return b", 8, mult_1=[int]) def test_insertneg_(self): self.run_test("def insertneg_(a,b):\n c=[1,3,5,4,2]\n c.insert(a,b)\n return c",-1,-2, insertneg_=[int,int]) def test_subscripted_slice(self): self.run_test("def subscripted_slice(l): a=l[2:6:2] ; return a[1]", list(range(10)), subscripted_slice=[List[int]]) def test_list_comparison(self): self.run_test("def list_comparison(l): return max(l)", [[1,2,3],[1,4,1],[1,4,8,9]], list_comparison=[List[List[int]]]) def test_list_comparison0(self): self.run_test("def list_comparison0(l0, l1):" " return l0 < l1, l0 == l1, l0 <= l1, l0 > l1, l0 >= l1, l0 != l1", [1,2,3], [1, 2], list_comparison0=[List[int], List[int]]) def test_list_comparison1(self): self.run_test("def list_comparison1(l0, l1):" " return l0 < l1, l0 == l1, l0 <= l1, l0 > l1, l0 >= l1, l0 != l1", [3], [1], list_comparison1=[List[int], List[int]]) def test_list_comparison2(self): self.run_test("def list_comparison2(l0, l1):" " return l0 < l1, l0 == l1, l0 <= l1, l0 > l1, l0 >= l1, l0 != l1", [3], [1, 2], list_comparison2=[List[int], List[int]]) def test_list_comparison3(self): self.run_test("def list_comparison3(l0, l1):" " return l0 < l1, l0 == l1, l0 <= l1, l0 > l1, l0 >= l1, l0 != l1", [1,2,3], [1,2,4], list_comparison3=[List[int], List[int]]) def test_list_equal_comparison_true(self): self.run_test("def list_comparison_true(l1,l2): return l1==l2", [1,2,3],[1,4,1], list_comparison_true=[List[int],List[int]]) def test_list_equal_comparison_false(self): self.run_test("def list_comparison_false(l1,l2): return l1==l2", 
[1,4,1],[1,4,1], list_comparison_false=[List[int],List[int]]) def test_list_equal_comparison_different_sizes(self): self.run_test("def list_comparison_different_sizes(l1,l2): return l1==l2", [1,4,1],[1,4,1,5], list_comparison_different_sizes=[List[int],List[int]]) def test_list_unequal_comparison_false(self): self.run_test("def list_comparison_unequal_false(l1,l2): return l1!=l2", [1,2,3],[1,4,1], list_comparison_unequal_false=[List[int],List[int]]) def test_list_unequal_comparison_true(self): self.run_test("def list_comparison_unequal_true(l1,l2): return l1!=l2", [1,4,1],[1,4,1], list_comparison_unequal_true=[List[int],List[int]]) def test_list_unequal_comparison_different_sizes(self): self.run_test("def list_unequal_comparison_different_sizes(l1,l2): return l1!=l2", [1,4,1],[1,4,1,5], list_unequal_comparison_different_sizes=[List[int],List[int]]) def test_assigned_slice(self): self.run_test("def assigned_slice(l): l[0]=l[2][1:3] ; return l", [[1,2,3],[1,4,1],[1,4,8,9]], assigned_slice=[List[List[int]]]) def test_add_list_of_arrays(self): self.run_test("def add_list_of_arrays(x, y): return x + y", [np.array([1,2])], [np.array([3,4])], add_list_of_arrays=[List[NDArray[int, :]], List[NDArray[int, :]]]) def test_slice_get_item_assign(self): self.run_test('def slice_get_item_assign(x): y = x[:]; y.remove(0); return x, y', [0, 1,2,3], slice_get_item_assign=[List[int]]) def test_init_array_empty_list(self): code = ''' def init_array_empty_list(X,f): A = [] for i in range(int(f)): if i==0: A = f*X[:,i] else: A+=f*X[:,i] return A''' self.run_test( code, np.array([[2,3],[4,5]]), 1., init_array_empty_list=[NDArray[int, :,:], float]) def test_pop_while_iterating(self): code = ''' def pop_while_iterating(n): AA=[[n,2,3],[n,2,3],[1,2,3],[1,2,3],[1,2,3],[1,n,3]] for ii in range(len(AA)): AA.pop() return AA''' self.run_test(code, 0, pop_while_iterating=[int]) 
pythran-0.10.0+ds2/pythran/tests/test_math.py000066400000000000000000000127041416264035500211440ustar00rootroot00000000000000from pythran.tests import TestEnv @TestEnv.module class TestMath(TestEnv): def test_cos_(self): self.run_test("def cos_(a):\n from math import cos\n return cos(a)", 1, cos_=[int]) def test_exp_(self): self.run_test("def exp_(a):\n from math import exp\n return exp(a)", 1, exp_=[int]) def test_sqrt_(self): self.run_test("def sqrt_(a):\n from math import sqrt\n return sqrt(a)", 1, sqrt_=[int]) def test_log10_(self): self.run_test("def log10_(a):\n from math import log10\n return log10(a)", 1, log10_=[int]) def test_isnan_(self): self.run_test("def isnan_(a):\n from math import isnan\n return isnan(a)", 1, isnan_=[int]) def test_pi_(self): self.run_test("def pi_():\n from math import pi\n return pi", pi_=[]) def test_e_(self): self.run_test("def e_():\n from math import e\n return e", e_=[]) def test_asinh_(self): self.run_test("def asinh_(a):\n from math import asinh\n return asinh(a)",1., asinh_=[float]) def test_atanh_(self): self.run_test("def atanh_(a):\n from math import atanh\n return atanh(a)",.1, atanh_=[float]) def test_acosh_(self): self.run_test("def acosh_(a):\n from math import acosh\n return acosh(a)",1, acosh_=[int]) def test_radians_(self): self.run_test("def radians_(a):\n from math import radians\n return radians(a)",1, radians_=[int]) def test_degrees_(self): self.run_test("def degrees_(a):\n from math import degrees\n return degrees(a)",1, degrees_=[int]) def test_hypot_(self): self.run_test("def hypot_(a,b):\n from math import hypot\n return hypot(a,b)",3,4, hypot_=[int,int]) def test_tanh_(self): self.run_test("def tanh_(a):\n from math import tanh\n return tanh(a)",1, tanh_=[int]) def test_cosh_(self): self.run_test("def cosh_(a):\n from math import cosh\n return cosh(a)",1., cosh_=[float]) def test_sinh_(self): self.run_test("def sinh_(a):\n from math import sinh\n return sinh(a)",1, sinh_=[int]) def test_atan_(self): 
self.run_test("def atan_(a):\n from math import atan\n return atan(a)",1, atan_=[int]) def test_atan2_(self): self.run_test("def atan2_(a,b):\n from math import atan2\n return atan2(a,b)",2,4, atan2_=[int,int]) def test_asin_(self): self.run_test("def asin_(a):\n from math import asin\n return asin(a)",1, asin_=[int]) def test_tan_(self): self.run_test("def tan_(a):\n from math import tan\n return tan(a)",1, tan_=[int]) def test_log_(self): self.run_test("def log_(a):\n from math import log\n return log(a)",1, log_=[int]) def test_log1p_(self): self.run_test("def log1p_(a):\n from math import log1p\n return log1p(a)",1, log1p_=[int]) def test_expm1_(self): self.run_test("def expm1_(a):\n from math import expm1\n return expm1(a)",1, expm1_=[int]) def test_ldexp_(self): self.run_test("def ldexp_(a,b):\n from math import ldexp\n return ldexp(a,b)",3,4, ldexp_=[int,int]) def test_fmod_(self): self.run_test("def fmod_(a,b):\n from math import fmod\n return fmod(a,b)",5.3,2, fmod_=[float,int]) def test_fabs_(self): self.run_test("def fabs_(a):\n from math import fabs\n return fabs(a)",1, fabs_=[int]) def test_copysign_(self): self.run_test("def copysign_(a,b):\n from math import copysign\n return copysign(a,b)",2,-2, copysign_=[int,int]) def test_acos_(self): self.run_test("def acos_(a):\n from math import acos\n return acos(a)",1, acos_=[int]) def test_erf_(self): self.run_test("def erf_(a):\n from math import erf\n return erf(a)",1, erf_=[int]) def test_erfc_(self): self.run_test("def erfc_(a):\n from math import erfc\n return erfc(a)",1, erfc_=[int]) def test_gamma_(self): self.run_test("def gamma_(a):\n from math import gamma\n return gamma(a)",1, gamma_=[int]) def test_lgamma_(self): self.run_test("def lgamma_(a):\n from math import lgamma\n return lgamma(a)",1, lgamma_=[int]) def test_trunc_(self): self.run_test("def trunc_(a):\n from math import trunc\n return trunc(a)",1, trunc_=[int]) def test_factorial_(self): self.run_test("def factorial_(a):\n from math 
import factorial\n return factorial(a)",2, factorial_=[int]) def test_modf_(self): self.run_test("def modf_(a):\n from math import modf\n return modf(a)",2, modf_=[int]) def test_frexp_(self): self.run_test("def frexp_(a):\n from math import frexp\n return frexp(a)",2.2, frexp_=[float]) def test_isinf_(self): self.run_test("def isinf_(a):\n from math import isinf\n n=1\n while not isinf(a):\n a=a*a\n n+=1\n return isinf(a)", 2., isinf_=[float]) def test_pow_accuracy(self): code = ''' from math import factorial def pow_accuracy(N, i): N = N ** i p = 0.0000001 * 1.0 binomial_coef = 1. * factorial(N) / factorial(i) / factorial(N-i) pp = binomial_coef * p**i * (1-p)**(N-i) return pp''' self.run_test(code, 3, 2, pow_accuracy=[int, int]) def test_pow_array_accuracy(self): code = ''' import numpy as np def pow_array_accuracy(N, i): p = np.arange(N) * 0.0000001 pp = p**i * (1-p)**(N-i) return pp''' self.run_test(code, 3, 2, pow_array_accuracy=[int, int]) pythran-0.10.0+ds2/pythran/tests/test_named_parameters.py000066400000000000000000000073731416264035500235300ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.syntax import PythranSyntaxError class TestNamedParameters(TestEnv): def test_call_with_named_argument(self): self.run_test(""" def foo(a): return a def call_with_named_argument(n): return foo(a=n)""", 1, call_with_named_argument=[int]) def test_call_with_named_arguments(self): self.run_test(""" def foo(a,b): return a / b def call_with_named_arguments(n): return foo(b=n, a=2*n)""", 1, call_with_named_arguments=[int]) def test_call_with_args_and_named_argument(self): self.run_test(""" def foo(a, b): return a - b def call_with_args_and_named_argument(m,n): return foo(m, b=n)""", 1, 2, call_with_args_and_named_argument=[int, int]) def test_call_with_args_and_named_arguments(self): self.run_test(""" def foo(a,b,c): return c + a / b def call_with_args_and_named_arguments(n, m): return foo(m, c=2*n, b=n)""", 1, 2, 
call_with_args_and_named_arguments=[int, int]) def test_call_with_default_and_named_argument(self): self.run_test(""" def foo(a, b=1): return a - b def call_with_default_and_named_argument(m,n): return foo(a=m)""", 1, 2, call_with_default_and_named_argument=[int, int]) def test_call_with_default_and_named_arguments(self): self.run_test(""" def foo(a,b,c=1): return c + a / b def call_with_default_and_named_arguments(n, m): return foo(m, b=n)""", 1, 2, call_with_default_and_named_arguments=[int, int]) def test_intrinsic_named_argument(self): """ Check named arguments with attributes as value. """ self.run_test(""" def intrinsic_named_argument(n): import numpy return numpy.ones(n, dtype=numpy.uint8).nbytes""", 4, intrinsic_named_argument=[int]) def test_intrinsic_named_argument_without_default(self): self.run_test(""" def intrinsic_named_argument_without_default(n): import numpy as np return np.expand_dims(np.ones(n), axis=0)""", 4, intrinsic_named_argument_without_default=[int]) def test_nested_function_with_named_arguments(self): self.run_test(''' def nested_function_with_named_arguments(a): b = a * 2 def foo(c): return b + c return foo(c=a)''', 4, nested_function_with_named_arguments=[int]) def test_nested_function_with_several_named_arguments(self): self.run_test(''' def nested_function_with_several_named_arguments(a): b = a * 2 def foo(c, e): return b + c + e return foo(e = 4, c=a)''', 4, nested_function_with_several_named_arguments=[int]) def test_aliasing_functions_with_named_arguments(self): self.run_test(''' def aliasing_functions_with_named_arguments(n): import numpy if n > 10: my = numpy.ones else: my = numpy.zeros return my(n, dtype=numpy.uint8).nbytes''', 4, aliasing_functions_with_named_arguments=[int]) def test_aliasing_functions_with_different_structural_types(self): with self.assertRaises(PythranSyntaxError): self.run_test(''' def aliasing_functions_with_different_structural_types(n): import numpy if n > 10: my = sum else: my = numpy.zeros return 
my(n, dtype=numpy.uint8).nbytes''', 4, aliasing_functions_with_different_structural_types=[int]) def test_default_argument_all_filled(self): code = ''' def default_argument_all_filled(x): return test2(x,2) def test2(a, b=3): return a, b''' self.run_test(code, 10, default_argument_all_filled=[int]) pythran-0.10.0+ds2/pythran/tests/test_ndarray.py000066400000000000000000001560441416264035500216610ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import NDArray, Tuple, List import numpy from distutils.version import LooseVersion import unittest try: numpy.float128 has_float128 = True except AttributeError: has_float128 = False huge = numpy.iinfo(numpy.intp).max // numpy.intp().itemsize def raisesMemoryError(): try: numpy.ones(huge) return False except MemoryError: return True except ValueError: return False @TestEnv.module class TestNdarray(TestEnv): @unittest.skipIf(not raisesMemoryError(), "memory error not triggered on that arch") def test_ndarray_memory_error(self): code = 'def ndarray_memory_error(n): import numpy as np; return np.ones(n)' self.run_test(code, huge, ndarray_memory_error=[numpy.intp], check_exception=True) def test_ndarray_intc(self): self.run_test('def ndarray_intc(a): import numpy as np; return np.intc(a), np.array([a, a], dtype=np.intc)', numpy.intc(5), ndarray_intc=[numpy.intc]) def test_ndarray_uintc(self): self.run_test('def ndarray_uintc(a): import numpy as np; return np.uintc(a), np.array([a, a], dtype=np.uintc)', numpy.uintc(5), ndarray_uintc=[numpy.uintc]) def test_ndarray_intp(self): self.run_test('def ndarray_intp(a): import numpy as np; return np.intp(a), np.array([a, a], dtype=np.intp)', numpy.intp(-5), ndarray_intp=[numpy.intp]) def test_ndarray_uintp(self): self.run_test('def ndarray_uintp(a): import numpy as np; return np.uintp(a), np.array([a, a], dtype=np.uintp)', numpy.uintp(5), ndarray_uintp=[numpy.uintp]) def test_ndarray_real_attr_read(self): self.run_test('def ndarray_real_attr_read(a): 
return a.real + 1', numpy.arange(100, dtype=numpy.complex128).reshape((10, 10)), ndarray_real_attr_read=[NDArray[complex, :, :]]) def test_ndarray_imag_attr_read(self): self.run_test('def ndarray_imag_attr_read(a): return a.imag + 1', 1j * numpy.arange(10, dtype=numpy.complex128), ndarray_imag_attr_read=[NDArray[complex, :]]) def test_ndarray_real_attr_read_complex64(self): self.run_test('def ndarray_real_attr_read_complex64(a): return a.real + 1', numpy.arange(100, dtype=numpy.complex64).reshape((10, 10)), ndarray_real_attr_read_complex64=[NDArray[numpy.complex64, :, :]]) def test_ndarray_imag_attr_read_complex64(self): self.run_test('def ndarray_imag_attr_read_complex64(a): return a.imag + 1', 1j * numpy.arange(10, dtype=numpy.complex64), ndarray_imag_attr_read_complex64=[NDArray[numpy.complex64, :]]) def test_ndarray_real_attr_write(self): self.run_test('def ndarray_real_attr_write(a): a.real = 1 ; return a', numpy.arange(100, dtype=numpy.complex128).reshape((10, 10)), ndarray_real_attr_write=[NDArray[complex, :, :]]) def test_ndarray_imag_attr_write(self): self.run_test('def ndarray_imag_attr_write(a): a.imag = 1 ; return a', 1j * numpy.arange(10, dtype=numpy.complex128), ndarray_imag_attr_write=[NDArray[complex, :]]) def test_ndarray_real_fun_read(self): self.run_test('def ndarray_real_fun_read(a): import numpy as np ; return np.real(a)[1:]', numpy.arange(100, dtype=numpy.complex128).reshape((10, 10)), ndarray_real_fun_read=[NDArray[complex, :, :]]) def test_ndarray_imag_fun_read(self): self.run_test('def ndarray_imag_fun_read(a): import numpy as np ; return - np.imag(a)', 1j * numpy.arange(10, dtype=numpy.complex128), ndarray_imag_fun_read=[NDArray[complex, :]]) def test_ndarray_real_vexpr_read(self): self.run_test('def ndarray_real_vexpr_read(a): import numpy as np ; return a[np.argsort(a)].real', numpy.arange(10, dtype=numpy.complex128), ndarray_real_vexpr_read=[NDArray[complex, :]]) def test_ndarray_imag_vexpr_read(self): self.run_test('def 
ndarray_imag_vexpr_read(a): import numpy as np ; return a[np.argsort(a)].imag', 1j * numpy.arange(10, dtype=numpy.complex128), ndarray_imag_vexpr_read=[NDArray[complex, :]]) def test_numpy_augassign0(self): self.run_test('def numpy_augassign0(a): a+=1; return a', numpy.arange(100).reshape((10, 10)), numpy_augassign0=[NDArray[int, :, :]]) def test_numpy_augassign1(self): self.run_test('def numpy_augassign1(a): a*=2; return a', numpy.arange(100).reshape((10, 10)), numpy_augassign1=[NDArray[int, :, :]]) def test_numpy_augassign2(self): self.run_test('def numpy_augassign2(a): a-=2; return a', numpy.arange(100).reshape((10, 10)), numpy_augassign2=[NDArray[int, :, :]]) def test_numpy_augassign3(self): self.run_test('def numpy_augassign3(a): a//=2; return a', numpy.arange(100).reshape((10, 10)), numpy_augassign3=[NDArray[int, :, :]]) def test_numpy_augassign4(self): self.run_test('def numpy_augassign4(a): a|=2; return a', numpy.arange(100).reshape((10, 10)), numpy_augassign4=[NDArray[int, :, :]]) def test_numpy_augassign5(self): self.run_test('def numpy_augassign5(a): a&=2; return a', numpy.arange(100).reshape((10, 10)), numpy_augassign5=[NDArray[int, :, :]]) def test_numpy_augassign6(self): self.run_test('def helper(x): x //= 2; x+=3\ndef numpy_augassign6(a): a&=2; helper(a); return a', numpy.arange(100).reshape((10, 10)), numpy_augassign6=[NDArray[int, :, :]]) def test_numpy_faugassign0(self): self.run_test('def numpy_faugassign0(a): a[a>5]+=1; return a', numpy.arange(100), numpy_faugassign0=[NDArray[int, :]]) def test_numpy_faugassign1(self): self.run_test('def numpy_faugassign1(a): a[a>3]*=2; return a', numpy.arange(100), numpy_faugassign1=[NDArray[int, :]]) def test_numpy_faugassign2(self): self.run_test('def numpy_faugassign2(a): a[a>30]-=2; return a', numpy.arange(100), numpy_faugassign2=[NDArray[int, :]]) def test_numpy_faugassign3(self): self.run_test('def numpy_faugassign3(a): a[a<40]//=2; return a', numpy.arange(100), numpy_faugassign3=[NDArray[int, :]]) def 
test_numpy_faugassign4(self): self.run_test('def numpy_faugassign4(a): a[a<4]|=2; return a', numpy.arange(100), numpy_faugassign4=[NDArray[int, :]]) def test_numpy_faugassign5(self): self.run_test('def numpy_faugassign5(a): a[a>8]&=2; return a', numpy.arange(100), numpy_faugassign5=[NDArray[int, :]]) def test_broadcast0(self): self.run_test('def numpy_broadcast0(a): a[0] = 1 ; return a', numpy.arange(100).reshape((10, 10)), numpy_broadcast0=[NDArray[int, :, :]]) def test_broadcast1(self): self.run_test('def numpy_broadcast1(a): a[1:-1] = 1 ; return a', numpy.arange(100).reshape((10, 10)), numpy_broadcast1=[NDArray[int, :, :]]) def test_broadcast2(self): self.run_test('def numpy_broadcast2(a): a[1:-1,1:-1] = 1 ; return a', numpy.arange(100).reshape((10, 10)), numpy_broadcast2=[NDArray[int, :, :]]) def test_broadcast3(self): self.run_test('def numpy_broadcast3(a): a[1:-1,1] = 1 ; return a', numpy.arange(100).reshape((10, 10)), numpy_broadcast3=[NDArray[int, :, :]]) def test_broadcast4(self): self.run_test('def numpy_broadcast4(a): a[:,1,1] = 1 ; return a', numpy.arange(100).reshape((5,5,4)), numpy_broadcast4=[NDArray[int, :, :, :]]) def test_broadcast5(self): self.run_test('def numpy_broadcast5(a): import numpy as np ; return a + np.array([1,2,3,4])', numpy.arange(20).reshape((5,4)), numpy_broadcast5=[NDArray[int, :, :]]) def test_extended_slicing0(self): self.run_test("def numpy_extended_slicing0(a): return a[2,1:-1]", numpy.arange(100).reshape((10, 10)), numpy_extended_slicing0=[NDArray[int, :, :]]) def test_extended_slicing1(self): self.run_test("def numpy_extended_slicing1(a): return a[1:-1,2]", numpy.arange(100).reshape((10, 10)), numpy_extended_slicing1=[NDArray[int, :, :]]) def test_extended_slicing2(self): self.run_test("def numpy_extended_slicing2(a): return a[2,1:-1]", numpy.arange(30).reshape((3,5,2)), numpy_extended_slicing2=[NDArray[int, :, :, :]]) def test_extended_slicing3(self): self.run_test("def numpy_extended_slicing3(a): return a[1:-1,2]", 
numpy.arange(30).reshape((3,5,2)), numpy_extended_slicing3=[NDArray[int, :,:,:]]) def test_extended_slicing4(self): self.run_test("def numpy_extended_slicing4(a): return a[1:-1,2:-2]", numpy.arange(100).reshape((10, 10)), numpy_extended_slicing4=[NDArray[int, :, :]]) def test_extended_slicing5(self): self.run_test("def numpy_extended_slicing5(a): return a[1:-1]", numpy.arange(100).reshape((10, 10)), numpy_extended_slicing5=[NDArray[int, :, :]]) def test_extended_slicing6(self): self.run_test("def numpy_extended_slicing6(a): return a[1:-1,2:-2, 3:-3]", numpy.arange(5*6*7).reshape((5,6,7)), numpy_extended_slicing6=[NDArray[int,:,:,:]]) def test_extended_slicing7(self): self.run_test("def numpy_extended_slicing7(a): return a[1:-1, 2, 1]", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_slicing7=[NDArray[int,:,:,:,:]]) def test_extended_slicing8(self): self.run_test("def numpy_extended_slicing8(a): return a[1:-1,2:-2, 1:2]", numpy.arange(60).reshape((3,5,4)), numpy_extended_slicing8=[NDArray[int,:,:,:]]) def test_extended_slicing9(self): self.run_test("def numpy_extended_slicing9(a): return a[1:-1, 2, 1, 1:2]", numpy.arange(120).reshape((3,5,2,4)), numpy_extended_slicing9=[NDArray[int,:,:,:,:]]) def test_extended_slicing10(self): self.run_test("def numpy_extended_slicing10(a): return a[1, 2, 1:-1]", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_slicing10=[NDArray[int,:,:,:,:]]) def test_extended_slicing11(self): self.run_test("def numpy_extended_slicing11(a): return a[1, 2, 1:-1, 1]", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_slicing11=[NDArray[int,:,:,:,:]]) def test_numpy_mask0(self): self.run_test("def numpy_mask0(n): import numpy ; return n[n>0][ n[n>0] < 1]", numpy.cos(numpy.arange(10)), numpy_mask0=[NDArray[float, :]]) def test_numpy_bool(self): self.run_test("def numpy_bool(n): import numpy ; return numpy.ones(n, bool)", 5, numpy_bool=[int]) def test_numpy_int(self): self.run_test("def numpy_int(n): import numpy ; return numpy.ones(n, 
int)", 5, numpy_int=[int]) def test_numpy_float(self): self.run_test("def numpy_float(n): import numpy ; return numpy.ones(n, float)", 5, numpy_float=[int]) def test_numpy_int16(self): self.run_test("def numpy_int16(n): import numpy ; return numpy.ones(n, numpy.int16)", 5, numpy_int16=[int]) def test_numpy_uint16(self): self.run_test("def numpy_uint16(n): import numpy ; return numpy.ones(n, numpy.uint16)", 5, numpy_uint16=[int]) def test_numpy_uint64(self): self.run_test("def numpy_uint64(n): import numpy ; return numpy.ones(n, numpy.uint64)", 5, numpy_uint64=[int]) def test_numpy_np_float(self): """ Check dtype == numpy.float for numpy array. """ self.run_test(""" def numpy_np_float(n): import numpy return numpy.ones(n, float)""", 5, numpy_np_float=[int]) def test_numpy_complex(self): self.run_test("def numpy_complex(n): import numpy ; return numpy.ones(n, complex)", 5, numpy_complex=[int]) def test_numpy_complex64(self): self.run_test("def numpy_complex64(n): import numpy ; return numpy.ones(n, numpy.complex64)", 5, numpy_complex64=[int]) def test_numpy_double(self): self.run_test("def numpy_double(n): import numpy ; return numpy.ones(n, numpy.double)", 5, numpy_double=[int]) def test_numpy_complex_export(self): self.run_test("def numpy_complex_export(a): import numpy ; return numpy.sum(a)", numpy.array([1+1j]), numpy_complex_export=[NDArray[complex, :]]) def test_assign_gsliced_array(self): self.run_test("""def assign_gsliced_array(): import numpy as np; a = np.array([[[1,2],[3,4]],[[5,6],[7,8]]]) b = np.array([[[9,10],[11,12]],[[13,14],[15,16]]]) a[:,:] = b[:,:] return a,b;""", assign_gsliced_array=[]) def test_assign_sliced_array(self): self.run_test("""def assign_sliced_array(): import numpy as np; a = np.array([1,2,3]); b = np.array([1,2,3]); c=a[1:] c=b[1:] b[2] = -1; return c;""", assign_sliced_array=[]) def test_index_array_0(self): self.run_test(''' def index_array_0(n): import numpy a = numpy.arange(n) return a[numpy.array([1, 0])]''', 10, 
index_array_0=[int]) def test_index_array_1(self): self.run_test(''' def index_array_1(n): import numpy a = numpy.arange(n * 3).reshape(3, n) return a[numpy.array([0, 1, 0, 2])]''', 10, index_array_1=[int]) def test_filter_array_0(self): self.run_test('def filter_array_0(n): import numpy ; a = numpy.zeros(n) ; return a[a>1]', 10, filter_array_0=[int]) def test_filter_array_1(self): self.run_test('def filter_array_1(n): import numpy ; a = numpy.arange(n) ; return a[a>4]', 10, filter_array_1=[int]) def test_filter_array_2(self): self.run_test('def filter_array_2(n): import numpy ; a = numpy.arange(n) ; return (a+a)[a>4]', 10, filter_array_2=[int]) def test_filter_array_3(self): self.run_test('def filter_array_3(n): import numpy ; a = numpy.arange(n) ; return (-a)[a>4]', 10, filter_array_3=[int]) @unittest.skip("filtering a slice") def test_filter_array_4(self): self.run_test('def filter_array_4(n): import numpy ; a = numpy.arange(n) ; return a[1:-1][a[1:-1]>4]', 10, filter_array_4=[int]) @unittest.skip("filtering a slice") def test_filter_array_5(self): self.run_test('def filter_array_5(n): import numpy ; a = numpy.arange(n) ; return (a[1:-1])[a[1:-1]>4]', 10, filter_array_5=[int]) def test_assign_ndarray(self): code = """ def assign_ndarray(t): import numpy as np; a = np.array([1,2,3]); b = np.array([1,2,3]); if t: c = a; else: c=b; if t: c=b; b[0] = -1; return c;""" self.run_test(code, 1, assign_ndarray=[int]) def test_bitwise_nan_bool(self): self.run_test("def np_bitwise_nan_bool(a): import numpy as np ; return ~(a<5)", numpy.arange(10), np_bitwise_nan_bool=[NDArray[int, :]]) def test_gslice0(self): self.run_test("def np_gslice0(a): import numpy as np; return a[1:9,5:7]", numpy.array(range(10*9)).reshape((10,9)), np_gslice0=[NDArray[int, :, :]]) def test_gslice1(self): self.run_test("def np_gslice1(a): import numpy as np ; return a[1:9,0:1, 3:6]", numpy.array(range(10*9*8)).reshape((10,9,8)), np_gslice1=[NDArray[int, :, :, :]]) def test_gslice2(self): 
self.run_test("def np_gslice2(a): import numpy as np ; return a[:,0:1, 3:6]", numpy.array(range(10*9*8)).reshape((10,9,8)), np_gslice2=[NDArray[int, :, :, :]]) def test_gslice3(self): self.run_test("def np_gslice3(a): import numpy as np ; return a[:-1,0:-1, -3:7]", numpy.array(range(10*9*8)).reshape((10,9,8)), np_gslice3=[NDArray[int, :, :, :]]) def test_gslice4(self): self.run_test("def np_gslice4(a): import numpy as np ; return a[1,0:-1, -3:7]", numpy.array(range(10*9*8)).reshape((10,9,8)), np_gslice4=[NDArray[int, :, :, :]]) def test_gslice5(self): self.run_test("def np_gslice5(a): import numpy as np ; return a[1,0:-1, 7]", numpy.array(range(10*9*8)).reshape((10,9,8)), np_gslice5=[NDArray[int, :, :, :]]) def test_gslice6(self): self.run_test("def np_gslice6(a): import numpy as np ; return a[:-1, :][1:,:]", numpy.array(range(10*9*8)).reshape((10,9,8)), np_gslice6=[NDArray[int, :,:,:]]) def test_iexpr0(self): self.run_test("def np_iexpr0(a,i): return a[i][0,0]", numpy.array(range(10*9*8)).reshape(10,9,8), 0, np_iexpr0=[NDArray[int, :,:,:], int]) def test_iexpr1(self): self.run_test("def np_iexpr1(a,i): return a[i,0][0]", numpy.array(range(10*9*8)).reshape(10,9,8), 0, np_iexpr1=[NDArray[int, :,:,:], int]) def test_iexpr2(self): self.run_test("def np_iexpr2(a,m): a[m==False] = 1; return a", numpy.arange(10).reshape(5,2), numpy.arange(10).reshape(5,2), np_iexpr2=[NDArray[int, :,:], NDArray[int, :,:]]) def test_iexpr3(self): code = ''' import numpy as np def np_iexpr3 (x, nbits): out = np.zeros ((len(x), 1<30] # <- does not compiles lastBin = 0 for peak in peaks: lastBin = peak return lastBin''' self.run_test(code, numpy.array([1,32,33,4]), test_fexpr0=[NDArray[int, :]]) def test_fexpr1(self): code = ''' import numpy as np def yy(x): a = np.zeros(x, np.float32) return a[:-1] def test_fexpr1(x): c = yy(x) d = yy(x) c[d == 0] = np.nan return c''' self.run_test(code, 10, test_fexpr1=[int]) def test_vexpr0(self): code = ''' import numpy as np def vexpr0(a, b=None): if b 
is None: assert len(a) > 0 b = np.copy(a[0]) a = a[1:] else: b = np.copy(b) m = b >= 0 for array in a: b[m] *= array[m] return b''' self.run_test(code, [numpy.arange(10, dtype=numpy.int32).reshape(5,2)], 2 * numpy.arange(10, dtype=numpy.int32).reshape(5,2), vexpr0=[List[NDArray[numpy.int32,:,:]], NDArray[numpy.int32,:,:]]) def test_array_of_pshape(self): code = 'def array_of_pshape(x): from numpy import array; return array(x[None].shape)' self.run_test(code, numpy.arange(10), array_of_pshape=[NDArray[int,:]]) def test_vexpr_of_texpr(self): code = ''' import numpy as np def apply_mask(mat, mask): assert mask.shape == mat.shape mat[mask == False] = np.nan return mat def vexpr_of_texpr(a, b): return apply_mask(a.T, b), apply_mask(a, b.T), apply_mask(a.T, b.T)''' self.run_test(code, numpy.arange(4., dtype=numpy.float32).reshape(2,2), numpy.array([[False,True],[True, False]]), vexpr_of_texpr=[NDArray[numpy.float32,:,:], NDArray[numpy.bool,:,:]]) def test_indexing_through_int8(self): code = ''' def indexing_through_int8(x): return x[x[0,0],x[0,1]]''' self.run_test(code, numpy.arange(10, dtype=numpy.uint8).reshape(5,2), indexing_through_int8=[NDArray[numpy.uint8,:,:]]) def test_indexing_through_byte(self): code = ''' def indexing_through_byte(x): return x[x[0,0],x[0,1]]''' self.run_test(code, numpy.arange(10, dtype=numpy.byte).reshape(5,2), indexing_through_byte=[NDArray[numpy.byte,:,:]]) def test_indexing_through_uint8(self): code = ''' def indexing_through_uint8(x): import numpy as np return x[np.uint8(2), np.uint8(1)]''' self.run_test(code, numpy.arange(9).reshape(3,3), indexing_through_uint8=[NDArray[int,:,:]]) def test_complex_scalar_broadcast(self): self.run_test('def complex_scalar_broadcast(a): return (a**2 * (1 + a) + 2) / 5.', numpy.ones((10,10), dtype=complex), complex_scalar_broadcast=[NDArray[complex, :, :]]) @unittest.skipIf(not has_float128, "not float128") def test_float128_0(self): self.run_test('def float128_0(x): return x, x **2', 
numpy.float128(numpy.finfo(numpy.float64).max), float128_0=[numpy.float128]) @unittest.skipIf(not has_float128, "not float128") def test_float128_1(self): self.run_test('def float128_1(x): return x, x **2', numpy.ones((10,10), dtype=numpy.float128), float128_1=[NDArray[numpy.float128,:, :]]) @unittest.skipIf(not has_float128, "not float128") def test_float128_2(self): self.run_test('def float128_2(x): from numpy import ones, float128; return ones(x,dtype=float128)', 3, float128_2=[int]) def test_texpr_expr_combined(self): self.run_test("def texpr_expr_combined(x, y):\n if x: return y.T\n else: return y * 2", 1, numpy.arange(10).reshape(5, 2), texpr_expr_combined=[int, NDArray[int,:,:]]) def test_built_slice_indexing(self): self.run_test(''' def built_slice_indexing(x,n,axis,val=0.): import numpy as np y = np.roll(x,n,axis) S = [slice(None,None)]*x.ndim if n>0: S[axis] = slice(0, n) elif n<0: S[axis] = slice(n, None) if n: y[tuple(S)] = val return y''', numpy.array([-1.2, 1, 1.2]), 1, 0, 5., built_slice_indexing=[NDArray[float, :], int, int, float]) def test_dtype_type(self): self.run_test(''' def dtype_type(x): import numpy as np c = np.complex64(x) f = np.float32(x) u = np.uint8(x) return c.dtype.type(1), f.dtype.type(2), u.dtype.type(3)''', 2, dtype_type=[int]) def test_transposed_slice_assign0(self): self.run_test("""def transposed_slice_assign0(shape): import numpy as np xx = np.empty(shape, dtype=int) xx.T[:] = np.arange(0, shape[0], 1, dtype=int) return xx""", (3, 5), transposed_slice_assign0=[Tuple[int, int]]) def test_transposed_slice_assign1(self): self.run_test("""def transposed_slice_assign1(shape): import numpy as np xx = np.ones(shape, dtype=int) xx.T[:] = 3 return xx""", (3, 5), transposed_slice_assign1=[Tuple[int, int]]) def test_transposed_slice_assign2(self): self.run_test("""def transposed_slice_assign2(shape): import numpy as np xx = np.ones(shape, dtype=int) xx.T[:1] = 3 return xx""", (10, 20), transposed_slice_assign2=[Tuple[int, int]]) def 
test_transposed_slice_assign3(self): self.run_test("""def transposed_slice_assign3(shape): import numpy as np xx = np.ones(shape, dtype=int).T xx[:1, :2] = 3 return xx""", (10, 20), transposed_slice_assign3=[Tuple[int, int]]) def test_slice_through_list0(self): self.run_test("""def slice_through_list0(shape): import numpy as np xx = np.ones(shape, dtype=int) return xx[[1,3,5],1]""", (10, 20), slice_through_list0=[Tuple[int, int]]) def test_slice_through_list1(self): self.run_test("""def slice_through_list1(shape): import numpy as np xx = np.ones(shape, dtype=int) return xx[[1,3,5],1:]""", (10, 20), slice_through_list1=[Tuple[int, int]]) def test_slice_through_list2(self): self.run_test("""def slice_through_list2(arr): import numpy as np; return arr[np.array([1,3,5]),1:]""", numpy.ones((10, 20)), slice_through_list2=[NDArray[float, :,:]]) def test_slice_through_list3(self): self.run_test("""def slice_through_list3(arr): import numpy as np; return arr[[1,3,5],1:]""", numpy.ones((10, 20)), slice_through_list3=[NDArray[float, :,:]]) def test_slice_through_list4(self): self.run_test("""def slice_through_list4(arr): import numpy as np; return arr[(1,3,5),1:]""", numpy.ones((10, 20)), slice_through_list4=[NDArray[float, :,:]]) def test_transposed_array(self): self.run_test("""def transposed_array(shape): import numpy as np xx = np.ones(shape, dtype=int) return xx.T""", (3, 5), transposed_array=[Tuple[int, int]]) def assign_transposed(self, last): params = [numpy.arange(30.).reshape(5,6), 3, 1, 1] code = ''' def helper(signal, N, A): return (signal[N:N-A:-1], signal[N:N+A], signal[N-A:N+A],) def assign_transposed(signal, N, A): return (helper(signal, N, A), helper(signal[:,:], N, A), helper(signal.T, N, A),) ''' self.run_test(code, *params, assign_transposed=[NDArray[float,:,:], int, int, int]) def test_input_asarray(self): code = 'def input_asarray(x): return x' self.run_test(code, numpy.arange(12.).reshape((1, 3,4))[:, :2, :], input_asarray=[NDArray[float,:,:,:]]) def 
test_input_strided_array(self): code = 'def input_strided_array(x): return x' with self.assertRaises(TypeError): self.run_test(code, numpy.arange(12.)[::2], input_strided_array=[NDArray[float,:]]) def test_input_unit_strided_array(self): code = 'def input_unit_strided_array(x): return x' self.run_test(code, numpy.arange(12.)[::1], input_unit_strided_array=[NDArray[float,:]]) def test_input_empty_strided_array(self): code = 'def input_empty_strided_array(x): return x' self.run_test(code, numpy.random.random((0, 2, 3))[:,:,:2], input_empty_strided_array=[NDArray[float,:,:,:]]) def test_hanning(self): code = ''' import numpy as np def helper(M): if M < 1: return np.array([]) if M == 1: return np.ones(1, float) n = np.arange(0,float(M)) return 0.5 - 0.5*np.cos(2.0*np.pi*n/(M-1)) def hanning(M): return helper(M * 0), helper(M * 1), helper(M * 4)''' self.run_test(code, 1, hanning=[int]) def test_ones_on_updated_shape(self): code = ''' import numpy as np def ones_on_updated_shape(array,n,axis,padVal): shape = list(array.shape) shape[axis]=n toPad = padVal*np.ones(shape,dtype=array.dtype) return np.concatenate((np.diff(array, n, axis=axis), toPad), axis=axis)''' self.run_test(code, numpy.arange(10.).reshape(5, 2), 1, 0, 3., ones_on_updated_shape=[NDArray[float,:,:], int, int, float]) def test_numpy_vexpr_static_shape(self): code = ''' import numpy as np def numpy_vexpr_static_shape(A): x_transFrames = np.array([3,6,20,40,70,245]) x_transScores = A[x_transFrames] x_transList = np.array([0,2,3,4]) B = x_transScores[x_transList] return x_transFrames[x_transList].astype(int),B''' self.run_test(code, numpy.arange(300.), numpy_vexpr_static_shape=[NDArray[float, :]]) def test_subscripting_slice_array_transpose(self): code = 'def subscripting_slice_array_transpose(x): return x.T[(slice(0,1),slice(0,1))]' self.run_test(code, numpy.arange(200.).reshape(10, 20), subscripting_slice_array_transpose=[NDArray[float, :, :]]) def test_combiner_0(self): code = ''' import numpy as np def 
test_combiner_0(X): O=test1(X[:,0], False) P=test1(X[:,0], True) # <- Ask to concatenate return O,P def test1(X,A): N = 20 if A: X = np.concatenate((np.zeros(N),X)) return X''' self.run_test(code, numpy.ones((10,10)), test_combiner_0=[NDArray[float,:,:]]) def test_combiner_1(self): code = ''' import numpy as np def test_combiner_1(X): O=test1(X[0], False) P=test1(X[1], True) # <- Ask to concatenate return O,P def test1(X,A): N = 20 if A: X = np.concatenate((np.zeros((N)),X)) return X''' self.run_test(code, numpy.ones((10,10)), test_combiner_1=[NDArray[float,:,:]]) def test_combiner_2(self): code = ''' import numpy as np def test_combiner_2(X): O=test1(X[X==1], False) P=test1(X[X==1], True) # <- Ask to concatenate return O,P def test1(X,A): N = 20 if A: X = np.concatenate((np.zeros((N)),X)) return X''' self.run_test(code, numpy.ones((10)), test_combiner_2=[NDArray[float,:]]) pythran-0.10.0+ds2/pythran/tests/test_nogil.py000066400000000000000000000023711416264035500213220ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import List, Set, Dict, NDArray import numpy as np class TestNoGil(TestEnv): def test_list_param(self): code=""" def list_param(l): return l, sum(i*j for i in l for j in l) """ self.run_test(code, list(range(30)), list_param=[List[int]], thread_count=4) def test_set_param(self): code=""" def set_param(l): return {sum(l), sum(i*j for i in l for j in l)}, l """ self.run_test(code, set(range(30)), set_param=[Set[int]], thread_count=4) def test_dict_param(self): code=""" def dict_param(l): return {sum(i-j for i in l.keys() for j in l.values()): l}, l """ self.run_test(code, dict(zip(range(30), range(30))), dict_param=[Dict[int, int]], thread_count=4) def test_ndarray_param(self): code=""" import numpy as np def ndarray_param(l): return np.array([i*j for i in l for j in l]), l """ self.run_test(code, np.ones(100, dtype=int), ndarray_param=[NDArray[int, :]], thread_count=4) 
pythran-0.10.0+ds2/pythran/tests/test_none.py000066400000000000000000000512561416264035500211570ustar00rootroot00000000000000from pythran.tests import TestEnv from unittest import skip from pythran.typing import List, Dict, NDArray import pythran import numpy as np class TestNone(TestEnv): def test_returned_none(self): code = ''' def dummy(l): if l: return None else: return l def returned_none(a): return dummy(a)''' self.run_test(code, [1, 2], returned_none=[List[int]]) def test_eq_neq_scalar(self): code = 'def eq_neq_scalar(x): return x == 1, x!= 1' self.run_test(code, 1, eq_neq_scalar=[int]) def test_eq_neq_nonscalar(self): code = 'def eq_neq_nonscalar(x): return x == [1], x!= [1]' self.run_test(code, [1], eq_neq_nonscalar=[List[int]]) def test_returned_none_member(self): code = ''' def dummy(l): if not l: return None else: return l def returned_none_member(a): t = dummy(a) if t is None: return None else: return t.count(1)''' self.run_test(code, [1, 2], returned_none_member=[List[int]]) def test_use_none(self): code = """ def none_input(a): l = list(range(10)) if a is None: return l[a:8] else: return l[a:3] """ self.run_test(code, None, none_input=[type(None)]) def test_is_none_default(self): code = ''' import numpy as np def is_none_default(sr,N): C = test2(sr, N) return C def test2(sr, N,fmax=None): if fmax is None: fmax = float(sr) / 2 A = np.arange(1.+N/2) B = np.arange(1.+fmax/2) return A[0] - B[0]''' self.run_test(code, 1, 3, is_none_default=[int, int]) def test_is_none_return_None(self): code = ''' import numpy as np def is_none_return_None(signal=None): if signal is None: return signal[0,] += 1.2''' self.run_test(code, np.array([1.]), is_none_return_None=[NDArray[float, :]]) def test_is_none_return_None_bis(self): code = ''' import numpy as np def is_none_return_None_bis(signal=None): if signal is None: return signal[0,] += 1.2''' self.run_test(code, is_none_return_None_bis=[]) def test_return_in_true(self): code = ''' def return_in_true(x): return 
helper(x, x), helper(x, None) def helper(x, y): if y == None: return x return x + 1''' self.run_test(code, 1, return_in_true=[int]) def test_return_in_false(self): code = ''' def return_in_false(x): return helper(x, x), helper(x, None) def helper(x, y): if y != None: x += y else: return x return x + 1''' self.run_test(code, 1, return_in_false=[int]) def test_potential_return_in_true(self): code = ''' def potential_return_in_true(x): return helper(0, x), helper(x, None), helper(18, x) def helper(x, y): if None != y: if y > 1: return 5 else: x += y else: return x return x + 1''' self.run_test(code, 1, potential_return_in_true=[int]) def test_potential_return_in_false(self): code = ''' def potential_return_in_false(x): return helper(0, x), helper(x, None), helper(18, x) def helper(x, y): if None is not y: x += y else: if x > 1: return x else: return 7 return x + 1''' self.run_test(code, 1, potential_return_in_false=[int]) def test_and_is_none(self): code = ''' def and_is_none(x): return helper(0, x), helper(x, None), helper(18, x) def helper(x, y): if None is not y and x is not None: x += y else: return 7 return x + 1''' self.run_test(code, 1, and_is_none=[int]) def test_multiple_is_none(self): code = ''' def multiple_is_none(x): return helper(0, x), helper(x, None), helper(18, x) def helper(x, y): if not (x is None is y): x += y else: return 7 return x + 1''' with self.assertRaises(pythran.syntax.PythranSyntaxError): self.run_test(code, 1, multiple_is_none=[int]) def test_different_return(self): code = ''' def different_return(x): return helper(0, x), helper(x, None), helper(18, x) def helper(x, y): if not (x is None is y): return "7" else: return 7 return x + 1''' with self.assertRaises(pythran.syntax.PythranSyntaxError): self.run_test(code, 1, different_return=[int]) def test_is_none_in_expr(self): code = ''' def is_none_in_expr(x): return (1 if None == x else 2), (x if None is x else 1), (1 if None != x else 2), (x if None is not x else 1) ''' self.run_test(code, 
1, is_none_in_expr=[int]) def test_none_bool_tuple(self): code = ''' def none_bool_tuple(x): if x > 2: y = (1, '2') else: y = None if x > 2: z = () else: z = None return bool(y), bool(z)''' self.run_test(code, 1, none_bool_tuple=[int]) def test_none_large_cond(self): code = ''' def none_large_cond(x): return helper(x, None, None), helper(None, x, None), helper(None, None, x) def helper(x, y, z): if x is None or y is None and z is None: return 5 else: return 6''' self.run_test(code, 1, none_large_cond=[int]) def test_none_mixed_test0(self): code = ''' def none_mixed_test0(x): return helper(x, 1), helper(x, 3) def helper(x, y): if x is None or x > 2: return 5 if y > 0 or x is not None: return 6''' self.run_test(code, 1, none_mixed_test0=[int]) def test_none_mixed_test1(self): code = ''' def none_mixed_test1(x): return helper(x, 1), helper(x, 3) def helper(x, y): if x is not None and x > 2 and x < 8: return 5 if y > 0 and x is not None: return 6''' self.run_test(code, 1, none_mixed_test1=[int]) def test_none_mixed_test2(self): code = ''' def none_mixed_test2(x): return helper(x, 1), helper(x, 3) def helper(x, y): if x is not None and x > 2 or x < 8: return 5 if y > 0 or x is None: return 6''' self.run_test(code, 1, none_mixed_test2=[int]) def test_none_mixed_test3(self): code = ''' def none_mixed_test3(x): return helper(x, 1), helper(x, 3) def helper(x, y): return 5 if x is None or x > 2 or x < 8 else 6''' self.run_test(code, 1, none_mixed_test3=[int]) def test_none_mixed_test4(self): code = ''' def none_mixed_test4(x): return helper(x, 4), helper(x, 3) def helper(x, y): return 5 if x is not None and x > 2 and x < 8 else 6''' self.run_test(code, 4, none_mixed_test4=[int]) def test_none_mixed_test5(self): code = ''' def none_mixed_test5(x): return helper(x, 1), helper(x, 3) def helper(x, y): return 5 if x is not None and x > 5 or x < 8 else 6''' self.run_test(code, 1, none_mixed_test5=[int]) def test_987(self): code = ''' def test_987(x): a = None if x == 5: a = 3 if a 
is not None: return 1 return 0''' self.run_test(code, 1, test_987=[int]) def test_987_0(self): code = ''' def test_987_0(x): a = None if x == 5: a = 3 if a is not None: return 1 return 0''' self.run_test(code, 5, test_987_0=[int]) def test_987_1(self): code = ''' def test_987_1(x): a = None b = 0 if x != 12: a = x if a is not None and a < 5: b = 1 return (b, a)''' self.run_test(code, 12, test_987_1=[int]) def test_987_2(self): code = ''' def test_987_2(x): a = None b = 0 if x != 12: a = x if a is not None and a < 5: b = 1 return (b, a)''' self.run_test(code, 13, test_987_2=[int]) def test_987_3(self): code = ''' def test_987_3(x): a = None b = 0 if x != 12: a = x if a is not None and a < 5: b = 1 return (b, a)''' self.run_test(code, 3, test_987_3=[int]) def test_none_to_str(self): code = """ def dump_as_str(x): y = x return str(y) + str(y) * 2 def none_to_str(x): if x > 1: y = None else: y = 2 return dump_as_str(None), dump_as_str(y)""" self.run_test(code, 0, none_to_str=[int]) def test_is_none_attribute(self): code = ''' def is_none_attribute(x): a = [x, x] if x is not None: a.pop() return a''' self.run_test(code, 0, is_none_attribute=[int]) def test_none_combine_tuple(self): code = ''' def none_combine_tuple(n): def get_header(name): if name: return 1 else: return None a = None lpix = get_header(n) # Don't work if lpix is None: a = 3 return a''' self.run_test(code, 'hello', none_combine_tuple=[str]) def test_none_tuple(self): code = ''' def test_none_tuple(i): a = 0 b = 0 c = 0 h = None if i % 2 == 0: h = (1, 2, 3) if h is not None: a, b, c = h return a''' self.run_test(code, 10, test_none_tuple=[int]) def test_none_loop_break(self): code = ''' def none_loop_break(n): x = [None if i%2 else 1 for i in range(n)] s = 3 for j in x: if j is None: break s += 1 return s''' self.run_test(code, 7, none_loop_break=[int]) def test_none_loop_continue(self): code = ''' def none_loop_continue(n): x = [None if i%2 else 1 for i in range(n)] s = 3 for j in x: if j is None: 
continue s += 1 return s''' self.run_test(code, 7, none_loop_continue=[int]) def test_none_loop_break_continue(self): code = ''' def none_loop_break_continue(n): x = [None if i%2 else 1 for i in range(n)] s = 3 for j in x: if j is None: s *= 9 continue else: s += 1 break return s''' self.run_test(code, 7, none_loop_break_continue=[int]) def test_none_loop_break_or_ret(self): code = ''' def none_loop_break_or_ret(n): x = [None if i%2 else 1 for i in range(n)] s = 3 for j in x: if j is None: break else: return 3 s += 1 return s''' self.run_test(code, 7, none_loop_break_or_ret=[int]) def test_none_loop_continue_or_ret(self): code = ''' def none_loop_continue_or_ret(n): x = [None if i%2 else 1 for i in range(n)] s = 3 for j in x: if j is None: continue else: return 3 s += 1 return s''' self.run_test(code, 7, none_loop_continue_or_ret=[int]) def test_none_loop_break_continue_or_ret(self): code = ''' def none_loop_break_continue_or_ret(n): x = [None if i%2 else 1 for i in range(n)] s = 3 for j in x: if j is None: s *= 9 break elif j < 5: s += 1 continue else: return 8 return s''' self.run_test(code, 7, none_loop_break_continue_or_ret=[int]) def test_none_operators0(self): code = ''' def helper(x): if x is not None: x = 3.5 x += 1 x *= 5 x -= 3 x /= 2 x //= 2 return (x + 1, x - 1, x / 3, x // 3, x % 3, x * 3, x ** 3) def none_operators0(x): if x > 1: y = None else: y = 2 return helper(x), helper(y)''' self.run_test(code, 0, none_operators0=[int]) def test_none_diorcet0(self): code = ''' def none_diorcet0(a): x = None if a < 0 else 1 y = None if a % 2 else 2 z = -1 # Doesn't compile if x is not None and y is not None: # Without test on other than none it doesn't work z = 0 # Doesn't compile if x is not None: if y is not None: return 0 # Doesn't compile if x is not None: if y is not None and a != -666: z = 0 # Compile but wrong results if x is not None: if y is not None: z = 0 # Compile but wrong results (not the same that previous one) if x is not None and a != -666 and y 
is not None: z = 0 return z''' self.run_test(code, 3, none_diorcet0=[int]) self.run_test(code, 2, none_diorcet0=[int]) self.run_test(code, -2, none_diorcet0=[int]) self.run_test(code, -3, none_diorcet0=[int]) def test_none_diorcet1(self): code = ''' def none_diorcet1(l): import numpy as np return tuple([None if np.isnan(a) else a for a in l])''' self.run_test(code, [3., float('nan')], none_diorcet1=[List[float]]) def test_none_diorcet2(self): code = ''' def none_diorcet2(headers): errors = [] xxx = None def add_error(type, args): errors.append((type, args)) if "DUMMY_PYTHRAN" in headers: xxx = True if xxx is not None: add_error(1, ['AAAAA']) if xxx is not None and xxx: # Can't compile add_error(1, ['AAAAA']) if "DUMMY_PYTHRAN" in headers: # Without that we have a missing symbol at runtime add_error(2, ['DUMMY_PYTHRAN']) return errors''' self.run_test(code, {"DUMMY_PYTHRAN":"DUMMY_PYTHRAN"}, none_diorcet2=[Dict[str,str]]) def test_none_escaping0(self): code = ''' def none_escaping0(i): a = 0 c = None if i % 2 == 0: c = 1 if c is not None: a = 2 b = 3 if i < 5: print(b + i) return a''' self.run_test(code, 3, none_escaping0=[int]) def test_none_escaping1(self): code = ''' def none_escaping1(headers): def get_header(): return None lpix = float(headers.get('a')) diameter = None if lpix is not None and lpix > 0.0: diameter = 20.0 return diameter''' self.run_test(code, {'a': '10'}, none_escaping1=[Dict[str,str]]) class TestIsInstance(TestEnv): def test_isinstance_int0(self): self.run_test( 'def isinstance_int0(x, y): return isinstance(x, int), isinstance(y, int)', 1, '1', isinstance_int0=[int, str]) def test_isinstance_int1(self): code = 'def isinstance_int1a(x):\n if isinstance(x, int): return x + 1\n else: return x * 3' self.run_test(code, 1, isinstance_int1a=[int]) code = 'def isinstance_int1b(x):\n if isinstance(x, int): return x + 1\n else: return x * 3' self.run_test(code, '1', isinstance_int1b=[str]) def test_isinstance_int2(self): code = 'def isinstance_int2(x):\n 
if isinstance(x, int): return 1\n else: return "3"' self.run_test(code, 1, isinstance_int2=[int]) def test_isinstance_int3(self): code = 'def isinstance_int3(x):\n if isinstance(x, int) or x is None: return 1\n else: return "3"' self.run_test(code, 1, isinstance_int3=[int]) def test_isinstance_complex0(self): code = ''' import numpy as np def conj (x): if isinstance(x.dtype, complex): return x.real - 1j*x.imag else: return x def isinstance_complex0(x): return x * conj(x)''' self.run_test(code, np.ones(5, dtype=int), isinstance_complex0=[NDArray[int,:]]) def test_isinstance_complex1(self): code = ''' import numpy as np def conj (x): if isinstance(x.dtype, complex): return x.real - 1j*x.imag else: return x def isinstance_complex1(x): return x * conj(x)''' self.run_test(code, np.ones(5, dtype=complex) * 2j, isinstance_complex1=[NDArray[complex,:]]) def test_inner_loop_break(self): code = ''' import numpy as np from math import sqrt def hex_area (spot_radius): return sqrt(3)/2 * 3 * spot_radius**2 def inner_loop_break(center, pts, radius, spot_radius, target_area=None): spot_area = hex_area (spot_radius) pts_within_radius = pts[np.abs (pts - center) <= radius] if target_area is None: return pts_within_radius else: area = 0 pts_list = [] for pt in pts_within_radius: area += spot_area if area < target_area: pts_list.append (pt) else: break return pts_list ''' self.run_test(code, 1j, np.array([1j]), 1., 1., 1., inner_loop_break=[complex, NDArray[complex, :], float, float, float]) def test_escape_redef(self): code = ''' def escape_redef(a=None): l = [] if a is None: for ii in range(10): l.append(ii) else: a=a for ii in range(10): l.append(ii) return l''' self.run_test(code, escape_redef=[]) def test_nested_static_if0(self): code = ''' def nested_static_if0 ( free_age_vars, n_calls ): if n_calls is not None: n_calls[0] += 1 cpp_free = free_age_vars[0] and free_age_vars[1] QQQQ = 1 if QQQQ == 0: pass elif QQQQ == 1: if cpp_free: if n_calls is not None and n_calls[0] == 1: 
return 4. else: pass elif QQQQ == 2: pass return 123.''' self.run_test(code, [0,1], [0], nested_static_if0=[List[int], List[int]]) pythran-0.10.0+ds2/pythran/tests/test_normalize_methods.py000066400000000000000000000132511416264035500237340ustar00rootroot00000000000000from pythran.tests import TestEnv import numpy from tempfile import mkstemp import unittest from pythran.typing import List, Set, Dict, NDArray class TestNormalizeMethods(TestEnv): def test_normalize_methods0(self): self.run_test("def normalize_methods0(): c = complex(1) ; return complex.conjugate(c)", normalize_methods0=[]) def test_function_alias0(self): self.run_test(""" def function_alias0(): def p(): return 0 g = p return g()""", function_alias0=[]) def test_function_alias1(self): self.run_test(""" def function_alias1(n): def p(): return 0 def q(): return 1 g = p if n else q return g() """, 1, function_alias1=[int]) def test_function_alias2(self): self.run_test(""" def function_alias2(n): def p(): return 0 def q(): return 1 if n: g = p else: g = q return g() """, 1, function_alias2=[int]) def test_module_alias0(self): self.run_test("def module_alias0(c): import math ; m = math; return m.cos(c)", 1., module_alias0=[float]) def test_module_alias1(self): self.run_test("def module_alias1(c): import math as ma; m = ma; return m.cos(c)", 1., module_alias1=[float]) def test_module_alias2(self): self.run_test("import math as ma\ndef module_alias2(c): m = ma; return m.cos(c)", 1., module_alias2=[float]) def test_module_alias3(self): self.run_test("import math as ma; m = ma\ndef module_alias3(c): return m.cos(c)", 1., module_alias3=[float]) def test_module_alias4(self): self.run_test(""" import math as ma def module_alias4(c): import math as ma2 m = ma def mab(): return m.cos(c) + ma2.cos(c) return mab()""", 1., module_alias4=[float]) def test_module_alias5(self): self.run_test("import math as m2\ndef module_alias5(math): m = m2 ; return m.cos(math)", 1., module_alias5=[float]) def 
test_shadow_import0(self): self.run_test("def shadow_import0(math): math.add(1)", {1,2}, shadow_import0=[Set[int]]) def test_shadow_import1(self): self.run_test("def shadow_import1(v): math={ v } ; math.add(1)", 1, shadow_import1=[int]) def test_shadow_import2(self): self.run_test("def shadow_import2(s):\n for set in s : set.add(1)", [{1},{2}], shadow_import2=[List[Set[int]]]) def test_shadow_import3(self): self.run_test("def shadow_import3(s): import math ; math = set ; set.add(s, 1)", {1}, shadow_import3=[Set[int]]) def test_shadow_import4(self): self.run_test("import math\ndef shadow_import4(math): math.add(1)", {1}, shadow_import4=[Set[int]]) def test_builtin_support0(self): self.run_test("def builtin_support0(a): import builtins; return builtins.list(a)", [1, 2], builtin_support0=[List[int]]) def test_dispatch_clear(self): self.run_test("def dispatch_clear(s, d): set.clear(s); dict.clear(d); s.clear(); d.clear() ; return s, d", {1}, {1:1}, dispatch_clear=[Set[int],Dict[int,int]]) def test_dispatch_conjugate(self): self.run_test("def dispatch_conjugate(c, n): import numpy; return complex.conjugate(c), numpy.conjugate(n), c.conjugate(), n.conjugate()", 2.j, numpy.array([1.j+1.]), dispatch_conjugate=[complex, NDArray[complex, :]]) def test_dispatch_copy(self): self.run_test("def dispatch_copy(d, s, n): import numpy; return dict.copy(d), set.copy(s), numpy.copy(n), d.copy(), s.copy(), n.copy()", {1:1}, {1}, numpy.ones(1), dispatch_copy=[Dict[int,int], Set[int], NDArray[float, :]]) def test_dispatch_count(self): self.run_test("def dispatch_count(s, l): return str.count(s,'1'), list.count(l,1), s.count('1'), l.count(1)", "1", [1], dispatch_count=[str, List[int]]) def test_dispatch_pop(self): self.run_test("def dispatch_pop(l, d): list.pop(l); dict.pop(d,1); l.pop(); d.pop(2); return l, d", [1,2], {1:1, 2:2}, dispatch_pop=[List[int], Dict[int,int]]) def test_dispatch_remove(self): self.run_test("def dispatch_remove(s, l): list.remove(l,1); set.remove(s,1); 
l.remove(2); s.remove(2); return s, l", {1, 2}, [1,2], dispatch_remove=[Set[int], List[int]]) def test_dispatch_update(self): self.run_test("def dispatch_update(s, d): set.update(s, s); dict.update(d,d); s.update(s); d.update(d); return s, d", {1}, {1:1}, dispatch_update=[Set[int], Dict[int,int]]) def test_capture_bound_method(self): code = ''' def capture_bound_method(fname, r): if r: f = open(fname,'w') write_line = f.write else: write_line = fake_write for i in range(10): write_line(str(i)) if r: f.close() def fake_write(s): return 0''' self.run_test(code, "none", False, capture_bound_method=[str, bool]) pythran-0.10.0+ds2/pythran/tests/test_numpy_broadcasting.py000066400000000000000000000223321416264035500241010ustar00rootroot00000000000000import unittest from pythran.tests import TestEnv import numpy as np from pythran.typing import NDArray, List, Tuple @TestEnv.module class TestBroadcasting(TestEnv): def test_broadcast_scalar0(self): self.run_test('def broadcast_scalar0(x): return x + 1', np.arange(12000), broadcast_scalar0=[NDArray[int, :]]) def test_broadcast_scalar1(self): self.run_test('def broadcast_scalar1(x): return x + 1', np.arange(12000).reshape(40, 300), broadcast_scalar1=[NDArray[int, :, :]]) def test_broadcast_array0(self): self.run_test('def broadcast_array0(x, y): return x + y', np.arange(12000).reshape(40,300), np.arange(300), broadcast_array0=[NDArray[int,:,:], NDArray[int,:]]) def test_broadcast_array1(self): self.run_test('def broadcast_array1(x, y): return x + y', np.arange(12000).reshape(4,30,100), np.arange(100), broadcast_array1=[NDArray[int,:,:,:], NDArray[int,:]]) def test_broadcast_array2(self): self.run_test('def broadcast_array2(x, y): return x + y', np.arange(12000).reshape(4,30,2, 50), [3] * 50, broadcast_array2=[NDArray[int,:,:,:,:], List[int]]) def test_broadcast_array3(self): self.run_test('def broadcast_array3(x, y): return (x + y)[1:,1:,0]', np.arange(12000).reshape(4,30,2, 50), [3] * 50, 
broadcast_array3=[NDArray[int,:,:,:,:], List[int]]) def test_broadcast_with_ref(self): code = ''' import numpy as np import numpy.linalg as npl def _get_cross(a, b): return a[0] * b[:, :, 1] - a[1] * b[:, :, 0] def _get_mgrid(shape): xx = np.empty(shape, dtype=int) xx.T[:] = np.arange(0, shape[0], 1, dtype=int) yy = np.empty(shape, dtype=int) yy[:] = np.arange(0, shape[1], 1, dtype=int) return xx, yy def broadcast_array_with_ref(shape, center, angle): xx, yy = _get_mgrid(shape) coords = np.rollaxis(np.array([xx, yy]), 0, 3) far = center + np.array([np.cos(angle), np.sin(angle)]) * 100 if angle > 1: z = np.absolute(_get_cross(far - center, coords - center)) return z else: z = _get_cross(far - center, coords - center) return np.absolute(z) ''' self.run_test(code, (3,3), (4.,4.), 5., broadcast_array_with_ref=[Tuple[int, int], Tuple[float, float], float]) def test_broadcast_same_dim0(self): self.run_test('def broadcast_same_dim0(x, y): return x + y', np.arange(12000).reshape(40,300), np.arange(300).reshape(1,300), broadcast_same_dim0=[NDArray[int, :, :], NDArray[int, :, :]]) def test_broadcast_same_dim1(self): self.run_test('def broadcast_same_dim1(x, y): return x + y', np.arange(12000).reshape(40,300), np.arange(300), broadcast_same_dim1=[NDArray[int, :, :], NDArray[int, :]]) def test_broadcast_both_dims(self): self.run_test('def broadcast_both_dims(x, y): return x + y', np.arange(100).reshape(1,100), np.arange(100).reshape(100, 1), broadcast_both_dims=[NDArray[int, :, :], NDArray[int, :, :]]) def test_broadcast_update_scalar0(self): self.run_test('def broadcast_update_scalar0(x): x += 1; return x', np.arange(12000), broadcast_update_scalar0=[NDArray[int, :]]) def test_broadcast_update_scalar1(self): self.run_test('def broadcast_update_scalar1(x): x += 1; return x', np.arange(12000).reshape(40, 300), broadcast_update_scalar1=[NDArray[int, :, :]]) def test_broadcast_update_array0(self): self.run_test('def broadcast_update_array0(x, y): x += y ; return x', 
np.arange(12000).reshape(40,300), np.arange(300), broadcast_update_array0=[NDArray[int, :, :], NDArray[int, :]]) def test_broadcast_update_array1(self): self.run_test('def broadcast_update_array1(x, y): x += y; return x', np.arange(12000).reshape(4,30,100), np.arange(100), broadcast_update_array1=[NDArray[int, :, :,:], NDArray[int, :]]) def test_broadcast_update_same_dim0(self): self.run_test('def broadcast_update_same_dim0(x, y): x += y; return x', np.arange(12000).reshape(40,300), np.arange(300).reshape(1,300), broadcast_update_same_dim0=[NDArray[int, :, :], NDArray[int, :, :]]) def test_broadcast_update_same_dim1(self): self.run_test('def broadcast_update_same_dim1(x, y): x += y; return x', np.arange(12000).reshape(40,300), np.arange(300), broadcast_update_same_dim1=[NDArray[int, :, :], NDArray[int, :]]) def test_broadcast_compute_scalar0(self): self.run_test('def broadcast_compute_scalar0(x): return -(x + 1)', np.arange(12000), broadcast_compute_scalar0=[NDArray[int, :]]) def test_broadcast_compute_scalar1(self): self.run_test('def broadcast_compute_scalar1(x): return -(x + 1)', np.arange(12000).reshape(40, 300), broadcast_compute_scalar1=[NDArray[int, :, :]]) def test_broadcast_compute_array0(self): self.run_test('def broadcast_compute_array0(x, y): return -(x + y)', np.arange(12000).reshape(40,300), np.arange(300), broadcast_compute_array0=[NDArray[int, :, :], NDArray[int, :]]) def test_broadcast_compute_array1(self): self.run_test('def broadcast_compute_array1(x, y): return -(x + y)', np.arange(12000).reshape(4,30,100), np.arange(100), broadcast_compute_array1=[NDArray[int, :, :, :], NDArray[int, :]]) def test_broadcast_compute_same_dim0(self): self.run_test('def broadcast_compute_same_dim0(x, y): return -(x + y)', np.arange(12000).reshape(40,300), np.arange(300).reshape(1,300), broadcast_compute_same_dim0=[NDArray[int, :, :], NDArray[int, :, :]]) def test_broadcast_compute_same_dim1(self): self.run_test('def broadcast_compute_same_dim1(x, y): return -(x + 
y)', np.arange(12000).reshape(40,300), np.arange(300), broadcast_compute_same_dim1=[NDArray[int, :, :], NDArray[int, :]]) def test_broadcast_compute_both_dims(self): self.run_test('def broadcast_compute_both_dims(x, y): return -(x + y)', np.arange(100).reshape(1,100), np.arange(100).reshape(100, 1), broadcast_compute_both_dims=[NDArray[int, :, :], NDArray[int, :, :]]) def test_broadcast_with_reshape(self): self.run_test('def broadcast_with_reshape(x): n = x.shape[0]; return x.reshape(1, n) + x.reshape(n, 1)', np.arange(100), broadcast_with_reshape=[NDArray[int, :]]) def test_broadcast_sum(self): code = ''' def broadcast_sum(x, y): n = x.size return (x.reshape(n, 1) * y.reshape(1, n)).sum() ''' self.run_test(code, np.arange(100).reshape(1,100), np.arange(100).reshape(100, 1), broadcast_sum=[NDArray[int, :, :], NDArray[int, :, :]]) def test_broadcast_sum_axis(self): code = ''' import numpy as np def broadcast_sum_axis(forecasted_array, observed_array): return np.abs(forecasted_array[:, None] - observed_array).sum() ''' self.run_test(code, np.arange(100.), np.arange(100.), broadcast_sum_axis=[NDArray[float, :], NDArray[float, :]]) def test_broadcasting_nth29(self): code = ''' def broadcast_nth29(n): res = (n+2*pow(n**2,2))/n return res''' self.run_test(code, np.arange(1., 100.), broadcast_nth29=[NDArray[float, :]]) def test_broadcasting_expr0(self): code = ''' import numpy as np def broadcasting_expr0(num_pairs): t = np.zeros((num_pairs, 4), dtype=float) t[0,1] = t[1, 0] = 1 first_row = t[:, 0].reshape((-1, 1)) normalized_triples = np.subtract(t, first_row) return normalized_triples[:,1:4]''' self.run_test(code, 4, broadcasting_expr0=[int]) pythran-0.10.0+ds2/pythran/tests/test_numpy_fft.py000066400000000000000000001164241416264035500222260ustar00rootroot00000000000000import unittest from pythran.tests import TestEnv import numpy from pythran.typing import NDArray import unittest @TestEnv.module class TestNumpyRFFT(TestEnv): # Basic test def test_rfft_0(self): 
self.run_test("def test_rfft(x): from numpy.fft import rfft ; return rfft(x)", numpy.arange(0,8.), test_rfft=[NDArray[float,:]]) # Test various values of n, even, odd, greater and smaller than array size def test_rfft_1(self): self.run_test("def test_rfft_1(x,n): from numpy.fft import rfft ; return rfft(x,n)", numpy.arange(0,8.),8, test_rfft_1=[NDArray[float,:],int]) def test_rfft_2(self): self.run_test("def test_rfft_2(x,n): from numpy.fft import rfft ; return rfft(x,n)", numpy.arange(0,8.),9, test_rfft_2=[NDArray[float,:],int]) def test_rfft_3(self): self.run_test("def test_rfft_3(x,n): from numpy.fft import rfft ; return rfft(x,n)", numpy.arange(0,8.),7, test_rfft_3=[NDArray[float,:],int]) def test_rfft_4(self): self.run_test("def test_rfft_4(x,n): from numpy.fft import rfft ; return rfft(x,n)", numpy.arange(0,8.),6, test_rfft_4=[NDArray[float,:],int]) def test_rfft_5(self): self.run_test("def test_rfft_5(x,n): from numpy.fft import rfft ; return rfft(x,n)", numpy.arange(0,8.),10, test_rfft_5=[NDArray[float,:],int]) # Two dimensional array def test_rfft_6(self): self.run_test("def test_rfft_6(x): from numpy.fft import rfft ; return rfft(x)", numpy.random.random((4,128)), test_rfft_6=[NDArray[float,:,:]]) # Test axes def test_rfft_7(self): self.run_test("def test_rfft_7(x,n,a): from numpy.fft import rfft ; return rfft(x,n,a)", numpy.random.random((4,128)),128,1, test_rfft_7=[NDArray[float,:,:],int,int]) def test_rfft_8(self): self.run_test("def test_rfft_8(x,n,a): from numpy.fft import rfft ; return rfft(x,n,a)", numpy.random.random((4,128)),128,0, test_rfft_8=[NDArray[float,:,:],int,int]) # Test renorm def test_rfft_9(self): self.run_test("def test_rfft_9(x,n,a,r): from numpy.fft import rfft ; return rfft(x,n,a,r)", numpy.random.random((4,128)),128,1,'ortho', test_rfft_9=[NDArray[float,:,:],int,int,str]) # Test float32 def test_rfft_10(self): self.run_test("def test_rfft_10(x): from numpy.fft import rfft ; return rfft(x)", 
numpy.arange(0,8.).astype(numpy.float32), test_rfft_10=[NDArray[numpy.float32,:]]) def test_rfft_11(self): self.run_test("def test_rfft_11(x,n): from numpy.fft import rfft ; return rfft(x,n)", numpy.arange(0,8.).astype(numpy.float32),16,test_rfft_11=[NDArray[numpy.float32,:],int]) # Test parallel: def test_rfft_12(self): self.run_test(''' import numpy as np def test_rfft_12(x): out = out = [np.empty_like(x, dtype=complex) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.rfft(x) return np.concatenate(out) ''',numpy.random.random((4,128)), test_rfft_12=[NDArray[float,:,:]]) # Test with arguments def test_rfft_13(self): self.run_test("def test_rfft_13(x): from numpy.fft import rfft ; return rfft(x,axis=1)", numpy.random.random((2,128)), test_rfft_13=[NDArray[float,:,:]]) def test_rfft_14(self): self.run_test("def test_rfft_14(x): from numpy.fft import rfft ; return rfft(x,n=128,axis=0)", numpy.random.random((2,128)), test_rfft_14=[NDArray[float,:,:]]) @unittest.skip("Mismatch because numpy converts to double before fft (See comment in header file)") def test_rfft_f32(self): self.run_test("def test_rfft_f32(x): from numpy.fft import rfft ; return rfft(x)", numpy.random.random(128).astype(numpy.float32), test_rfft_f32=[NDArray[numpy.float32,:]]) def test_rfft_int64(self): self.run_test("def test_rfft_int64(x): from numpy.fft import rfft ; return rfft(x)", (100*numpy.random.random(128)).astype(numpy.int64), test_rfft_int64=[NDArray[numpy.int64,:]]) def test_rfft_byte(self): self.run_test("def test_rfft_byte(x): from numpy.fft import rfft ; return rfft(x)", (100*numpy.random.random(128)).astype(numpy.byte), test_rfft_byte=[NDArray[numpy.byte,:]]) @TestEnv.module class TestNumpyIRFFT(TestEnv): ############# IRFFT # Basic test def test_irfft_0(self): self.run_test("def test_irfft_0(x): from numpy.fft import irfft ; return irfft(x)", numpy.exp(1j*numpy.random.random(8)), test_irfft_0=[NDArray[complex,:]]) # Test various values of n, even, odd, 
greater and smaller than array size def test_irfft_1(self): self.run_test("def test_irfft_1(x,n): from numpy.fft import irfft ; return irfft(x,n)", numpy.exp(1j*numpy.random.random(8)),8, test_irfft_1=[NDArray[complex,:],int]) def test_irfft_2(self): self.run_test("def test_irfft_2(x,n): from numpy.fft import irfft ; return irfft(x,n)", numpy.exp(1j*numpy.random.random(8)),9, test_irfft_2=[NDArray[complex,:],int]) def test_irfft_3(self): self.run_test("def test_irfft_3(x,n): from numpy.fft import irfft ; return irfft(x,n)", numpy.exp(1j*numpy.random.random(8)),7, test_irfft_3=[NDArray[complex,:],int]) def test_irfft_4(self): self.run_test("def test_irfft_4(x,n): from numpy.fft import irfft ; return irfft(x,n)", numpy.exp(1j*numpy.random.random(8)),6, test_irfft_4=[NDArray[complex,:],int]) def test_irfft_5(self): self.run_test("def test_irfft_5(x,n): from numpy.fft import irfft ; return irfft(x,n)", numpy.exp(1j*numpy.random.random(8)),10, test_irfft_5=[NDArray[complex,:],int]) # Two dimensional array def test_irfft_6(self): self.run_test("def test_irfft_6(x): from numpy.fft import irfft ; return irfft(x)", numpy.exp(1j*numpy.random.random((4,128))), test_irfft_6=[NDArray[complex,:,:]]) # Test axes def test_irfft_7(self): self.run_test("def test_irfft_7(x,n,a): from numpy.fft import irfft ; return irfft(x,n,a)", numpy.exp(1j*numpy.random.random((4,128))),128,1, test_irfft_7=[NDArray[complex,:,:],int,int]) def test_irfft_8(self): self.run_test("def test_irfft_8(x,n,a): from numpy.fft import irfft ; return irfft(x,n,a)", numpy.exp(1j*numpy.random.random((4,128))),128,0, test_irfft_8=[NDArray[complex,:,:],int,int]) # Test renorm def test_irfft_9(self): self.run_test("def test_irfft_9(x,n,a,r): from numpy.fft import irfft ; return irfft(x,n,a,r)", numpy.exp(1j*numpy.random.random((4,128))),128,1,'ortho', test_irfft_9=[NDArray[complex,:,:],int,int,str]) # Test complex64 def test_irfft_10(self): self.run_test("def test_irfft_10(x): from numpy.fft import irfft ; return 
irfft(x)", numpy.exp(1j*numpy.random.random(8)).astype(numpy.complex64), test_irfft_10=[NDArray[numpy.complex64,:]]) def test_irfft_11(self): self.run_test("def test_irfft_11(x,n): from numpy.fft import irfft ; return irfft(x,n)", numpy.exp(1j*numpy.random.random(8)).astype(numpy.complex64),16,test_irfft_11=[NDArray[numpy.complex64,:],int]) # Test parallel: def test_irfft_12(self): self.run_test(''' import numpy as np def test_irfft_12(x): out = [np.empty_like(x, dtype=float) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.irfft(x) return np.concatenate(out) ''',numpy.exp(1j*numpy.random.random((4,128))).astype(numpy.complex64), test_irfft_12=[NDArray[numpy.complex64,:,:]]) # Test with arguments def test_irfft_13(self): self.run_test("def test_irfft_13(x): from numpy.fft import irfft ; return irfft(x,axis=1)", numpy.exp(1j*numpy.random.random((2,128))), test_irfft_13=[NDArray[complex,:,:]]) def test_irfft_14(self): self.run_test("def test_irfft_14(x): from numpy.fft import irfft ; return irfft(x,n=128,axis=0)", numpy.exp(1j*numpy.random.random((2,128))), test_irfft_14=[NDArray[complex,:,:]]) def test_irfft_c64(self): self.run_test("def test_irfft_c64(x): from numpy.fft import irfft ; return irfft(x)", numpy.exp(1j*numpy.random.random((2,128))).astype(numpy.complex64), test_irfft_c64=[NDArray[numpy.complex64,:,:]]) def test_irfft_f64(self): self.run_test("def test_irfft_f64(x): from numpy.fft import irfft ; return irfft(x)", numpy.random.random(128), test_irfft_f64=[NDArray[numpy.float64,:]]) def test_irfft_f32(self): self.run_test("def test_irfft_f32(x): from numpy.fft import irfft ; return irfft(x)", numpy.random.random(128).astype(numpy.float32), test_irfft_f32=[NDArray[numpy.float32,:]]) def test_irfft_int64(self): self.run_test("def test_irfft_int64(x): from numpy.fft import irfft ; return irfft(x)", (100*numpy.random.random(128)).astype(numpy.int64), test_irfft_int64=[NDArray[numpy.int64,:]]) def test_irfft_byte(self): 
self.run_test("def test_irfft_byte(x): from numpy.fft import irfft ; return irfft(x)", (100*numpy.random.random(128)).astype(numpy.byte), test_irfft_byte=[NDArray[numpy.byte,:]]) @TestEnv.module class TestNumpyIHFFT(TestEnv): # Basic test def test_ihfft_0(self): self.run_test("def test_ihfft(x): from numpy.fft import ihfft ; return ihfft(x)", numpy.arange(0,8.), test_ihfft=[NDArray[float,:]]) # Test various values of n, even, odd, greater and smaller than array size def test_ihfft_1(self): self.run_test("def test_ihfft_1(x,n): from numpy.fft import ihfft ; return ihfft(x,n)", numpy.arange(0,8.),8, test_ihfft_1=[NDArray[float,:],int]) def test_ihfft_2(self): self.run_test("def test_ihfft_2(x,n): from numpy.fft import ihfft ; return ihfft(x,n)", numpy.arange(0,8.),9, test_ihfft_2=[NDArray[float,:],int]) def test_ihfft_3(self): self.run_test("def test_ihfft_3(x,n): from numpy.fft import ihfft ; return ihfft(x,n)", numpy.arange(0,8.),7, test_ihfft_3=[NDArray[float,:],int]) def test_ihfft_4(self): self.run_test("def test_ihfft_4(x,n): from numpy.fft import ihfft ; return ihfft(x,n)", numpy.arange(0,8.),6, test_ihfft_4=[NDArray[float,:],int]) def test_ihfft_5(self): self.run_test("def test_ihfft_5(x,n): from numpy.fft import ihfft ; return ihfft(x,n)", numpy.arange(0,8.),10, test_ihfft_5=[NDArray[float,:],int]) # Two dimensional array def test_ihfft_6(self): self.run_test("def test_ihfft_6(x): from numpy.fft import ihfft ; return ihfft(x)", numpy.random.random((4,128)), test_ihfft_6=[NDArray[float,:,:]]) # Test axes def test_ihfft_7(self): self.run_test("def test_ihfft_7(x,n,a): from numpy.fft import ihfft ; return ihfft(x,n,a)", numpy.random.random((4,128)),128,1, test_ihfft_7=[NDArray[float,:,:],int,int]) def test_ihfft_8(self): self.run_test("def test_ihfft_8(x,n,a): from numpy.fft import ihfft ; return ihfft(x,n,a)", numpy.random.random((4,128)),128,0, test_ihfft_8=[NDArray[float,:,:],int,int]) # Test renorm def test_ihfft_9(self): self.run_test("def 
test_ihfft_9(x,n,a,r): from numpy.fft import ihfft ; return ihfft(x,n,a,r)", numpy.random.random((4,128)),128,1,'ortho', test_ihfft_9=[NDArray[float,:,:],int,int,str]) # Test float32 def test_ihfft_10(self): self.run_test("def test_ihfft_10(x): from numpy.fft import ihfft ; return ihfft(x)", numpy.arange(0,8.).astype(numpy.float32), test_ihfft_10=[NDArray[numpy.float32,:]]) def test_ihfft_11(self): self.run_test("def test_ihfft_11(x,n): from numpy.fft import ihfft ; return ihfft(x,n)", numpy.arange(0,8.).astype(numpy.float32),16,test_ihfft_11=[NDArray[numpy.float32,:],int]) # Test parallel: def test_ihfft_12(self): self.run_test(''' import numpy as np def test_ihfft_12(x): out = out = [np.empty_like(x, dtype=complex) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.ihfft(x) return np.concatenate(out) ''',numpy.random.random((4,128)), test_ihfft_12=[NDArray[float,:,:]]) # Test with arguments def test_ihfft_13(self): self.run_test("def test_ihfft_13(x): from numpy.fft import ihfft ; return ihfft(x,axis=1)", numpy.random.random((2,128)), test_ihfft_13=[NDArray[float,:,:]]) def test_ihfft_14(self): self.run_test("def test_ihfft_14(x): from numpy.fft import ihfft ; return ihfft(x,n=128,axis=0)", numpy.random.random((2,128)), test_ihfft_14=[NDArray[float,:,:]]) @unittest.skip("Mismatch because numpy converts to double before fft (See comment in header file)") def test_ihfft_f32(self): self.run_test("def test_ihfft_f32(x): from numpy.fft import ihfft ; return ihfft(x)", numpy.random.random(128).astype(numpy.float32), test_ihfft_f32=[NDArray[numpy.float32,:]]) def test_ihfft_int64(self): self.run_test("def test_ihfft_int64(x): from numpy.fft import ihfft ; return ihfft(x)", (100*numpy.random.random(128)).astype(numpy.int64), test_ihfft_int64=[NDArray[numpy.int64,:]]) def test_ihfft_byte(self): self.run_test("def test_ihfft_byte(x): from numpy.fft import ihfft ; return ihfft(x)", (100*numpy.random.random(128)).astype(numpy.byte), 
test_ihfft_byte=[NDArray[numpy.byte,:]]) @TestEnv.module class TestNumpyHFFT(TestEnv): ############# hfft # Basic test def test_hfft_0(self): self.run_test("def test_hfft_0(x): from numpy.fft import hfft ; return hfft(x)", numpy.exp(1j*numpy.random.random(8)), test_hfft_0=[NDArray[complex,:]]) # Test various values of n, even, odd, greater and smaller than array size def test_hfft_1(self): self.run_test("def test_hfft_1(x,n): from numpy.fft import hfft ; return hfft(x,n)", numpy.exp(1j*numpy.random.random(8)),8, test_hfft_1=[NDArray[complex,:],int]) def test_hfft_2(self): self.run_test("def test_hfft_2(x,n): from numpy.fft import hfft ; return hfft(x,n)", numpy.exp(1j*numpy.random.random(8)),9, test_hfft_2=[NDArray[complex,:],int]) def test_hfft_3(self): self.run_test("def test_hfft_3(x,n): from numpy.fft import hfft ; return hfft(x,n)", numpy.exp(1j*numpy.random.random(8)),7, test_hfft_3=[NDArray[complex,:],int]) def test_hfft_4(self): self.run_test("def test_hfft_4(x,n): from numpy.fft import hfft ; return hfft(x,n)", numpy.exp(1j*numpy.random.random(8)),6, test_hfft_4=[NDArray[complex,:],int]) def test_hfft_5(self): self.run_test("def test_hfft_5(x,n): from numpy.fft import hfft ; return hfft(x,n)", numpy.exp(1j*numpy.random.random(8)),10, test_hfft_5=[NDArray[complex,:],int]) # Two dimensional array def test_hfft_6(self): self.run_test("def test_hfft_6(x): from numpy.fft import hfft ; return hfft(x)", numpy.exp(1j*numpy.random.random((4,128))), test_hfft_6=[NDArray[complex,:,:]]) # Test axes def test_hfft_7(self): self.run_test("def test_hfft_7(x,n,a): from numpy.fft import hfft ; return hfft(x,n,a)", numpy.exp(1j*numpy.random.random((4,128))),128,1, test_hfft_7=[NDArray[complex,:,:],int,int]) def test_hfft_8(self): self.run_test("def test_hfft_8(x,n,a): from numpy.fft import hfft ; return hfft(x,n,a)", numpy.exp(1j*numpy.random.random((4,128))),128,0, test_hfft_8=[NDArray[complex,:,:],int,int]) # Test renorm def test_hfft_9(self): self.run_test("def 
test_hfft_9(x,n,a,r): from numpy.fft import hfft ; return hfft(x,n,a,r)", numpy.exp(1j*numpy.random.random((4,128))),128,1,'ortho', test_hfft_9=[NDArray[complex,:,:],int,int,str]) # Test complex64 def test_hfft_10(self): self.run_test("def test_hfft_10(x): from numpy.fft import hfft ; return hfft(x)", numpy.exp(1j*numpy.random.random(8)).astype(numpy.complex64), test_hfft_10=[NDArray[numpy.complex64,:]]) def test_hfft_11(self): self.run_test("def test_hfft_11(x,n): from numpy.fft import hfft ; return hfft(x,n)", numpy.exp(1j*numpy.random.random(8)).astype(numpy.complex64),16,test_hfft_11=[NDArray[numpy.complex64,:],int]) # Test parallel: def test_hfft_12(self): self.run_test(''' import numpy as np def test_hfft_12(x): out = [np.empty_like(x, dtype=float) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.hfft(x) return np.concatenate(out) ''',numpy.exp(1j*numpy.random.random((4,128))), test_hfft_12=[NDArray[numpy.complex128,:,:]]) # Test with arguments def test_hfft_13(self): self.run_test("def test_hfft_13(x): from numpy.fft import hfft ; return hfft(x,axis=1)", numpy.exp(1j*numpy.random.random((2,128))), test_hfft_13=[NDArray[complex,:,:]]) def test_hfft_14(self): self.run_test("def test_hfft_14(x): from numpy.fft import hfft ; return hfft(x,n=128,axis=0)", numpy.exp(1j*numpy.random.random((2,128))), test_hfft_14=[NDArray[complex,:,:]]) @unittest.skip("Fails because of numpy casting to double (see documentation in headers for a discussion)") def test_hfft_c64(self): self.run_test("def test_hfft_c64(x): from numpy.fft import hfft ; return hfft(x)", numpy.exp(1j*numpy.random.random((2,128))).astype(numpy.complex64), test_hfft_c64=[NDArray[numpy.complex64,:,:]]) def test_hfft_f64(self): self.run_test("def test_hfft_f64(x): from numpy.fft import hfft ; return hfft(x)", numpy.random.random(128), test_hfft_f64=[NDArray[numpy.float64,:]]) @unittest.skip("Fails because of numpy casting to double (see documentation in headers for a discussion)") 
def test_hfft_f32(self): self.run_test("def test_hfft_f32(x): from numpy.fft import hfft ; return hfft(x)", numpy.random.random(128).astype(numpy.float32), test_hfft_f32=[NDArray[numpy.float32,:]]) def test_hfft_int64(self): self.run_test("def test_hfft_int64(x): from numpy.fft import hfft ; return hfft(x)", (100*numpy.random.random(128)).astype(numpy.int64), test_hfft_int64=[NDArray[numpy.int64,:]]) def test_hfft_byte(self): self.run_test("def test_hfft_byte(x): from numpy.fft import hfft ; return hfft(x)", (100*numpy.random.random(128)).astype(numpy.byte), test_hfft_byte=[NDArray[numpy.byte,:]]) @TestEnv.module class TestNumpyFFT(TestEnv): # complex inputs def test_fft_1d_1(self): self.run_test("def test_fft_1d_1(x): from numpy.fft import fft ; return fft(x)", numpy.random.randn(10)+1j*numpy.random.randn(10), test_fft_1d_1=[NDArray[numpy.complex128, :]]) # check axis arg def test_fft_1d_axis(self): axs = [0, -1] for ax in axs: with self.subTest(): self.run_test("def test_fft_1d_axis(x, a): from numpy.fft import fft ; return fft(x, axis=a)", numpy.random.randn(10)+1j*numpy.random.randn(10), ax, test_fft_1d_axis=[NDArray[numpy.complex128, :], int]) # check n arg set to None explicitely def test_fft_1d_n_None(self): self.run_test("def test_fft_1d_n_None(x): from numpy.fft import fft ; return fft(x, n=None)", numpy.random.randn(10)+1j*numpy.random.randn(10), test_fft_1d_n_None=[NDArray[numpy.complex128, :]]) # check n set smaller, same and larger than axis def test_fft_1d_n(self): ns = [8, 10, 12] for n in ns: with self.subTest(): self.run_test("def test_fft_1d_n(x, n): from numpy.fft import fft ; return fft(x, n)", numpy.random.randn(10)+1j*numpy.random.randn(10), n, test_fft_1d_n=[NDArray[numpy.complex128, :], int]) # check norm arg def test_fft_1d_norm(self): self.run_test("def test_fft_1d_norm(x, norm): from numpy.fft import fft ; return fft(x, norm=norm)", numpy.random.randn(10)+1j*numpy.random.randn(10), "ortho", test_fft_1d_norm=[NDArray[numpy.complex128, :], 
str]) ## do the same checks for real inputs def test_fft_1d_f64_1(self): self.run_test("def test_fft_1d_f64_1(x): from numpy.fft import fft ; return fft(x)", numpy.random.randn(10), test_fft_1d_f64_1=[NDArray[numpy.float64, :]]) def test_fft_1d_real_f32(self): self.run_test("def test_fft_1d_real_f32(x): from numpy.fft import fft ; return fft(x)", numpy.random.randn(10).astype(numpy.float32), test_fft_1d_real_f32=[NDArray[numpy.float32, :]]) def test_fft_1d_f64_axis(self): axs = [0, -1] for ax in axs: with self.subTest(): self.run_test("def test_fft_1d_f64_axis(x, a): from numpy.fft import fft ; return fft(x, axis=a)", numpy.random.randn(10), ax, test_fft_1d_f64_axis=[NDArray[numpy.float64, :], int]) def test_fft_1d_f64_n_None(self): self.run_test("def test_fft_1d_f64_n_None(x): from numpy.fft import fft ; return fft(x, n=None)", numpy.random.randn(10), test_fft_1d_f64_n_None=[NDArray[numpy.float64, :]]) # check n set smaller, same and larger than axis def test_fft_1d_f64_n(self): ns = [8, 10, 12] for n in ns: with self.subTest(): self.run_test("def test_fft_1d_f64_n(x, n): from numpy.fft import fft ; return fft(x, n)", numpy.random.randn(10), n, test_fft_1d_f64_n=[NDArray[numpy.float64, :], int]) def test_fft_1d_f64_norm(self): self.run_test("def test_fft_1d_f64_norm(x, n): from numpy.fft import fft ; return fft(x, norm=n)", numpy.random.randn(10), "ortho", test_fft_1d_f64_norm=[NDArray[numpy.float64, :], str]) def test_fft_1d_f64_norm_None(self): self.run_test("def test_fft_1d_norm_None(x): from numpy.fft import fft ; return fft(x, norm=None)", numpy.random.randn(10), test_fft_1d_norm_None=[NDArray[numpy.float64, :]]) # check for integer intputs def test_fft_1d_int64(self): self.run_test("def test_fft_1d_int64(x): from numpy.fft import fft ; return fft(x)", numpy.random.randn(10).astype(numpy.int64), test_fft_1d_int64=[NDArray[numpy.int64, :]]) def test_fft_1d_int32(self): self.run_test("def test_fft_1d_int32(x): from numpy.fft import fft ; return fft(x)", 
numpy.random.randn(10).astype(numpy.int32), test_fft_1d_int32=[NDArray[numpy.int32, :]]) def test_fft_1d_int8(self): self.run_test("def test_fft_1d_int8(x): from numpy.fft import fft ; return fft(x)", numpy.random.randn(10).astype(numpy.int8), test_fft_1d_int8=[NDArray[numpy.int8, :]]) def test_fft_1d_byte(self): self.run_test("def test_fft_1d_byte(x): from numpy.fft import fft ; return fft(x)", numpy.random.randn(10).astype(numpy.int8), test_fft_1d_byte=[NDArray[numpy.byte, :]]) def test_fft_1d_int64_axis(self): axs = [0, -1] for ax in axs: with self.subTest(): self.run_test("def test_fft_1d_int64_axis(x, a): from numpy.fft import fft ; return fft(x, axis=a)", numpy.random.randn(10).astype(numpy.int64), ax, test_fft_1d_int64_axis=[NDArray[numpy.int64, :], int]) def test_fft_1d_int64_n_None(self): self.run_test("def test_fft_1d_int64_n_None(x): from numpy.fft import fft ; return fft(x, n=None)", numpy.random.randn(10).astype(numpy.int64), test_fft_1d_int64_n_None=[NDArray[numpy.int64, :]]) # check n set smaller, same and larger than axis def test_fft_1d_int64_n(self): ns = [8, 10, 12] for n in ns: with self.subTest(): self.run_test("def test_fft_1d_int64_n(x, n): from numpy.fft import fft ; return fft(x, n)", numpy.random.randn(10).astype(numpy.int64), n, test_fft_1d_int64_n=[NDArray[numpy.int64, :], int]) def test_fft_1d_int64_axis(self): self.run_test("def test_fft_1d_int64_axis(x, a): from numpy.fft import fft ; return fft(x, axis=a)", numpy.random.randn(10).astype(numpy.int64), 0, test_fft_1d_int64_axis=[NDArray[numpy.int64, :], int]) def test_fft_1d_int64_norm(self): self.run_test("def test_fft_1d_int64_norm(x, n): from numpy.fft import fft ; return fft(x, norm=n)", numpy.random.randn(10).astype(numpy.int64), "ortho", test_fft_1d_int64_norm=[NDArray[numpy.int64, :], str]) def test_fft_1d_int64_norm_None(self): self.run_test("def test_fft_1d_int64_norm_None(x): from numpy.fft import fft ; return fft(x, norm=None)", numpy.random.randn(10).astype(numpy.int64), 
test_fft_1d_int64_norm_None=[NDArray[numpy.int64, :]]) def test_fft_1d_2(self): self.run_test("def test_fft_1d_2(x): from numpy.fft import fft ; return fft(x)", numpy.random.randn(2**16)+1j*numpy.random.randn(2**16), test_fft_1d_2=[NDArray[numpy.complex128, :]]) def test_fft_2d(self): szs = [3, 5] for sz in szs: with self.subTest(): self.run_test("def test_fft_2d_2(x): from numpy.fft import fft ; return fft(x)", (numpy.random.randn(30)+1j*numpy.random.randn(30)).reshape(sz, -1), test_fft_2d_2=[NDArray[numpy.complex128, :, :]]) def test_fft_2d_axis_n_norm(self): al = [0, 1, -1] nl = [3, 4, 5] norm = "ortho" for a in al: for n in nl: with self.subTest(): self.run_test("def test_fft_2d_axis_n_norm(x, a, n, norm): from numpy.fft import fft ; return fft(x, n=n, axis=a, norm=norm)", (numpy.random.randn(20)+1j*numpy.random.randn(20)).reshape(5, -1), a, n, norm, test_fft_2d_axis_n_norm=[NDArray[numpy.complex128, :, :], int, int, str]) def test_fft_2d_f64_axis_n_norm(self): al = [0, 1, -1] nl = [3, 4, 5] norm = "ortho" for a in al: for n in nl: with self.subTest(): self.run_test("def test_fft_2d_f64_axis_n_norm(x, a, n, norm): from numpy.fft import fft ; return fft(x, n=n, axis=a, norm=norm)", numpy.random.randn(20).reshape(5, -1).copy(), a, n, norm, test_fft_2d_f64_axis_n_norm=[NDArray[numpy.float64, :, :], int, int, str]) def test_fft_2d_int64_axis_n_norm(self): al = [0, 1, -1] nl = [3, 4, 5] norm = "ortho" for a in al: for n in nl: with self.subTest(): self.run_test("def test_fft_2d_int64_axis_n_norm(x, a, n, norm): from numpy.fft import fft ; return fft(x, n=n, axis=a, norm=norm)", (numpy.random.randn(20)).astype(numpy.int64).reshape(5, -1).copy(), a, n, norm, test_fft_2d_int64_axis_n_norm=[NDArray[numpy.int64, :, :], int, int, str]) # 3D def test_fft_3d(self): szs = [(5, 4, -1), (4, 5, -1)] for sz in szs: with self.subTest(): self.run_test("def test_fft_3d(x): from numpy.fft import fft ; return fft(x)", 
(numpy.random.randn(200)+1j*numpy.random.randn(200)).reshape(sz).copy(), test_fft_3d=[NDArray[numpy.complex128, :, :, :]]) def test_fft_3d_axis(self): al = [0, 1, 2, -1] szs = [(5, 4, -1), (4, 5, -1)] for a in al: for sz in szs: with self.subTest(): self.run_test("def test_fft_3d_axis(x, a): from numpy.fft import fft ; return fft(x, axis=a)", (numpy.random.randn(200)+1j*numpy.random.randn(200)).reshape(sz), a, test_fft_3d_axis=[NDArray[numpy.complex128, :, :, :], int]) def test_fft_3d_f64_axis(self): al = [0, 1, 2, -1] szs = [(5, 4, -1), (4, 5, -1)] for a in al: for sz in szs: with self.subTest(): self.run_test("def test_fft_3d_f64_axis(x, a): from numpy.fft import fft ; return fft(x, axis=a)", (numpy.random.randn(200)).reshape(sz), a, test_fft_3d_f64_axis=[NDArray[numpy.float64, :, :, :], int]) def test_fft_3d_int64_axis(self): al = [0, 1, 2, -1] szs = [(5, 4, -1), (4, 5, -1)] for a in al: for sz in szs: with self.subTest(): self.run_test("def test_fft_3d_int64_axis(x, a): from numpy.fft import fft ; return fft(x, axis=a)", (numpy.random.randn(200)).reshape(sz).astype(numpy.int64), a, test_fft_3d_int64_axis=[NDArray[numpy.int64, :, :, :], int]) def test_fft_parallel(self): import numpy self.run_test(""" import numpy as np def test_fft_parallel(x): out = [np.empty_like(x) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.fft(x) return np.concatenate(out) """, (numpy.random.randn(512)+1j*numpy.random.randn(512)).reshape((4,128)), test_fft_parallel=[NDArray[numpy.complex128, :, :]]) def test_fft_f64_parallel(self): import numpy self.run_test(""" import numpy as np def test_fft_f64_parallel(x): out = [np.empty_like(x) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.fft(x) return np.concatenate(out) """, (numpy.random.randn(512)).reshape((4,128)), test_fft_f64_parallel=[NDArray[numpy.float64, :, :]]) def test_fft_int64_parallel(self): import numpy self.run_test(""" import numpy as np def test_fft_int64_parallel(x): 
out = [np.empty_like(x) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.fft(x) return np.concatenate(out) """, (numpy.random.randn(512)).reshape((4,128)).astype(numpy.int64), test_fft_int64_parallel=[NDArray[numpy.int64, :, :]]) @TestEnv.module class TestNumpyIFFT(TestEnv): # complex inputs def test_ifft_1d_1(self): self.run_test("def test_ifft_1d_1(x): from numpy.fft import ifft ; return ifft(x)", numpy.random.randn(10)+1j*numpy.random.randn(10), test_ifft_1d_1=[NDArray[numpy.complex128, :]]) # check axis arg def test_ifft_1d_axis(self): axs = [0, -1] for ax in axs: with self.subTest(): self.run_test("def test_ifft_1d_axis(x, a): from numpy.fft import ifft ; return ifft(x, axis=a)", numpy.random.randn(10)+1j*numpy.random.randn(10), ax, test_ifft_1d_axis=[NDArray[numpy.complex128, :], int]) # check n arg set to None explicitely def test_ifft_1d_n_None(self): self.run_test("def test_ifft_1d_n_None(x): from numpy.fft import ifft ; return ifft(x, n=None)", numpy.random.randn(10)+1j*numpy.random.randn(10), test_ifft_1d_n_None=[NDArray[numpy.complex128, :]]) # check n set smaller, same and larger than axis def test_ifft_1d_n(self): ns = [8, 10, 12] for n in ns: with self.subTest(): self.run_test("def test_ifft_1d_n(x, n): from numpy.fft import ifft ; return ifft(x, n)", numpy.random.randn(10)+1j*numpy.random.randn(10), n, test_ifft_1d_n=[NDArray[numpy.complex128, :], int]) # check norm arg def test_ifft_1d_norm(self): self.run_test("def test_ifft_1d_norm(x, norm): from numpy.fft import ifft ; return ifft(x, norm=norm)", numpy.random.randn(10)+1j*numpy.random.randn(10), "ortho", test_ifft_1d_norm=[NDArray[numpy.complex128, :], str]) ## do the same checks for real inputs def test_ifft_1d_f64_1(self): self.run_test("def test_ifft_1d_f64_1(x): from numpy.fft import ifft ; return ifft(x)", numpy.random.randn(10), test_ifft_1d_f64_1=[NDArray[numpy.float64, :]]) def test_ifft_1d_real_f32(self): self.run_test("def test_ifft_1d_real_f32(x): from 
numpy.fft import ifft ; return ifft(x)", numpy.random.randn(10).astype(numpy.float32), test_ifft_1d_real_f32=[NDArray[numpy.float32, :]]) def test_ifft_1d_f64_axis(self): axs = [0, -1] for ax in axs: with self.subTest(): self.run_test("def test_ifft_1d_f64_axis(x, a): from numpy.fft import ifft ; return ifft(x, axis=a)", numpy.random.randn(10), ax, test_ifft_1d_f64_axis=[NDArray[numpy.float64, :], int]) def test_ifft_1d_f64_n_None(self): self.run_test("def test_ifft_1d_f64_n_None(x): from numpy.fft import ifft ; return ifft(x, n=None)", numpy.random.randn(10), test_ifft_1d_f64_n_None=[NDArray[numpy.float64, :]]) # check n set smaller, same and larger than axis def test_ifft_1d_f64_n(self): ns = [8, 10, 12] for n in ns: with self.subTest(): self.run_test("def test_ifft_1d_f64_n(x, n): from numpy.fft import ifft ; return ifft(x, n)", numpy.random.randn(10), n, test_ifft_1d_f64_n=[NDArray[numpy.float64, :], int]) def test_ifft_1d_f64_norm(self): self.run_test("def test_ifft_1d_f64_norm(x, n): from numpy.fft import ifft ; return ifft(x, norm=n)", numpy.random.randn(10), "ortho", test_ifft_1d_f64_norm=[NDArray[numpy.float64, :], str]) def test_ifft_1d_f64_norm_None(self): self.run_test("def test_ifft_1d_norm_None(x): from numpy.fft import ifft ; return ifft(x, norm=None)", numpy.random.randn(10), test_ifft_1d_norm_None=[NDArray[numpy.float64, :]]) # check for integer intputs def test_ifft_1d_int64(self): self.run_test("def test_ifft_1d_int64(x): from numpy.fft import ifft ; return ifft(x)", numpy.random.randn(10).astype(numpy.int64), test_ifft_1d_int64=[NDArray[numpy.int64, :]]) def test_ifft_1d_int32(self): self.run_test("def test_ifft_1d_int32(x): from numpy.fft import ifft ; return ifft(x)", numpy.random.randn(10).astype(numpy.int32), test_ifft_1d_int32=[NDArray[numpy.int32, :]]) def test_ifft_1d_int8(self): self.run_test("def test_ifft_1d_int8(x): from numpy.fft import ifft ; return ifft(x)", numpy.random.randn(10).astype(numpy.int8), 
test_ifft_1d_int8=[NDArray[numpy.int8, :]]) def test_ifft_1d_byte(self): self.run_test("def test_ifft_1d_byte(x): from numpy.fft import ifft ; return ifft(x)", numpy.random.randn(10).astype(numpy.int8), test_ifft_1d_byte=[NDArray[numpy.byte, :]]) def test_ifft_1d_int64_axis(self): axs = [0, -1] for ax in axs: with self.subTest(): self.run_test("def test_ifft_1d_int64_axis(x, a): from numpy.fft import ifft ; return ifft(x, axis=a)", numpy.random.randn(10).astype(numpy.int64), ax, test_ifft_1d_int64_axis=[NDArray[numpy.int64, :], int]) def test_ifft_1d_int64_n_None(self): self.run_test("def test_ifft_1d_int64_n_None(x): from numpy.fft import ifft ; return ifft(x, n=None)", numpy.random.randn(10).astype(numpy.int64), test_ifft_1d_int64_n_None=[NDArray[numpy.int64, :]]) # check n set smaller, same and larger than axis def test_ifft_1d_int64_n(self): ns = [8, 10, 12] for n in ns: with self.subTest(): self.run_test("def test_ifft_1d_int64_n(x, n): from numpy.fft import ifft ; return ifft(x, n)", numpy.random.randn(10).astype(numpy.int64), n, test_ifft_1d_int64_n=[NDArray[numpy.int64, :], int]) def test_ifft_1d_int64_axis(self): self.run_test("def test_ifft_1d_int64_axis(x, a): from numpy.fft import ifft ; return ifft(x, axis=a)", numpy.random.randn(10).astype(numpy.int64), 0, test_ifft_1d_int64_axis=[NDArray[numpy.int64, :], int]) def test_ifft_1d_int64_norm(self): self.run_test("def test_ifft_1d_int64_norm(x, n): from numpy.fft import ifft ; return ifft(x, norm=n)", numpy.random.randn(10).astype(numpy.int64), "ortho", test_ifft_1d_int64_norm=[NDArray[numpy.int64, :], str]) def test_ifft_1d_int64_norm_None(self): self.run_test("def test_ifft_1d_int64_norm_None(x): from numpy.fft import ifft ; return ifft(x, norm=None)", numpy.random.randn(10).astype(numpy.int64), test_ifft_1d_int64_norm_None=[NDArray[numpy.int64, :]]) def test_ifft_1d_2(self): self.run_test("def test_ifft_1d_2(x): from numpy.fft import ifft ; return ifft(x)", 
numpy.random.randn(2**16)+1j*numpy.random.randn(2**16), test_ifft_1d_2=[NDArray[numpy.complex128, :]]) def test_ifft_2d(self): szs = [3, 5] for sz in szs: with self.subTest(): self.run_test("def test_ifft_2d_2(x): from numpy.fft import ifft ; return ifft(x)", (numpy.random.randn(30)+1j*numpy.random.randn(30)).reshape(sz, -1), test_ifft_2d_2=[NDArray[numpy.complex128, :, :]]) def test_ifft_2d_axis_n_norm(self): al = [0, 1, -1] nl = [3, 4, 5] norm = "ortho" for a in al: for n in nl: with self.subTest(): self.run_test("def test_ifft_2d_axis_n_norm(x, a, n, norm): from numpy.fft import ifft ; return ifft(x, n=n, axis=a, norm=norm)", (numpy.random.randn(20)+1j*numpy.random.randn(20)).reshape(5, -1), a, n, norm, test_ifft_2d_axis_n_norm=[NDArray[numpy.complex128, :, :], int, int, str]) def test_ifft_2d_f64_axis_n_norm(self): al = [0, 1, -1] nl = [3, 4, 5] norm = "ortho" for a in al: for n in nl: with self.subTest(): self.run_test("def test_ifft_2d_f64_axis_n_norm(x, a, n, norm): from numpy.fft import ifft ; return ifft(x, n=n, axis=a, norm=norm)", numpy.random.randn(20).reshape(5, -1).copy(), a, n, norm, test_ifft_2d_f64_axis_n_norm=[NDArray[numpy.float64, :, :], int, int, str]) def test_ifft_2d_int64_axis_n_norm(self): al = [0, 1, -1] nl = [3, 4, 5] norm = "ortho" for a in al: for n in nl: with self.subTest(): self.run_test("def test_ifft_2d_int64_axis_n_norm(x, a, n, norm): from numpy.fft import ifft ; return ifft(x, n=n, axis=a, norm=norm)", (numpy.random.randn(20)).astype(numpy.int64).reshape(5, -1).copy(), a, n, norm, test_ifft_2d_int64_axis_n_norm=[NDArray[numpy.int64, :, :], int, int, str]) # 3D def test_ifft_3d(self): szs = [(5, 4, -1), (4, 5, -1)] for sz in szs: with self.subTest(): self.run_test("def test_ifft_3d(x): from numpy.fft import ifft ; return ifft(x)", (numpy.random.randn(200)+1j*numpy.random.randn(200)).reshape(sz).copy(), test_ifft_3d=[NDArray[numpy.complex128, :, :, :]]) def test_ifft_3d_axis(self): al = [0, 1, 2, -1] szs = [(5, 4, -1), (4, 5, -1)] 
for a in al: for sz in szs: with self.subTest(): self.run_test("def test_ifft_3d_axis(x, a): from numpy.fft import ifft ; return ifft(x, axis=a)", (numpy.random.randn(200)+1j*numpy.random.randn(200)).reshape(sz), a, test_ifft_3d_axis=[NDArray[numpy.complex128, :, :, :], int]) def test_ifft_3d_f64_axis(self): al = [0, 1, 2, -1] szs = [(5, 4, -1), (4, 5, -1)] for a in al: for sz in szs: with self.subTest(): self.run_test("def test_ifft_3d_f64_axis(x, a): from numpy.fft import ifft ; return ifft(x, axis=a)", (numpy.random.randn(200)).reshape(sz), a, test_ifft_3d_f64_axis=[NDArray[numpy.float64, :, :, :], int]) def test_ifft_3d_int64_axis(self): al = [0, 1, 2, -1] szs = [(5, 4, -1), (4, 5, -1)] for a in al: for sz in szs: with self.subTest(): self.run_test("def test_ifft_3d_int64_axis(x, a): from numpy.fft import ifft ; return ifft(x, axis=a)", (numpy.random.randn(200)).reshape(sz).astype(numpy.int64), a, test_ifft_3d_int64_axis=[NDArray[numpy.int64, :, :, :], int]) def test_ifft_parallel(self): import numpy self.run_test(""" import numpy as np def test_ifft_parallel(x): out = [np.empty_like(x) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.fft(x) return np.concatenate(out) """, (numpy.random.randn(512)+1j*numpy.random.randn(512)).reshape((4,128)), test_ifft_parallel=[NDArray[numpy.complex128, :, :]]) def test_ifft_f64_parallel(self): import numpy self.run_test(""" import numpy as np def test_ifft_f64_parallel(x): out = [np.empty_like(x) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.fft(x) return np.concatenate(out) """, (numpy.random.randn(512)).reshape((4,128)), test_ifft_f64_parallel=[NDArray[numpy.float64, :, :]]) def test_ifft_int64_parallel(self): import numpy self.run_test(""" import numpy as np def test_ifft_int64_parallel(x): out = [np.empty_like(x) for i in range(20)] #omp parallel for for ii in range(20): out[ii] = np.fft.fft(x) return np.concatenate(out) """, 
(numpy.random.randn(512)).reshape((4,128)).astype(numpy.int64), test_ifft_int64_parallel=[NDArray[numpy.int64, :, :]]) pythran-0.10.0+ds2/pythran/tests/test_numpy_func0.py000066400000000000000000001377021416264035500224640ustar00rootroot00000000000000import unittest from pythran.tests import TestEnv import numpy import tempfile import os from pythran.typing import NDArray, List, Tuple @TestEnv.module class TestNumpyFunc0(TestEnv): def test_extended_sum0(self): self.run_test("def numpy_extended_sum0(a): import numpy ; return numpy.sum(a)", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_sum0=[NDArray[int,:,:,:,:]]) def test_extended_sum1(self): self.run_test("def numpy_extended_sum1(a): import numpy ; return numpy.sum(a[1])", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_sum1=[NDArray[int,:,:,:,:]]) def test_extended_sum2(self): self.run_test("def numpy_extended_sum2(a): import numpy ; return numpy.sum(a[1,0])", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_sum2=[NDArray[int,:,:,:,:]]) def test_extended_sum3(self): self.run_test("def numpy_extended_sum3(a): import numpy ; return numpy.sum(a[1:-1])", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_sum3=[NDArray[int,:,:,:,:]]) def test_extended_sum4(self): self.run_test("def numpy_extended_sum4(a): import numpy ; return numpy.sum(a[1:-1,0])", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_sum4=[NDArray[int,:,:,:,:]]) def test_extended_sum5(self): self.run_test("def numpy_extended_sum5(a): import numpy ; return numpy.sum(a)", numpy.arange(120).reshape((3,5,4,2)), numpy_extended_sum5=[NDArray[int,:,:,:,:]]) def test_out_sum0(self): self.run_test("def numpy_out_sum0(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)", numpy.arange(10).reshape((5,2)), numpy.zeros(2, dtype=int), numpy_out_sum0=[NDArray[int,:,:], NDArray[int,:]]) def test_out_sum1(self): self.run_test("def numpy_out_sum1(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)", numpy.arange(10).reshape((5,2)), 
numpy.ones(2, dtype=int), numpy_out_sum1=[NDArray[int,:,:], NDArray[int,:]]) def test_out_sum2(self): self.run_test("def numpy_out_sum2(a, b): import numpy ; return numpy.sum(a, axis=1, out=b)", numpy.arange(10).reshape((5,2)), numpy.zeros(5, dtype=int), numpy_out_sum2=[NDArray[int,:,:], NDArray[int,:]]) def test_numpy_shape_as_function(self): self.run_test("def numpy_shape_as_function(a): import numpy ; return numpy.shape(a)", numpy.ones(3, numpy.int16), numpy_shape_as_function=[NDArray[numpy.int16,:]]) def test_numpy_size_as_function(self): self.run_test("def numpy_size_as_function(a): import numpy ; return numpy.size(a)", numpy.ones(3, numpy.int16), numpy_size_as_function=[NDArray[numpy.int16,:]]) def test_numpy_ndim_as_function(self): self.run_test("def numpy_ndim_as_function(a): import numpy ; return numpy.ndim(a)", numpy.ones(3, numpy.int16), numpy_ndim_as_function=[NDArray[numpy.int16,:]]) def test_frexp0(self): self.run_test("def np_frexp0(a): import numpy as np ; return np.frexp(a)", 1.5, np_frexp0=[float]) def test_frexp1(self): self.run_test("def np_frexp1(a): import numpy as np ; return np.frexp(a)", numpy.array([1.1,2.2,3.3]), np_frexp1=[NDArray[float,:]]) def test_frexp2(self): self.run_test("def np_frexp2(a): import numpy as np ; return np.frexp(a+a)", numpy.array([1.1,2.2,3.3]), np_frexp2=[NDArray[float,:]]) def test_ndindex0(self): self.run_test("def np_ndindex0(): import numpy as np ; return [x for x in np.ndindex(5,6)]", np_ndindex0=[]) def test_ndindex1(self): self.run_test("def np_ndindex1(a): import numpy as np ; return [x for x in np.ndindex(a)]", 3, np_ndindex1=[int]) def test_ndindex2(self): self.run_test("def np_ndindex2(n): import numpy as np ; return [x for x in np.ndindex((n,n))]", 3, np_ndindex2=[int]) def test_ndenumerate0(self): self.run_test("def np_ndenumerate0(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([[1, 2], [3, 4]]), np_ndenumerate0=[NDArray[int,:,:]]) def test_ndenumerate1(self): 
self.run_test("def np_ndenumerate1(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([1, 2, 3, 4]), np_ndenumerate1=[NDArray[int,:]]) def test_nansum0(self): self.run_test("def np_nansum0(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nansum0=[NDArray[float,:,:]]) def test_nansum1(self): self.run_test("def np_nansum1(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nansum1=[NDArray[float,:,:]]) def test_nansum2(self): self.run_test("def np_nansum2(a): import numpy as np ; return np.nansum(a)", [1., numpy.nan], np_nansum2=[List[float]]) def test_nanmin0(self): self.run_test("def np_nanmin0(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmin0=[NDArray[float,:,:]]) def test_nanmin1(self): self.run_test("def np_nanmin1(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nanmin1=[NDArray[float,:,:]]) def test_nanmin2(self): self.run_test("def np_nanmin2(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[numpy.nan, numpy.nan], [numpy.nan, numpy.nan]]), np_nanmin2=[NDArray[float,:,:]]) def test_nanmax0(self): self.run_test("def np_nanmax0(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmax0=[NDArray[float,:,:]]) def test_nanmax1(self): self.run_test("def np_nanmax1(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [numpy.inf, numpy.nan]]) , np_nanmax1=[NDArray[float,:,:]]) def test_nanmax2(self): self.run_test("def np_nanmax2(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[numpy.nan, numpy.nan], [numpy.nan, numpy.nan]]), np_nanmax2=[NDArray[float,:,:]]) def test_np_residual(self): self.run_test("""import numpy as np def np_residual(): nx, ny, nz= 75, 75, 100 hx, hy = 1./(nx-1), 1./(ny-1) P_left, P_right = 0, 0 P_top, P_bottom = 1, 0 P = np.ones((nx, ny, nz), np.float64) d2x = 
np.zeros_like(P) d2y = np.zeros_like(P) d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy return d2x + d2y + 5*np.cosh(P).mean()**2 """, np_residual=[]) def test_np_func2(self): self.run_test("""import numpy as np def np_func2(x): f = [x[0] * np.cos(x[1]) - 4, x[1]*x[0] - x[1] - 5] df = np.array([[np.cos(x[1]), -x[0] * np.sin(x[1])], [x[1], x[0] - 1]]) return f, df """, [1.0, 2.0, 3.0], np_func2=[List[float]]) def test_np_peval(self): self.run_test("""import numpy def np_peval(x, p): return p[0]*numpy.sin(2*numpy.pi*p[1]*x+p[2]) """, 12., [1.0, 2.0, 3.0], np_peval=[float, List[float]]) def test_np_residuals(self): self.run_test("""import numpy def np_residuals(): x = numpy.arange(0,6e-2,6e-2/30) A,k,theta = 10, 1.0/3e-2, numpy.pi/6 return A*numpy.sin(2*numpy.pi*k*x+theta) """, np_residuals=[]) def test_np_func_deriv(self): self.run_test("""import numpy def np_func_deriv(x, sign=1.0): dfdx0 = sign*(-2*x[0] + 2*x[1] + 2) dfdx1 = sign*(2*x[0] - 4*x[1]) return numpy.array([ dfdx0, dfdx1 ]) """, [-1.0, 1.0], -1.0, np_func_deriv=[List[float], float]) def test_np_func(self): self.run_test("""import numpy def np_func(x, sign=1.0): return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2) """, [-1.0, 1.0], -1.0, np_func=[List[float], float]) def test_rosen_hess_p(self): self.run_test("""import numpy def np_rosen_hess_p(x, p): x = numpy.asarray(x) Hp = numpy.zeros_like(x) Hp[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1] Hp[1:-1] = -400*x[:-2]*p[:-2]+(202+1200*x[1:-1]**2-400*x[2:])*p[1:-1] \ -400*x[1:-1]*p[2:] Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1] return Hp """, numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]), numpy.array([2.3, 1.7, 1.8, 2.9, 2.2]), np_rosen_hess_p=[NDArray[float,:], NDArray[float,:]]) def test_rosen_hess(self): 
self.run_test("""import numpy def np_rosen_hess(x): x = numpy.asarray(x) H = numpy.diag(-400*x[:-1],1) - numpy.diag(400*x[:-1],-1) diagonal = numpy.zeros_like(x) diagonal[0] = 1200*x[0]**2-400*x[1]+2 diagonal[-1] = 200 diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:] H = H + numpy.diag(diagonal) return H """, numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]), np_rosen_hess=[NDArray[float,:]]) def test_rosen_der(self): self.run_test("""import numpy def np_rosen_der(x): xm = x[1:-1] xm_m1 = x[:-2] xm_p1 = x[2:] der = numpy.zeros_like(x) der[1:-1] = 200*(xm-xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1-xm) der[0] = -400*x[0]*(x[1]-x[0]**2) - 2*(1-x[0]) der[-1] = 200*(x[-1]-x[-2]**2) return der """, numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]), np_rosen_der=[NDArray[float,:]]) def test_rosen(self): self.run_test("import numpy\ndef np_rosen(x): return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)", numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]), np_rosen=[NDArray[float,:]]) def test_nanargmax0(self): self.run_test("def np_nanargmax0(a): from numpy import nanargmax; return nanargmax(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmax0=[NDArray[float,:,:]]) def test_nanargmin0(self): self.run_test("def np_nanargmin0(a): from numpy import nanargmin ; return nanargmin(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmin0=[NDArray[float,:,:]]) def test_nan_to_num0(self): self.run_test("def np_nan_to_num0(a): import numpy as np ; return np.nan_to_num(a)", numpy.array([numpy.inf, -numpy.inf, numpy.nan, -128, 128]), np_nan_to_num0=[NDArray[float,:]]) def test_median0(self): self.run_test("def np_median0(a): from numpy import median ; return median(a)", numpy.array([[1, 2], [3, 4]]), np_median0=[NDArray[int,:,:]]) def test_median1(self): self.run_test("def np_median1(a): from numpy import median ; return median(a)", numpy.array([1, 2, 3, 4,5]), np_median1=[NDArray[int,:]]) def test_median2(self): self.run_test("def np_median2(a): from numpy import median ; return median(a, None)", 
numpy.array([1, 2, 3, 4,5]), np_median2=[NDArray[int,:]]) def test_median3(self): self.run_test("def np_median3(a): from numpy import median ; return median(a, 0)", numpy.array([[1, 2, 3], [4,5,6]]), np_median3=[NDArray[int,:,:]]) def test_median4(self): self.run_test("def np_median4(a): from numpy import median ; return median(a, 1)", numpy.array([[1, 2, 3], [4,5,6]]), np_median4=[NDArray[int,:,:]]) def test_median5(self): self.run_test("def np_median5(a): from numpy import median ; return median(a, -1)", numpy.array([[[1], [2], [3]], [[4],[5],[6]]]), np_median5=[NDArray[int,:,:,:]]) def test_median6(self): self.run_test("def np_median6(l): from numpy import median ; return l + median(l)", numpy.array([3, 1]), np_median6=[NDArray[int, :]]) def test_mean0(self): self.run_test("def np_mean0(a): from numpy import mean ; return mean(a)", numpy.array([[1, 2], [3, 4]]), np_mean0=[NDArray[int,:,:]]) def test_mean1(self): self.run_test("def np_mean1(a): from numpy import mean ; return mean(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_mean1=[NDArray[float,:,:]]) def test_mean2(self): self.run_test("def np_mean2(a): from numpy import mean ; return mean(a)", numpy.array([[[1, 2], [3, 4.]]]), np_mean2=[NDArray[float,:,:,:]]) def test_mean3(self): self.run_test("def np_mean3(a): from numpy import mean ; return mean(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_mean3=[NDArray[float,:,:,:]]) def test_mean4(self): self.run_test("def np_mean4(a): from numpy import mean ; return mean(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), np_mean4=[NDArray[float,:,:,:]]) def test_mean5(self): self.run_test("def np_mean5(a): from numpy import mean ; return mean(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_mean5=[NDArray[float,:,:,:]]) def test_mean6(self): self.run_test("def np_mean6(a): from numpy import mean ; from numpy import float64; return mean(a, 2, float64, None, False)", numpy.array([[[1, 2], [3, 4.]]]), np_mean6=[NDArray[float,:,:,:]]) def test_mean7(self): self.run_test("def np_mean7(a): 
from numpy import mean ; return mean(a, 2, out=None, keepdims=False)", numpy.array([[[1, 2], [3, 4.]]]), np_mean7=[NDArray[float,:,:,:]]) def test_mean8(self): self.run_test("def np_mean8(a): from numpy import mean ; return mean(a, 2, keepdims=False)", numpy.array([[[1, 2], [3, 4.]]]), np_mean8=[NDArray[float,:,:,:]]) def test_mean9(self): self.run_test("def np_mean9(a): from numpy import mean ; return mean(a, keepdims=False)", numpy.array([[[1, 2], [3, 4.]]]), np_mean9=[NDArray[float,:,:,:]]) def test_mean10(self): self.run_test("def np_mean10(a): from numpy import mean ; return mean(a, 2, dtype=int, keepdims=True)", numpy.array([[[0, 2], [3, 4.]]]), np_mean10=[NDArray[float,:,:,:]]) def test_mean11(self): self.run_test("def np_mean11(a): from numpy import mean ; return mean(a, 2, keepdims=1)", numpy.array([[[1, 2], [3, 4.]]]), np_mean11=[NDArray[float,:,:,:]]) def test_mean12(self): self.run_test("def np_mean12(a): from numpy import mean ; return mean(a, keepdims=True)", numpy.array([[[1, 2], [3, 4.]]]), np_mean12=[NDArray[float,:,:,:]]) def test_mean13(self): self.run_test("def np_mean13(a): from numpy import mean ; return mean(a, keepdims=True, dtype=int)", numpy.array([[[0, 2], [3, 4.]]]), np_mean13=[NDArray[float,:,:,:]]) def test_var0(self): self.run_test("def np_var0(a): return a.var()", numpy.array([[1, 2], [3, 4]], dtype=float), np_var0=[NDArray[float,:,:]]) def test_var1(self): self.run_test("def np_var1(a): from numpy import var ; return var(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_var1=[NDArray[float,:,:]]) def test_var2(self): self.run_test("def np_var2(a): from numpy import var ; return var(a)", numpy.array([[[1, 2], [3, 4.]]]), np_var2=[NDArray[float,:,:,:]]) def test_var3(self): self.run_test("def np_var3(a): from numpy import var ; return var(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_var3=[NDArray[float,:,:,:]]) def test_var4(self): self.run_test("def np_var4(a): from numpy import var ; return var(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), 
np_var4=[NDArray[float,:,:,:]]) def test_var5(self): self.run_test("def np_var5(a): from numpy import var ; return var(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_var5=[NDArray[float,:,:,:]]) def test_var6(self): self.run_test("def np_var6(a): from numpy import var ; return var(1j * a)", numpy.array([[[1, 2], [3, 4.]]]), np_var6=[NDArray[float,:,:,:]]) def test_var7(self): self.run_test("def np_var7(a): from numpy import var ; return var(1j * a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_var7=[NDArray[float,:,:,:]]) def test_var8(self): self.run_test("def np_var8(a): from numpy import var ; return var(1j * a, 2)", numpy.array([[[1, 2], [3, 4]]]), np_var8=[NDArray[int,:,:,:]]) def test_var9(self): self.run_test("def np_var9(a): from numpy import var ; return var(1j * a)", numpy.array([[[1, 2], [3, 4]]]), np_var9=[NDArray[int,:,:,:]]) def test_std0(self): self.run_test("def np_std0(a): from numpy import std ; return std(a)", numpy.array([[[1, 2], [3, 4]]]), np_std0=[NDArray[int, :, :, :]]) def test_std1(self): self.run_test("def np_std1(a): from numpy import std ; return std(a, 0)", numpy.array([[[1, 2], [3, 4]]]), np_std1=[NDArray[int, :, :, :]]) def test_std2(self): self.run_test("def np_std2(a): from numpy import std ; return std(a, 1)", numpy.array([[[1, 2], [3, 4]]]), np_std2=[NDArray[int, :, :, :]]) def test_std3(self): self.run_test("def np_std3(a): from numpy import std ; return std(1j*a, 1)", numpy.array([[[1, 2], [3, 4]]]), np_std3=[NDArray[int, :, :, :]]) def test_logspace0(self): self.run_test("def np_logspace0(start, stop): from numpy import logspace ; start, stop = 3., 4. 
; return logspace(start, stop, 4)", 3., 4., np_logspace0=[float, float]) def test_logspace1(self): self.run_test("def np_logspace1(start, stop): from numpy import logspace ; return logspace(start, stop, 4, False)", 3., 4., np_logspace1=[float, float]) def test_logspace2(self): self.run_test("def np_logspace2(start, stop): from numpy import logspace ; return logspace(start, stop, 4, True, 2.0)", 3., 4., np_logspace2=[float, float]) def test_lexsort0(self): self.run_test("def np_lexsort0(surnames): from numpy import lexsort ; first_names = ('Heinrich', 'Galileo', 'Gustav') ; return lexsort((first_names, surnames))", ('Hertz', 'Galilei', 'Hertz'), np_lexsort0=[Tuple[str, str, str]]) def test_lexsort1(self): self.run_test("def np_lexsort1(a): from numpy import lexsort ; b = [1,5,1,4,3,4,4] ; return lexsort((a,b))", [9,4,0,4,0,2,1], np_lexsort1=[List[int]]) def test_lexsort2(self): self.run_test("def np_lexsort2(a): from numpy import lexsort ; return lexsort((a+1,a-1))", numpy.array([1,5,1,4,3,4,4]), np_lexsort2=[NDArray[int,:]]) def test_issctype0(self): self.run_test("def np_issctype0(): from numpy import issctype, int32 ; a = int32 ; return issctype(a)", np_issctype0=[]) def test_issctype1(self): self.run_test("def np_issctype1(): from numpy import issctype ; a = list ; return issctype(a)", np_issctype1=[]) def test_issctype2(self): self.run_test("def np_issctype2(a): from numpy import issctype ; return issctype(a)", 3.1, np_issctype2=[float]) def test_isscalar0(self): self.run_test("def np_isscalar0(a): from numpy import isscalar ; return isscalar(a)", 3.1, np_isscalar0=[float]) def test_isscalar1(self): self.run_test("def np_isscalar1(a): from numpy import isscalar ; return isscalar(a)", [3.1], np_isscalar1=[List[float]]) def test_isscalar2(self): self.run_test("def np_isscalar2(a): from numpy import isscalar ; return isscalar(a)", '3.1', np_isscalar2=[str]) def test_isrealobj0(self): self.run_test("def np_isrealobj0(a): from numpy import isrealobj ; return 
isrealobj(a)", numpy.array([1,2,3.]), np_isrealobj0=[NDArray[float,:]]) def test_isrealobj1(self): self.run_test("def np_isrealobj1(a): from numpy import isrealobj ; return isrealobj(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isrealobj1=[NDArray[complex,:,:]]) def test_isreal0(self): self.run_test("def np_isreal0(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.]), np_isreal0=[NDArray[float,:]]) def test_isreal1(self): self.run_test("def np_isreal1(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isreal1=[NDArray[complex,:,:]]) def test_iscomplex0(self): self.run_test("def np_iscomplex0(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1, 2, 3.]), np_iscomplex0=[NDArray[float,:]]) def test_iscomplex1(self): self.run_test("def np_iscomplex1(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_iscomplex1=[NDArray[complex,:,:]]) def test_intersect1d0(self): self.run_test("def np_intersect1d0(a): from numpy import intersect1d ; b = [3, 1, 2, 1] ; return intersect1d(a,b)", [1, 3, 4, 3], np_intersect1d0=[List[int]]) def test_insert0(self): self.run_test("def np_insert0(a): from numpy import insert ; return insert(a, 1, 5)", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert0=[NDArray[int,:,:]]) def test_insert1(self): self.run_test("def np_insert1(a): from numpy import insert ; return insert(a, [1,2], [5,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert1=[NDArray[int,:,:]]) def test_insert2(self): self.run_test("def np_insert2(a): from numpy import insert ; return insert(a, [1,1], [5.2,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert2=[NDArray[int,:,:]]) def test_inner0(self): self.run_test("def np_inner0(x): from numpy import inner ; y = 3 ; return inner(x,y)", 2, np_inner0=[int]) def test_inner1(self): self.run_test("def np_inner1(x): from numpy import inner ; y = [2, 3] ; return inner(x,y)", [2, 3], 
np_inner1=[List[int]]) def test_indices0(self): self.run_test("def np_indices0(s): from numpy import indices ; return indices(s)", (2, 3), np_indices0=[Tuple[int, int]]) def test_identity0(self): self.run_test("def np_identity0(a): from numpy import identity ; return identity(a)", 3, np_identity0=[int]) def test_identity1(self): self.run_test("def np_identity1(a): from numpy import identity ;return identity(a)", 4, np_identity1=[int]) def test_tofile0(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.randint(0,2**8,1000).astype(numpy.uint8) try: self.run_test("def np_tofile0(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile0=[NDArray[numpy.uint8,:], str]) finally: os.remove(temp_name) def test_tofile1(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.randint(0,2**16,1000).astype(numpy.uint16) try: self.run_test("def np_tofile1(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile1=[NDArray[numpy.uint16,:], str]) finally: os.remove(temp_name) def test_tofile2(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.randint(0,2**31,1000).astype(numpy.uint32) try: self.run_test("def np_tofile2(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile2=[NDArray[numpy.uint32,:], str]) finally: os.remove(temp_name) def test_tofile3(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.random(1000).astype(numpy.float32) try: self.run_test("def np_tofile3(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile3=[NDArray[numpy.float32,:], str]) finally: os.remove(temp_name) def test_tofile4(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.random(1000).astype(numpy.float64) try: self.run_test("def np_tofile4(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile4=[NDArray[numpy.float64,:], str]) finally: os.remove(temp_name) def test_fromfile0(self): 
temp_name = tempfile.mkstemp()[1] x = numpy.random.randint(0,2**8,1000).astype(numpy.uint8) x.tofile(temp_name) try: self.run_test("def np_fromfile0(file): from numpy import fromfile, uint8 ; return fromfile(file, uint8)", temp_name, np_fromfile0=[str]) finally: os.remove(temp_name) def test_fromfile1(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.randint(0,2**16,1000).astype(numpy.uint16) x.tofile(temp_name) try: self.run_test("def np_fromfile1(file): from numpy import fromfile, uint16 ; return fromfile(file, uint16)", temp_name, np_fromfile1=[str]) finally: os.remove(temp_name) def test_fromfile2(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.randint(0,2**31,1000).astype(numpy.uint32) x.tofile(temp_name) try: self.run_test("def np_fromfile2(file): from numpy import fromfile, uint32 ; return fromfile(file, uint32)", temp_name, np_fromfile2=[str]) finally: os.remove(temp_name) def test_fromfile3(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.random(1000).astype(numpy.float32) x.tofile(temp_name) try: self.run_test("def np_fromfile3(file): from numpy import fromfile, float32 ; return fromfile(file, float32)", temp_name, np_fromfile3=[str]) finally: os.remove(temp_name) def test_fromfile4(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.random(1000).astype(numpy.float64) x.tofile(temp_name) try: self.run_test("def np_fromfile4(file): from numpy import fromfile, float64 ; return fromfile(file, float64)", temp_name, np_fromfile4=[str]) finally: os.remove(temp_name) def test_fromfile5(self): temp_name = tempfile.mkstemp()[1] x = numpy.random.random(1000).astype(numpy.float64) x.tofile(temp_name) try: self.run_test("def np_fromfile5(file): from numpy import fromfile, float64 ; return fromfile(file, float64, 100)", temp_name, np_fromfile5=[str]) finally: os.remove(temp_name) def test_fromstring0(self): self.run_test("def np_fromstring0(a): from numpy import fromstring, uint8 ; return fromstring(a, uint8)", '\x01\x02', 
np_fromstring0=[str]) def test_fromstring1(self): self.run_test("def np_fromstring1(a): from numpy import fromstring, uint8 ; a = '\x01\x02\x03\x04' ; return fromstring(a, uint8,3)", '\x01\x02\x03\x04', np_fromstring1=[str]) def test_fromstring2(self): self.run_test("def np_fromstring2(a): from numpy import fromstring, uint32 ; return fromstring(a, uint32,-1, ' ')", '1 2 3 4', np_fromstring2=[str]) def test_fromstring3(self): self.run_test("def np_fromstring3(a): from numpy import fromstring, uint32 ; return fromstring(a, uint32,2, ',')", '1,2, 3, 4', np_fromstring3=[str]) def test_outer0(self): self.run_test("def np_outer0(x): from numpy import outer ; return outer(x, x+2)", numpy.arange(6).reshape(2,3), np_outer0=[NDArray[int,:,:]]) def test_outer1(self): self.run_test("def np_outer1(x): from numpy import outer; return outer(x, range(6))", numpy.arange(6).reshape((2,3)), np_outer1=[NDArray[int,:,:]]) def test_place0(self): self.run_test("def np_place0(x): from numpy import place, ravel ; place(x, x>1, ravel(x**2)); return x", numpy.arange(6).reshape((2,3)), np_place0=[NDArray[int,:,:]]) def test_place1(self): self.run_test("def np_place1(x): from numpy import place ; place(x, x>1, [57, 58]); return x", numpy.arange(6).reshape((2,3)), np_place1=[NDArray[int,:,:]]) def test_product(self): self.run_test("def np_product(x):\n from numpy import product\n return product(x)", numpy.arange(1, 10), np_product=[NDArray[int,:]]) def test_ptp0(self): self.run_test("def np_ptp0(x): return x.ptp()", numpy.arange(4).reshape((2,2)), np_ptp0=[NDArray[int,:,:]]) def test_ptp1(self): self.run_test("def np_ptp1(x): from numpy import ptp ; return ptp(x,0)", numpy.arange(4).reshape((2,2)), np_ptp1=[NDArray[int,:,:]]) def test_ptp2(self): self.run_test("def np_ptp2(x): from numpy import ptp ; return ptp(x,1)", numpy.arange(4).reshape((2,2)), np_ptp2=[NDArray[int,:,:]]) def test_put0(self): self.run_test("def np_put0(x): x.put([0,2], [-44, -55]); return x", numpy.arange(5), 
np_put0=[NDArray[int,:]]) def test_put1(self): self.run_test("def np_put1(x): from numpy import put ; put(x, [0,2,3], [57, 58]); return x", numpy.arange(6).reshape((2, 3)), np_put1=[NDArray[int,:,:]]) def test_put2(self): self.run_test("def np_put2(x): from numpy import put ; put(x, 2, 57); return x", numpy.arange(6).reshape((2,3)), np_put2=[NDArray[int,:,:]]) def test_putmask0(self): self.run_test("def np_putmask0(x): from numpy import putmask ; putmask(x, x>1, x**2); return x", numpy.arange(6).reshape((2,3)), np_putmask0=[NDArray[int,:,:]]) def test_putmask1(self): self.run_test("def np_putmask1(x): from numpy import putmask; putmask(x, x>1, [57, 58]); return x", numpy.arange(6).reshape((2,3)), np_putmask1=[NDArray[int,:,:]]) def test_ravel0(self): self.run_test("def np_ravel0(x): from numpy import ravel ; return ravel(x)", numpy.arange(6).reshape((2,3)), np_ravel0=[NDArray[int,:,:]]) def test_ravel1(self): self.run_test("def np_ravel1(x): return x.ravel()", numpy.arange(6).reshape((2,3)), np_ravel1=[NDArray[int,:,:]]) def test_ravel2(self): self.run_test("def np_ravel2(x): y = x.ravel(); y[3] = 10; return x", numpy.arange(6).reshape((2,3)), np_ravel2=[NDArray[int,:,:]]) def test_repeat0(self): self.run_test("def np_repeat0(x): from numpy import repeat; return repeat(x, 3)", numpy.arange(3), np_repeat0=[NDArray[int,:]]) def test_repeat1(self): self.run_test("def np_repeat1(x): return x.repeat(3)", numpy.arange(6).reshape(2,3), np_repeat1=[NDArray[int,:,:]]) def test_repeat2(self): self.run_test("def np_repeat2(x): from numpy import repeat; return repeat(x, 4, axis=0)", numpy.arange(6).reshape(2,3), np_repeat2=[NDArray[int,:,:]]) def test_repeat3(self): self.run_test("def np_repeat3(x): from numpy import repeat; return repeat(x, 4, axis=1)", numpy.arange(6).reshape(2,3), np_repeat3=[NDArray[int,:,:]]) def test_resize4(self): self.run_test("def np_resize4(x): from numpy import resize ; return resize(x, (6,7))", numpy.arange(24).reshape((2,3,4)), 
np_resize4=[NDArray[int, :, :, :]]) def test_resize3(self): self.run_test("def np_resize3(x): from numpy import resize; return resize(x, (6,6))", numpy.arange(24).reshape((2,3,4)), np_resize3=[NDArray[int, :, :, :]]) def test_resize2(self): self.run_test("def np_resize2(x): from numpy import resize; return resize(x, (3,3))", numpy.arange(24).reshape((2,3,4)), np_resize2=[NDArray[int, :, :, :]]) def test_resize1(self): self.run_test("def np_resize1(x): from numpy import resize; return resize(x, 32)", numpy.arange(24), np_resize1=[NDArray[int,:]]) def test_resize0(self): self.run_test("def np_resize0(x): from numpy import resize; return resize(x, 12)", numpy.arange(24), np_resize0=[NDArray[int,:]]) def test_rollaxis3(self): self.run_test("def np_rollaxis3(x): from numpy import rollaxis; return rollaxis(x, 0, 3)", numpy.arange(24).reshape((2,3,4)), np_rollaxis3=[NDArray[int, :, :, :]]) def test_rollaxis2(self): self.run_test("def np_rollaxis2(x): from numpy import rollaxis; return rollaxis(x, 2)", numpy.arange(24).reshape((2,3,4)), np_rollaxis2=[NDArray[int, :, :, :]]) def test_rollaxis1(self): self.run_test("def np_rollaxis1(x): from numpy import rollaxis; return rollaxis(x, 1, 2)", numpy.arange(24).reshape(2,3,4), np_rollaxis1=[NDArray[int, :, :, :]]) def test_rollaxis0(self): self.run_test("def np_rollaxis0(x): from numpy import rollaxis; return rollaxis(x, 1)", numpy.arange(24).reshape(2,3,4), np_rollaxis0=[NDArray[int, :, :, :]]) def test_roll6(self): self.run_test("def np_roll6(x): from numpy import roll; return roll(x[:,:,:-1], -1, 2)", numpy.arange(24).reshape(2,3,4), np_roll6=[NDArray[int, :, :, :]]) def test_roll5(self): self.run_test("def np_roll5(x): from numpy import roll; return roll(x, -1, 2)", numpy.arange(24).reshape(2,3,4), np_roll5=[NDArray[int, :, :, :]]) def test_roll4(self): self.run_test("def np_roll4(x): from numpy import roll; return roll(x, 1, 1)", numpy.arange(24).reshape(2,3,4), np_roll4=[NDArray[int, :, :, :]]) def test_roll3(self): 
self.run_test("def np_roll3(x): from numpy import roll; return roll(x, -1, 0)", numpy.arange(24).reshape(2,3,4), np_roll3=[NDArray[int, :, :, :]]) def test_roll2(self): self.run_test("def np_roll2(x): from numpy import roll; return roll(x, -1)", numpy.arange(24).reshape(2,3,4), np_roll2=[NDArray[int, :, :, :]]) def test_roll1(self): self.run_test("def np_roll1(x): from numpy import roll; return roll(x, 10)", numpy.arange(24).reshape(2,3,4), np_roll1=[NDArray[int, :, :, :]]) def test_roll0(self): self.run_test("def np_roll0(x): from numpy import roll; return roll(x, 3)", numpy.arange(24).reshape(2,3,4), np_roll0=[NDArray[int, :, :, :]]) def test_searchsorted3(self): self.run_test("def np_searchsorted3(x): from numpy import searchsorted; return searchsorted(x, [[3,4],[1,87]])", numpy.arange(6), np_searchsorted3=[NDArray[int,:]]) def test_searchsorted2(self): self.run_test("def np_searchsorted2(x): from numpy import searchsorted; return searchsorted(x, [[3,4],[1,87]], 'right')", numpy.arange(6), np_searchsorted2=[NDArray[int,:]]) def test_searchsorted1(self): self.run_test("def np_searchsorted1(x): from numpy import searchsorted; return searchsorted(x, 3)", numpy.arange(6), np_searchsorted1=[NDArray[int,:]]) def test_searchsorted0(self): self.run_test("def np_searchsorted0(x): from numpy import searchsorted; return x.searchsorted(3, 'right')", numpy.arange(6), np_searchsorted0=[NDArray[int,:]]) def test_rot904(self): self.run_test("def np_rot904(x): from numpy import rot90; return rot90(x, 4)", numpy.arange(24).reshape(2,3,4), np_rot904=[NDArray[int, :, :, :]]) def test_rot903(self): self.run_test("def np_rot903(x): from numpy import rot90; return rot90(x, 2)", numpy.arange(24).reshape(2,3,4), np_rot903=[NDArray[int, :, :, :]]) def test_rot902(self): self.run_test("def np_rot902(x): from numpy import rot90; return rot90(x, 3)", numpy.arange(24).reshape(2,3,4), np_rot902=[NDArray[int, :, :, :]]) def test_rot900(self): self.run_test("def np_rot900(x): from numpy import 
rot90; return rot90(x)", numpy.arange(24).reshape(2,3,4), np_rot900=[NDArray[int, :, :, :]]) def test_rot901(self): self.run_test("def np_rot901(x): from numpy import rot90; return rot90(x)", numpy.arange(4).reshape(2,2), np_rot901=[NDArray[int,:,:]]) def test_select2(self): self.run_test("def np_select2(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x**3, x**2]; return select(condlist, choicelist)", numpy.arange(10).reshape(2,5), np_select2=[NDArray[int,:,:]]) def test_select1(self): self.run_test("def np_select1(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x+3, x**2]; return select(condlist, choicelist)", numpy.arange(10), np_select1=[NDArray[int,:]]) def test_select0(self): self.run_test("def np_select0(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x, x**2]; return select(condlist, choicelist)", numpy.arange(10), np_select0=[NDArray[int,:]]) def test_sometrue0(self): self.run_test("def np_sometrue0(a): from numpy import sometrue ; return sometrue(a)", numpy.array([[True, False], [True, True]]), np_sometrue0=[NDArray[bool,:,:]]) def test_sometrue1(self): self.run_test("def np_sometrue1(a): from numpy import sometrue ; return sometrue(a, 0)", numpy.array([[True, False], [False, False]]), np_sometrue1=[NDArray[bool,:,:]]) def test_sometrue2(self): self.run_test("def np_sometrue2(a): from numpy import sometrue ; return sometrue(a)", [-1, 0, 5], np_sometrue2=[List[int]]) def test_sort0(self): self.run_test("def np_sort0(a): from numpy import sort ; return sort(a)", numpy.array([[1,6],[7,5]]), np_sort0=[NDArray[int,:,:]]) def test_sort1(self): self.run_test("def np_sort1(a): from numpy import sort ; return sort(a)", numpy.array([2, 1, 6, 3, 5]), np_sort1=[NDArray[int,:]]) def test_sort2(self): self.run_test("def np_sort2(a): from numpy import sort ; return sort(a)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort2=[NDArray[int, :, :, :]]) def test_sort3(self): self.run_test("def np_sort3(a): from numpy 
import sort ; return sort(a, 0)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort3=[NDArray[int, :, :, :]]) def test_sort4(self): self.run_test("def np_sort4(a): from numpy import sort ; return sort(a, 1, kind='quicksort')", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort4=[NDArray[int, :, :, :]]) def test_sort5(self): self.run_test("def np_sort5(a): from numpy import sort ; return sort(a, 1, kind='heapsort')", numpy.arange(2*3*5, 0, -1).reshape(2,3,5), np_sort5=[NDArray[int, :, :, :]]) def test_sort6(self): self.run_test("def np_sort6(a): from numpy import sort ; return sort(a, 0, kind='stable')", numpy.arange(2*3*6, 0, -1).reshape(2,3,6), np_sort6=[NDArray[int, :, :, :]]) def test_sort7(self): self.run_test("def np_sort7(a): from numpy import sort ; return sort(a, 2, kind='mergesort')", numpy.arange(2*3*7, 0, -1).reshape(2,3,7), np_sort7=[NDArray[int, :, :, :]]) def test_sort8(self): self.run_test("def np_sort8(a): from numpy import sort ; return sort(a, None)", numpy.arange(2*3*7, 0, -1).reshape(2,3,7), np_sort8=[NDArray[int, :, :, :]]) def test_sort9(self): self.run_test("def np_sort9(a): from numpy import sort ; return sort(2 * a, None)", numpy.arange(2*3*7, 0, -1).reshape(2,3,7), np_sort9=[NDArray[int, :, :, :]]) def test_sort10(self): self.run_test("def np_sort10(a): from numpy import sort ; return sort(3*a, 0)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort10=[NDArray[int, :, :, :]]) def test_sort11(self): self.run_test("def np_sort11(a): a.sort(kind='heapsort'); return a", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort11=[NDArray[int, :, :, :]]) def test_sort_complex0(self): self.run_test("def np_sort_complex0(a): from numpy import sort_complex ; return sort_complex(a)", numpy.array([[1,6],[7,5]]), np_sort_complex0=[NDArray[int,:,:]]) def test_sort_complex1(self): self.run_test("def np_sort_complex1(a): from numpy import sort_complex ; return sort_complex(a)", numpy.array([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]), 
np_sort_complex1=[NDArray[complex,:]]) def test_split0(self): self.run_test("def np_split0(a): from numpy import split,array2string ; return list(map(array2string,split(a, 3)))", numpy.arange(12), np_split0=[NDArray[int,:]]) def test_split1(self): self.run_test("def np_split1(a):\n from numpy import split\n try:\n print(split(a, 5))\n return False\n except ValueError:\n return True", numpy.arange(12), np_split1=[NDArray[int,:]]) def test_split2(self): self.run_test("def np_split2(a): from numpy import split, array2string; return list(map(array2string,split(a, [0,1,5])))", numpy.arange(12).reshape(6,2), np_split2=[NDArray[int,:,:]]) @unittest.skip("Require numpy_fexpr for multidim array") def test_take0(self): self.run_test("def np_take0(a):\n from numpy import take\n return take(a, [0,1])", numpy.arange(24).reshape(2,3,4), np_take0=[NDArray[int, :, :, :]]) @unittest.skip("Require numpy_fexpr for multidim array") def test_take1(self): self.run_test("def np_take1(a):\n from numpy import take\n return take(a, [[0,0,2,2],[1,0,1,2]])", numpy.arange(24).reshape(2,3,4), np_take1=[NDArray[int, :, :, :]]) @unittest.skip("Require numpy_fexpr with indices") def test_take2(self): self.run_test("def np_take2(a):\n from numpy import take\n return take(a, [1,0,1,2])", numpy.arange(24), np_take2=[NDArray[int,:]]) def test_swapaxes_(self): self.run_test("def np_swapaxes_(a):\n from numpy import swapaxes\n return swapaxes(a, 1, 2)", numpy.arange(24).reshape(2,3,4), np_swapaxes_=[NDArray[int, :, :, :]]) def test_tile0(self): self.run_test("def np_tile0(a): from numpy import tile ; return tile(a, 3)", numpy.arange(4), np_tile0=[NDArray[int,:]]) def test_tile1(self): self.run_test("def np_tile1(a): from numpy import tile ; return tile(a, (3, 2))", numpy.arange(4), np_tile1=[NDArray[int,:]]) def test_tile2(self): self.run_test("def np_tile2(a): from numpy import tile ; return tile(a, (1, 2))", numpy.arange(12).reshape(3,4), np_tile2=[NDArray[int,:,:]]) def test_tolist0(self): 
self.run_test("def np_tolist0(a): return a.tolist()", numpy.arange(12), np_tolist0=[NDArray[int,:]]) def test_tolist1(self): self.run_test("def np_tolist1(a): return a.tolist()", numpy.arange(12).reshape(3,4), np_tolist1=[NDArray[int,:,:]]) def test_tolist2(self): self.run_test("def np_tolist2(a): return a.tolist()", numpy.arange(2*3*4*5).reshape(2,3,4,5), np_tolist2=[NDArray[int, :, :, :, :]]) @unittest.skip("bytes/str confusion") def test_tostring0(self): self.run_test("def np_tostring0(a): return a.tostring()", numpy.arange(80, 100), np_tostring0=[NDArray[int,:]]) @unittest.skip("bytes/str confusion") def test_tostring1(self): self.run_test("def np_tostring1(a): return a.tostring()", numpy.arange(500, 600), np_tostring1=[NDArray[int,:]]) def test_fromiter0(self): self.run_test("def g(): yield 1 ; yield 2\ndef np_fromiter0(): from numpy import fromiter, float32 ; iterable = g() ; return fromiter(iterable, float32)", np_fromiter0=[]) def test_fromiter1(self): self.run_test("def np_fromiter1(): from numpy import fromiter, float32 ; iterable = (x*x for x in range(5)) ; return fromiter(iterable, float32, 5)", np_fromiter1=[]) def test_fromiter2(self): self.run_test("def np_fromiter2(): from numpy import fromiter, float64 ; iterable = (x-x for x in range(5)) ; return fromiter(iterable, count=2, dtype=float64)", np_fromiter2=[]) def test_fromfunction0(self): self.run_test("def np_fromfunction0(s): from numpy import fromfunction ; return fromfunction(lambda i: i == 1, s)", (3,), np_fromfunction0=[Tuple[int]]) def test_fromfunction1(self): self.run_test("def np_fromfunction1(s): from numpy import fromfunction; return fromfunction(lambda i, j: i + j, s)", (3, 3), np_fromfunction1=[Tuple[int, int]]) def test_flipud0(self): self.run_test("def np_flipud0(x): from numpy import flipud ; return flipud(x)", numpy.arange(9).reshape(3,3), np_flipud0=[NDArray[int,:,:]]) def test_fliplr0(self): self.run_test("def np_fliplr0(x): from numpy import fliplr ; return fliplr(x)", 
numpy.arange(9).reshape(3,3), np_fliplr0=[NDArray[int,:,:]]) def test_flip3(self): self.run_test("def np_flip3(x): from numpy import flip; return flip(x[:,:,:-1], 2)", numpy.arange(24).reshape(2,3,4), np_flip3=[NDArray[int, :, :, :]]) def test_flip2(self): self.run_test("def np_flip2(x): from numpy import flip; return flip(x, 2)", numpy.arange(24).reshape(2,3,4), np_flip2=[NDArray[int, :, :, :]]) def test_flip1(self): self.run_test("def np_flip1(x): from numpy import flip; return flip(x, 1)", numpy.arange(24).reshape(2,3,4), np_flip1=[NDArray[int, :, :, :]]) def test_flip0(self): self.run_test("def np_flip0(x): from numpy import flip; return flip(x, 0)", numpy.arange(24).reshape(2,3,4), np_flip0=[NDArray[int, :, :, :]]) def test_flatten0(self): self.run_test("def np_flatten0(x): return x.flatten()", numpy.array([[1,2], [3,4]]), np_flatten0=[NDArray[int,:,:]]) def test_flatnonzero0(self): self.run_test("def np_flatnonzero0(x): from numpy import flatnonzero ; return flatnonzero(x)", numpy.arange(-2, 3), np_flatnonzero0=[NDArray[int,:]]) def test_flatnonzero1(self): self.run_test("def np_flatnonzero1(x): from numpy import flatnonzero ; return flatnonzero(x[1:-1])", numpy.arange(-2, 3), np_flatnonzero1=[NDArray[int,:]]) def test_fix0(self): self.run_test("def np_fix0(x): from numpy import fix ; return fix(x)", 3.14, np_fix0=[float]) def test_fix1(self): self.run_test("def np_fix1(x): from numpy import fix ; return fix(x)", 3, np_fix1=[int]) def test_fix2(self): self.run_test("def np_fix2(x): from numpy import fix ; return fix(x)", numpy.array([2.1, 2.9, -2.1, -2.9]), np_fix2=[NDArray[float,:]]) def test_fix3(self): self.run_test("def np_fix3(x): from numpy import fix ; return fix(x)", numpy.array([2.1, 2.9, -2.1, -2.9]), np_fix3=[NDArray[float,:]]) def test_fix4(self): self.run_test("def np_fix4(x): from numpy import fix ; return fix(x+x)", numpy.array([2.1, 2.9, -2.1, -2.9]), np_fix4=[NDArray[float,:]]) def test_cross1(self): self.run_test("def np_cross1(x): from 
numpy import cross ; return cross(x, [3,4,5])", numpy.array([2.1, 2.9]), np_cross1=[NDArray[float,:]]) def test_cross2(self): self.run_test("def np_cross2(x): from numpy import cross ; return cross(x, -x)", numpy.array([2.1, 2.9, -2.1]), np_cross2=[NDArray[float,:]]) def test_cross3(self): self.run_test("def np_cross3(x): from numpy import cross ; return cross(x, 2 * x)", numpy.array([[2.1, 2.9, -2.9]]), np_cross3=[NDArray[float,:, :]]) def test_cross4(self): self.run_test("def np_cross4(x): from numpy import cross ; return cross(x, [[1,2]])", numpy.array([[2.9, -2.1, -2.9]]), np_cross4=[NDArray[float,:, :]]) def test_finfo0(self): self.run_test("def np_finfo0(): from numpy import finfo, float64 ; x = finfo(float64) ; return x.eps", np_finfo0=[]) def test_finfo1(self): self.run_test("def np_finfo1(x): from numpy import finfo ; f = finfo(x.dtype) ; return f.eps", numpy.ones(1), np_finfo1=[NDArray[float,:]]) def test_finfo2(self): self.run_test("def np_finfo2(z): from numpy import finfo ; f = finfo(z.dtype) ; return f.eps < 1.0", numpy.array([1+1j]), np_finfo2=[NDArray[complex,:]]) def test_fill0(self): self.run_test("def np_fill0(x): x.fill(5) ; return x", numpy.ones((2, 3)), np_fill0=[NDArray[float,:,:]]) def test_eye0(self): self.run_test("def np_eye0(x): from numpy import eye ; return eye(x)", 2, np_eye0=[int]) def test_eye1(self): self.run_test("def np_eye1(x): from numpy import eye ; return eye(x, x+1)", 2, np_eye1=[int]) def test_eye1b(self): self.run_test("def np_eye1b(x): from numpy import eye ; return eye(x, x-1)", 3, np_eye1b=[int]) def test_eye2(self): self.run_test("def np_eye2(x): from numpy import eye ; return eye(x, x, 1)", 2, np_eye2=[int]) def test_eye3(self): self.run_test("def np_eye3(x): from numpy import eye, int32 ; return eye(x, x, 1, int32)", 2, np_eye3=[int]) def test_eye4(self): self.run_test("def np_eye4(x): from numpy import eye, uint32 ; return eye(x, dtype=uint32)", 2, np_eye4=[int]) def test_ediff1d0(self): self.run_test("def 
np_ediff1d0(x): from numpy import ediff1d ; return ediff1d(x)", [1,2,4,7,0], np_ediff1d0=[List[int]]) def test_ediff1d1(self): self.run_test("def np_ediff1d1(x): from numpy import ediff1d ; return ediff1d(x)", [[1,2,4],[1,6,24]], np_ediff1d1=[List[List[int]]]) def test_print_slice(self): self.run_test("def np_print_slice(a): print(a[:-1])", numpy.arange(12), np_print_slice=[NDArray[int,:]]) def test_print_expr(self): self.run_test("def np_print_expr(a): print(a * 2)", numpy.arange(12), np_print_expr=[NDArray[int,:]]) def test_broadcast_to0(self): self.run_test("def np_broadcast_to0(a, s): import numpy as np; return np.broadcast_to(a, s)", numpy.arange(12), (4, 12), np_broadcast_to0=[NDArray[int,:], Tuple[int, int]]) def test_broadcast_to1(self): self.run_test("def np_broadcast_to1(a, s): import numpy as np; return np.broadcast_to(a, s)", numpy.arange(1), (4, 12), np_broadcast_to1=[NDArray[int,:], Tuple[int, int]]) def test_broadcast_to2(self): self.run_test("def np_broadcast_to2(a, s): import numpy as np; return np.broadcast_to(a, s)", 5., (12, 2), np_broadcast_to2=[float, Tuple[int, int]]) pythran-0.10.0+ds2/pythran/tests/test_numpy_func1.py000066400000000000000000000406641416264035500224650ustar00rootroot00000000000000import unittest import sys from pythran.tests import TestEnv import numpy from pythran.typing import NDArray, List @TestEnv.module class TestNumpyFunc1(TestEnv): def test_sum_bool2(self): self.run_test("def np_sum_bool2(a): return a.sum()", numpy.ones(10,dtype=bool).reshape(2,5), np_sum_bool2=[NDArray[bool,:,:]]) def test_sum_expr(self): self.run_test("def np_sum_expr(a):\n from numpy import ones\n return (a + ones(10)).sum()", numpy.arange(10), np_sum_expr=[NDArray[int,:]]) def test_sum2_(self): self.run_test("def np_sum2_(a): return a.sum()", numpy.arange(10).reshape(2,5), np_sum2_=[NDArray[int,:,:]]) def test_sum3_(self): self.run_test("def np_sum3_(a): return a.sum(1)", numpy.arange(10).reshape(2,5), np_sum3_=[NDArray[int,:,:]]) def 
test_sum4_(self): self.run_test("def np_sum4_(a): return a.sum(0)", numpy.arange(10).reshape(2,5), np_sum4_=[NDArray[int,:,:]]) def test_sum5_(self): self.run_test("def np_sum5_(a): return a.sum(0)", numpy.arange(10), np_sum5_=[NDArray[int,:]]) def test_sum6_(self): self.run_test("def np_sum6_(a): return a.sum(0)", numpy.arange(12).reshape(2,3,2), np_sum6_=[NDArray[int,:,:,:]]) def test_sum7_(self): self.run_test("def np_sum7_(a): return a.sum(1)", numpy.arange(12).reshape(2,3,2), np_sum7_=[NDArray[int,:,:,:]]) def test_sum8_(self): self.run_test("def np_sum8_(a): return a.sum(2)", numpy.arange(12).reshape(2,3,2), np_sum8_=[NDArray[int,:,:,:]]) def test_sum9_(self): self.run_test("def np_sum9_(a): import numpy as np ; return np.sum(a*a,0)", numpy.arange(12).reshape(2,3,2), np_sum9_=[NDArray[int,:,:,:]]) def test_sum10_(self): self.run_test("def np_sum10_(a): import numpy as np ; return np.sum(a-a,1)", numpy.arange(12).reshape(2,3,2), np_sum10_=[NDArray[int,:,:,:]]) def test_sum11_(self): self.run_test("def np_sum11_(a): import numpy as np ; return np.sum(a+a,2)", numpy.arange(12).reshape(2,3,2), np_sum11_=[NDArray[int,:,:,:]]) @unittest.skipIf(sys.maxsize == (2**31 - 1), "overflow test") def test_sum12_(self): self.run_test("def np_sum12_(a): import numpy as np ; return np.sum(a)", numpy.array([2**32-1, -2**32 +1 , -2**32 + 1], dtype=numpy.uint32), np_sum12_=[NDArray[numpy.uint32,:]]) def test_sum13_(self): self.run_test("def np_sum13_(a): import numpy as np ; return np.sum(a)", numpy.array([2**31-1, -2**31 +1 , -2**31 + 1], dtype=numpy.int32), np_sum13_=[NDArray[numpy.int32,:]]) @unittest.skipIf(sys.maxsize == (2**31 - 1), "overflow test") def test_sum14_(self): self.run_test("def np_sum14_(a): import numpy as np ; return np.sum(a)", numpy.array([2**31-1, 2**31 +1 , 2**31 + 1], dtype=numpy.int32), np_sum14_=[NDArray[numpy.int32,:]]) def test_sum15_(self): self.run_test("def np_sum15_(a): import numpy as np ; return np.sum(a, dtype=int)", numpy.array([0.5, 1.5, 
2.5]), np_sum15_=[NDArray[float,:]]) def test_sum16_(self): self.run_test("def np_sum16_(a): import numpy as np ; return np.sum(a, dtype=int, axis=0)", numpy.array([0.5, 1.5, 2.5]), np_sum16_=[NDArray[float,:]]) def test_prod_(self): """ Check prod function for numpy array. """ self.run_test(""" def np_prod_(a): return a.prod()""", numpy.arange(10), np_prod_=[NDArray[int,:]]) def test_prod_bool(self): self.run_test("def np_prod_bool(a): return (a > 2).prod()", numpy.arange(10), np_prod_bool=[NDArray[int,:]]) def test_prod_bool2(self): self.run_test("def np_prod_bool2(a): return a.prod()", numpy.ones(10,dtype=bool).reshape(2,5), np_prod_bool2=[NDArray[bool,:,:]]) def test_prod2_(self): self.run_test("def np_prod2_(a): return a.prod()", numpy.arange(10).reshape(2,5), np_prod2_=[NDArray[int,:,:]]) def test_prod3_(self): self.run_test("def np_prod3_(a): return a.prod(1)", numpy.arange(10).reshape(2,5), np_prod3_=[NDArray[int,:,:]]) def test_prod4_(self): self.run_test("def np_prod4_(a): return a.prod(0)", numpy.arange(10).reshape(2,5), np_prod4_=[NDArray[int,:,:]]) def test_prod5_(self): self.run_test("def np_prod5_(a): return a.prod(0)", numpy.arange(10), np_prod5_=[NDArray[int,:]]) def test_prod6_(self): self.run_test("def np_prod6_(a): return a.prod(0)", numpy.arange(12).reshape(2,3,2), np_prod6_=[NDArray[int,:,:,:]]) def test_prod7_(self): self.run_test("def np_prod7_(a): return a.prod(1)", numpy.arange(12).reshape(2,3,2), np_prod7_=[NDArray[int,:,:,:]]) def test_prod8_(self): self.run_test("def np_prod8_(a): return a.prod(2)", numpy.arange(12).reshape(2,3,2), np_prod8_=[NDArray[int,:,:,:]]) def test_prod9_(self): self.run_test("def np_prod9_(a): import numpy as np ; return np.prod(a*a,0)", numpy.arange(12).reshape(2,3,2), np_prod9_=[NDArray[int,:,:,:]]) def test_prod10_(self): self.run_test("def np_prod10_(a): import numpy as np ; return np.prod(a-a,1)", numpy.arange(12).reshape(2,3,2), np_prod10_=[NDArray[int,:,:,:]]) def test_prod11_(self): self.run_test("def 
np_prod11_(a): import numpy as np ; return np.prod(a+a,2)", numpy.arange(12).reshape(2,3,2), np_prod11_=[NDArray[int,:,:,:]]) def test_prod_expr(self): self.run_test("def np_prod_expr(a):\n from numpy import ones\n return (a + ones(10)).prod()", numpy.arange(10), np_prod_expr=[NDArray[int,:]]) def test_amin_amax(self): self.run_test("def np_amin_amax(a):\n from numpy import amin,amax\n return amin(a), amax(a)",numpy.arange(10), np_amin_amax=[NDArray[int,:]]) def test_min_(self): self.run_test("def np_min_(a): return a.min()", numpy.arange(10), np_min_=[NDArray[int,:]]) def test_min1_(self): self.run_test("def np_min1_(a): return (a+a).min()", numpy.arange(10), np_min1_=[NDArray[int,:]]) def test_min2_(self): self.run_test("def np_min2_(a): return a.min()", numpy.arange(10).reshape(2,5), np_min2_=[NDArray[int,:,:]]) def test_min3_(self): self.run_test("def np_min3_(a): return a.min(1)", numpy.arange(10).reshape(2,5), np_min3_=[NDArray[int,:,:]]) def test_min4_(self): self.run_test("def np_min4_(a): return a.min(0)", numpy.arange(10).reshape(2,5), np_min4_=[NDArray[int,:,:]]) def test_min5_(self): self.run_test("def np_min5_(a): return a.min(0)", numpy.arange(10), np_min5_=[NDArray[int,:]]) def test_min6_(self): self.run_test("def np_min6_(a): return a.min(1)", numpy.arange(30).reshape(2,5,3), np_min6_=[NDArray[int,:,:,:]]) def test_min7_(self): self.run_test("def np_min7_(a): return (a+a).min(1)", numpy.arange(30).reshape(2,5,3), np_min7_=[NDArray[int,:,:,:]]) def test_min8_(self): self.run_test("def np_min8_(a): return a.min()", numpy.arange(4, dtype=numpy.int8), np_min8_=[NDArray[numpy.int8,:]]) def test_max_(self): self.run_test("def np_max_(a): return a.max()", numpy.arange(10), np_max_=[NDArray[int,:]]) def test_max1_(self): self.run_test("def np_max1_(a): return (a+a).max()", numpy.arange(10), np_max1_=[NDArray[int,:]]) def test_max2_(self): self.run_test("def np_max2_(a): return a.max()", numpy.arange(10).reshape(2,5), np_max2_=[NDArray[int,:,:]]) def 
test_max3_(self): self.run_test("def np_max3_(a): return a.max(1)", numpy.arange(10).reshape(2,5), np_max3_=[NDArray[int,:,:]]) def test_max4_(self): self.run_test("def np_max4_(a): return a.max(0)", numpy.arange(10).reshape(2,5), np_max4_=[NDArray[int,:,:]]) def test_max5_(self): self.run_test("def np_max5_(a): return a.max(0)", numpy.arange(10), np_max5_=[NDArray[int,:]]) def test_max6_(self): self.run_test("def np_max6_(a): return a.max(1)", numpy.arange(30).reshape(2,5,3), np_max6_=[NDArray[int,:,:,:]]) def test_max7_(self): self.run_test("def np_max7_(a): return (a+a).max(1)", numpy.arange(30).reshape(2,5,3), np_max7_=[NDArray[int,:,:,:]]) def test_max8_(self): self.run_test("def np_max8_(a): return a.max()", numpy.arange(4, dtype=numpy.int8), np_max8_=[NDArray[numpy.int8,:]]) def test_all_(self): self.run_test("def np_all_(a): return a.all()", numpy.arange(10), np_all_=[NDArray[int,:]]) def test_all2_(self): self.run_test("def np_all2_(a): return a.all()", numpy.ones(10).reshape(2,5), np_all2_=[NDArray[float,:,:]]) def test_all3_(self): self.run_test("def np_all3_(a): return a.all(1)", numpy.arange(10).reshape(2,5), np_all3_=[NDArray[int,:,:]]) def test_all4_(self): self.run_test("def np_all4_(a): return a.all(0)", numpy.ones(10).reshape(2,5), np_all4_=[NDArray[float,:,:]]) def test_all5_(self): self.run_test("def np_all5_(a): return a.all(0)", numpy.arange(10), np_all5_=[NDArray[int,:]]) def test_all6_(self): self.run_test("def np_all6_(a): return a.all().all()", numpy.arange(10), np_all6_=[NDArray[int,:]]) def test_all7_(self): self.run_test("def np_all7_(a): return a.all().all(0)", numpy.arange(10), np_all7_=[NDArray[int,:]]) def test_transpose_(self): self.run_test("def np_transpose_(a): return a.transpose()", numpy.arange(24).reshape(2,3,4), np_transpose_=[NDArray[int,:,:,:]]) def test_transpose_expr(self): self.run_test("def np_transpose_expr(a): return (a + a).transpose()", numpy.ones(24).reshape(2,3,4), np_transpose_expr=[NDArray[float,:,:,:]]) def 
test_transpose_expr2(self): self.run_test("def np_transpose_expr2(a): import numpy as np; return np.conj(a).T", 1j * numpy.ones(6).reshape(2,3), np_transpose_expr2=[NDArray[complex,:,:]]) def test_transpose2_(self): self.run_test("def np_transpose2_(a): return a.transpose((2,0,1))", numpy.arange(24).reshape(2,3,4), np_transpose2_=[NDArray[int,:,:,:]]) def test_transpose3_(self): self.run_test("def np_transpose3_(a): return a.transpose(2,1,0)", numpy.arange(24).reshape(2,3,4), np_transpose3_=[NDArray[int,:,:,:]]) def test_alen0(self): self.run_test("def np_alen0(a): from numpy import alen ; return alen(a)", numpy.ones((5,6)), np_alen0=[NDArray[float,:,:]]) def test_alen1(self): self.run_test("def np_alen1(a): from numpy import alen ; return alen(-a)", numpy.ones((5,6)), np_alen1=[NDArray[float,:,:]]) def test_allclose0(self): self.run_test("def np_allclose0(a): from numpy import allclose ; return allclose([1e10,1e-7], a)", [1.00001e10,1e-8], np_allclose0=[List[float]]) def test_allclose1(self): self.run_test("def np_allclose1(a): from numpy import allclose; return allclose([1e10,1e-8], +a)", numpy.array([1.00001e10,1e-9]), np_allclose1=[NDArray[float,:]]) def test_allclose2(self): self.run_test("def np_allclose2(a): from numpy import array, allclose; return allclose(array([1e10,1e-8]), a)", numpy.array([1.00001e10,1e-9]), np_allclose2=[NDArray[float,:]]) def test_allclose3(self): self.run_test("def np_allclose3(a): from numpy import allclose; return allclose(a, a)", [1.0, numpy.nan], np_allclose3=[List[float]]) def test_allclose4(self): """ Check allclose behavior with infinity values. 
""" self.run_test(""" def np_allclose4(a): from numpy import array, allclose return allclose(array([-float('inf'), float('inf'), -float('inf')]), a)""", numpy.array([float("inf"), float("inf"), -float('inf')]), np_allclose4=[NDArray[float,:]]) def test_alltrue0(self): self.run_test("def np_alltrue0(b): from numpy import alltrue ; return alltrue(b)", numpy.array([True, False, True, True]), np_alltrue0=[NDArray[bool,:]]) def test_alltrue1(self): self.run_test("def np_alltrue1(a): from numpy import alltrue ; return alltrue(a >= 5)", numpy.array([1, 5, 2, 7]), np_alltrue1=[NDArray[int,:]]) def test_count_nonzero0(self): self.run_test("def np_count_nonzero0(a): from numpy import count_nonzero; return count_nonzero(a)", numpy.array([[-1, -5, -2, 7], [9, 3, 0, -0]]), np_count_nonzero0=[NDArray[int,:,:]]) def test_count_nonzero1(self): self.run_test("def np_count_nonzero1(a): from numpy import count_nonzero; return count_nonzero(a)", numpy.array([-1, 5, -2, 0]), np_count_nonzero1=[NDArray[int,:]]) def test_count_nonzero2(self): self.run_test("def np_count_nonzero2(a): from numpy import count_nonzero; return count_nonzero(a)", numpy.array([-1., 0., -2., -1e-20]), np_count_nonzero2=[NDArray[float,:]]) def test_count_nonzero3(self): self.run_test("def np_count_nonzero3(a): from numpy import count_nonzero; return count_nonzero(a)", numpy.array([[0, 2, 0., 4 + 1j], [0.+0.j, 0.+4j, 1.+0j, 1j]]), np_count_nonzero3=[NDArray[complex,:,:]]) def test_count_nonzero4(self): self.run_test("def np_count_nonzero4(a): from numpy import count_nonzero; return count_nonzero(a)", numpy.array([[True, False], [False, False]]), np_count_nonzero4=[NDArray[bool,:,:]]) def test_count_nonzero5(self): self.run_test("def np_count_nonzero5(a): from numpy import count_nonzero; return count_nonzero(a*2)", numpy.array([[-1, -5, -2, 7], [9, 3, 0, -0]]), np_count_nonzero5=[NDArray[int,:,:]]) def test_isclose0(self): self.run_test("def np_isclose0(u): from numpy import isclose; return isclose(u, u)", 
numpy.array([[-1.01, 1e-10+1e-11, -0, 7., float('NaN')], [-1.0, 1e-10, 0., 7., float('NaN')]]), np_isclose0=[NDArray[float,:,:]]) def test_isclose1(self): self.run_test("def np_isclose1(u, v): from numpy import isclose; return isclose(u, v, 1e-19, 1e-16)", numpy.array([-1.01, 1e-10+1e-11, float("inf"), 7.]), numpy.array([9., 1e-10, float("inf"), float('NaN')]), np_isclose1=[NDArray[float,:], NDArray[float,:]]) def test_isclose2(self): self.run_test("def np_isclose2(u,v): from numpy import isclose; return isclose(u, v, 1e-16, 1e-19)", numpy.array([-1.01, 1e-10+1e-11, -0, 7., float('NaN')]), numpy.array([-1., 1e-10+2e-11, -0, 7.1, float('NaN')]), np_isclose2=[NDArray[float,:], NDArray[float,:]]) def test_isclose3(self): self.run_test("def np_isclose3(u): from numpy import isclose; return isclose(u, u)", numpy.array([9.+3j, 1e-10, 1.1j, float('NaN')]), np_isclose3=[NDArray[complex,:]]) def test_isclose4(self): self.run_test("def np_isclose4(u,v): from numpy import isclose; return isclose(u, v)", numpy.array([True, False, True, True, False]), numpy.array([True, False, False, True, True]), np_isclose4=[NDArray[bool,:], NDArray[bool,:]]) def test_isclose5(self): self.run_test("def np_isclose5(u,v): from numpy import isclose; return isclose(u, v)", 1e-10, 1e-10+1e-11, np_isclose5=[float, float]) def test_isclose6(self): self.run_test("def np_isclose6(u, v): from numpy import isclose; return isclose(u, v, 1e-19, 1e-16)", numpy.array([[-float("inf"), 1e-10+1e-11, -0, 7.],[9., 1e-10, 0., float('NaN')]]), numpy.array([float("inf"), 1e-10, 0., float('NaN')]), np_isclose6=[NDArray[float,:,:], NDArray[float,:]]) def test_isclose7(self): self.run_test("def np_isclose7(u, v): from numpy import isclose; return isclose(u, v, 1e-19, 1e-16)", numpy.array([9., 1e-10, 0., float('NaN')]), numpy.array([[-1.01, 1e-10+1e-11, -0, 7.],[9., 1e-10, 0., float('NaN')]]), np_isclose7=[NDArray[float,:], NDArray[float,:,:]]) def test_remainder0(self): self.run_test("def np_remainder0(u, v): from 
numpy import remainder; return remainder(u, v)", numpy.array([9., 9., -9., -9.]), numpy.array([2., -2., 2., -2.]), np_remainder0=[NDArray[float,:], NDArray[float,:]]) def test_numpy_ones_list(self): self.run_test( "def np_ones_list(u): from numpy import ones; return ones([u,u])", 2, np_ones_list=[int]) pythran-0.10.0+ds2/pythran/tests/test_numpy_func2.py000066400000000000000000001557151416264035500224720ustar00rootroot00000000000000import unittest from pythran.tests import TestEnv import numpy from pythran.typing import NDArray, List, Tuple @TestEnv.module class TestNumpyFunc2(TestEnv): def test_nonzero0(self): self.run_test("def np_nonzero0(x): return x.nonzero()", numpy.arange(6), np_nonzero0=[NDArray[int,:]]) def test_nonzero1(self): self.run_test("def np_nonzero1(x): from numpy import nonzero ; return nonzero(x>8)", numpy.arange(6), np_nonzero1=[NDArray[int,:]]) def test_nonzero2(self): self.run_test("def np_nonzero2(x): from numpy import nonzero ; return nonzero(x>0)", numpy.arange(6).reshape(2,3), np_nonzero2=[NDArray[int,:,:]]) def test_diagflat3(self): self.run_test("def np_diagflat3(a): from numpy import diagflat ; return diagflat(a)", numpy.arange(2), np_diagflat3=[NDArray[int,:]]) def test_diagflat4(self): self.run_test("def np_diagflat4(a): from numpy import diagflat ; return diagflat(a,1)", numpy.arange(3), np_diagflat4=[NDArray[int,:]]) def test_diagflat5(self): self.run_test("def np_diagflat5(a): from numpy import diagflat ; return diagflat(a,-2)", numpy.arange(4), np_diagflat5=[NDArray[int,:]]) def test_diagonal0(self): self.run_test("def np_diagonal0(a): return a.diagonal()", numpy.arange(10).reshape(2,5), np_diagonal0=[NDArray[int,:,:]]) def test_diagonal1(self): self.run_test("def np_diagonal1(a): from numpy import diagonal ; return diagonal(a,1)", numpy.arange(9).reshape(3,3), np_diagonal1=[NDArray[int,:,:]]) def test_diagonal2(self): self.run_test("def np_diagonal2(a): from numpy import diagonal ; return diagonal(a,-2)", 
numpy.arange(9).reshape(3,3), np_diagonal2=[NDArray[int,:,:]]) def test_diag0(self): self.run_test("def np_diag0(a): from numpy import diag ; return diag(a)", numpy.arange(10).reshape(2,5), np_diag0=[NDArray[int,:,:]]) def test_diag1(self): self.run_test("def np_diag1(a): from numpy import diag ; return diag(a,1)", numpy.arange(9).reshape(3,3), np_diag1=[NDArray[int,:,:]]) def test_diag2(self): self.run_test("def np_diag2(a): from numpy import diag ; return diag(a,-2)", numpy.arange(9).reshape(3,3), np_diag2=[NDArray[int,:,:]]) def test_diag2b(self): self.run_test("def np_diag2b(a): from numpy import diag ; return diag(a,-2)", numpy.arange(12).reshape(4,3), np_diag2b=[NDArray[int,:,:]]) def test_diag3(self): self.run_test("def np_diag3(a): from numpy import diag ; return diag(a)", numpy.arange(2), np_diag3=[NDArray[int,:]]) def test_diag4(self): self.run_test("def np_diag4(a): from numpy import diag ; return diag(a,1)", numpy.arange(3), np_diag4=[NDArray[int,:]]) def test_diag5(self): self.run_test("def np_diag5(a): from numpy import diag; return diag(a,-2)", numpy.arange(4), np_diag5=[NDArray[int,:]]) def test_delete0(self): self.run_test("def np_delete0(a): from numpy import delete ; return delete(a, 1)", numpy.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]), np_delete0=[NDArray[int,:,:]]) def test_delete1(self): self.run_test("def np_delete1(a): from numpy import delete ; return delete(a, [1,3,5])", numpy.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]), np_delete1=[NDArray[int,:,:]]) def test_where0(self): self.run_test("""def np_where0(a): from numpy import arange, where b = arange(5, 17).reshape((3,4)) c = [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 0, 0]] return where(c , a, b)""", numpy.arange(12).reshape(3,4), np_where0=[NDArray[int,:,:]]) def test_where1(self): self.run_test("""def np_where1(a): from numpy import arange, where c = [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 0, 0]] return where(True , a, c)""", numpy.arange(12).reshape(3,4), np_where1=[NDArray[int,:,:]]) def 
test_where2(self): self.run_test("""def np_where2(a): from numpy import arange, where c = [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 0, 0]] return where(False , a, c)""", numpy.arange(12).reshape(3,4), np_where2=[NDArray[int,:,:]]) def test_where3(self): self.run_test("""def np_where3(a): from numpy import arange, where c = [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 0, 0]] return where(True , a, 5)""", numpy.arange(12).reshape(3,4), np_where3=[NDArray[int,:,:]]) def test_where4(self): self.run_test("""def np_where4(a): from numpy import arange, where c = [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 0, 0]] return where(False , a, 6)""", numpy.arange(12).reshape(3,4), np_where4=[NDArray[int,:,:]]) def test_where5(self): self.run_test("""def np_where5(a): from numpy import arange, where b = arange(5, 17).reshape((3,4)) return where(a>5 , a, b)""", numpy.arange(12).reshape(3,4), np_where5=[NDArray[int,:,:]]) def test_where6(self): self.run_test("""def np_where6(a): from numpy import arange, where return where(a>5 , 1, 2)""", numpy.arange(12).reshape(3,4), np_where6=[NDArray[int,:,:]]) def test_where7(self): self.run_test("""def np_where7(a): from numpy import arange, where return where(a>5)""", numpy.arange(12).reshape(3,4), np_where7=[NDArray[int,:,:]]) def test_cumprod_(self): self.run_test("def np_cumprod_(a):\n return a.cumprod()", numpy.arange(10), np_cumprod_=[NDArray[int,:]]) def test_cumprod2_(self): self.run_test("def np_cumprod2_(a):\n from numpy import cumprod\n return a.cumprod()", numpy.arange(10).reshape(2,5), np_cumprod2_=[NDArray[int,:,:]]) def test_cumprod3_(self): self.run_test("def np_cumprod3_(a):\n from numpy import cumprod\n return a.cumprod(1)", numpy.arange(10).reshape(2,5), np_cumprod3_=[NDArray[int,:,:]]) def test_cumprod4_(self): self.run_test("def np_cumprod4_(a):\n from numpy import cumprod\n return a.cumprod(0)", numpy.arange(10).reshape(2,5), np_cumprod4_=[NDArray[int,:,:]]) def test_cumprod5_(self): self.run_test("def np_cumprod5_(a):\n from numpy import 
cumprod\n return a.cumprod(0)", numpy.arange(10), np_cumprod5_=[NDArray[int,:]]) def test_correlate_1(self): self.run_test("def np_correlate_1(a,b):\n from numpy import correlate\n return correlate(a,b)", numpy.arange(10,dtype=float), numpy.arange(12,dtype=float), np_correlate_1=[NDArray[float,:],NDArray[float,:]]) def test_correlate_2(self): self.run_test("def np_correlate_2(a,b):\n from numpy import correlate\n return correlate(a,b)", numpy.arange(12,dtype=float), numpy.arange(10,dtype=float), np_correlate_2=[NDArray[float,:],NDArray[float,:]]) def test_correlate_3(self): self.run_test("def np_correlate_3(a,b):\n from numpy import correlate\n return correlate(a,b,'valid')", numpy.arange(12,dtype=float), numpy.arange(10,dtype=float), np_correlate_3=[NDArray[float,:],NDArray[float,:]]) def test_correlate_4(self): self.run_test("def np_correlate_4(a,b):\n from numpy import correlate\n return correlate(a,b,'same')", numpy.arange(12,dtype=float), numpy.arange(10,dtype=float), np_correlate_4=[NDArray[float,:],NDArray[float,:]]) def test_correlate_5(self): self.run_test("def np_correlate_5(a,b):\n from numpy import correlate\n return correlate(a,b,'same')", numpy.arange(12,dtype=float), numpy.arange(7,dtype=float), np_correlate_5=[NDArray[float,:],NDArray[float,:]]) def test_correlate_6(self): self.run_test("def np_correlate_6(a,b):\n from numpy import correlate\n return correlate(a,b,'full')", numpy.arange(12) + 1j*numpy.arange(12.), numpy.arange(7.) 
+ 1j* numpy.arange(7.), np_correlate_6=[NDArray[complex,:],NDArray[complex,:]]) def test_correlate_7(self): dtype = numpy.float32 self.run_test("def np_correlate_7(a,b):\n from numpy import correlate\n return correlate(a,b,'full')", numpy.arange(12).astype(dtype) + 1j*numpy.arange(12).astype(dtype), numpy.arange(7).astype(dtype) + 1j* numpy.arange(7).astype(dtype), np_correlate_7=[NDArray[numpy.complex64,:],NDArray[numpy.complex64,:]]) def test_correlate_8(self): dtype = numpy.float32 self.run_test("def np_correlate_8(a,b):\n from numpy import correlate\n return correlate(a,b,'full')", numpy.arange(7).astype(dtype) + 1j*numpy.arange(7).astype(dtype), numpy.arange(12).astype(dtype) + 1j*numpy.arange(12).astype(dtype), np_correlate_8=[NDArray[numpy.complex64,:],NDArray[numpy.complex64,:]]) def test_correlate_9(self): dtype = numpy.float self.run_test("def np_correlate_9(a,b):\n from numpy import correlate\n return correlate(a,b,'full')", numpy.arange(7).astype(dtype) + 1j*numpy.arange(7).astype(dtype), numpy.arange(12).astype(dtype) + 1j*numpy.arange(12).astype(dtype), np_correlate_9=[NDArray[numpy.complex128,:],NDArray[numpy.complex128,:]]) def test_correlate_10(self): self.run_test("def np_correlate_10(a,b):\n from numpy import correlate\n return correlate(a,b,'same')", numpy.arange(12,dtype=float), numpy.arange(7,dtype=numpy.float32), np_correlate_10=[NDArray[float,:],NDArray[numpy.float32,:]]) def test_correlate_11(self): self.run_test("def np_correlate_11(a,b):\n from numpy import correlate\n return correlate(a,b,'same')", numpy.arange(12,dtype=numpy.float32), numpy.arange(7,dtype=float), np_correlate_11=[NDArray[numpy.float32,:],NDArray[float,:]]) def test_convolve_1(self): self.run_test("def np_convolve_1(a,b):\n from numpy import convolve\n return convolve(a,b)", numpy.arange(10,dtype=float), numpy.arange(12,dtype=float), np_convolve_1=[NDArray[float,:],NDArray[float,:]]) def test_convolve_2(self): self.run_test("def np_convolve_2(a,b):\n from numpy import 
convolve\n return convolve(a,b)", numpy.arange(12,dtype=float), numpy.arange(10,dtype=float), np_convolve_2=[NDArray[float,:],NDArray[float,:]]) def test_convolve_3(self): self.run_test("def np_convolve_3(a,b):\n from numpy import convolve\n return convolve(a,b,'valid')", numpy.arange(12,dtype=float), numpy.arange(10,dtype=float), np_convolve_3=[NDArray[float,:],NDArray[float,:]]) def test_convolve_4(self): self.run_test("def np_convolve_4(a,b):\n from numpy import convolve\n return convolve(a,b,'same')", numpy.arange(12,dtype=float), numpy.arange(10,dtype=float), np_convolve_4=[NDArray[float,:],NDArray[float,:]]) def test_convolve_5(self): self.run_test("def np_convolve_5(a,b):\n from numpy import convolve\n return convolve(a,b,'same')", numpy.arange(12,dtype=float), numpy.arange(7,dtype=float), np_convolve_5=[NDArray[float,:],NDArray[float,:]]) def test_convolve_6(self): self.run_test("def np_convolve_6(a,b):\n from numpy import convolve\n return convolve(a,b,'full')", numpy.arange(12.) + 1j*numpy.arange(12.), numpy.arange(7.) 
+ 1j* numpy.arange(7.), np_convolve_6=[NDArray[complex,:],NDArray[complex,:]]) def test_convolve_7(self): dtype = numpy.float32 self.run_test("def np_convolve_7(a,b):\n from numpy import convolve\n return convolve(a,b,'full')", numpy.arange(12).astype(dtype) + 1j*numpy.arange(12).astype(dtype), numpy.arange(7).astype(dtype) + 1j*numpy.arange(7).astype(dtype), np_convolve_7=[NDArray[numpy.complex64,:],NDArray[numpy.complex64,:]]) def test_convolve_8(self): dtype = numpy.float32 self.run_test("def np_convolve_8(a,b):\n from numpy import convolve\n return convolve(a,b,'full')", numpy.arange(7).astype(dtype) + 1j*numpy.arange(7).astype(dtype), numpy.arange(12).astype(dtype) +1j*numpy.arange(12).astype(dtype), np_convolve_8=[NDArray[numpy.complex64,:],NDArray[numpy.complex64,:]]) def test_convolve_9(self): dtype = numpy.float self.run_test("def np_convolve_9(a,b):\n from numpy import convolve\n return convolve(a,b,'full')", numpy.arange(7).astype(dtype) + 1j* numpy.arange(7).astype(dtype), numpy.arange(12).astype(dtype) + 1j* numpy.arange(12).astype(dtype), np_convolve_9=[NDArray[numpy.complex128,:],NDArray[numpy.complex128,:]]) def test_convolve_10(self): self.run_test("def np_convolve_10(a,b):\n from numpy import convolve\n return convolve(a,b,'same')", numpy.arange(12,dtype=float), numpy.arange(7,dtype=numpy.float32), np_convolve_10=[NDArray[float,:],NDArray[numpy.float32,:]]) def test_convolve_11(self): self.run_test("def np_convolve_11(a,b):\n from numpy import convolve\n return convolve(a,b,'same')", numpy.arange(12,dtype=numpy.float32), numpy.arange(7,dtype=float), np_convolve_11=[NDArray[numpy.float32,:],NDArray[float,:]]) def test_copy0(self): code= ''' def test_copy0(x): import numpy as np y = x z = np.copy(x) x[0] = 10 return x[0], y[0], z[0]''' self.run_test(code, numpy.array([1, 2, 3]), test_copy0=[NDArray[int,:]]) def test_copy1(self): code="def test_copy1(n): import numpy as np ; r = np.ones((n,n)); g = np.copy(r); return g" self.run_test(code, 10, 
test_copy1=[int]) def test_copy2(self): code="def test_copy2(n): import numpy as np ; r = np.ones((n,n)); g0 = np.copy(r); g1 = np.copy(r); g0[0] = 1 ; g1[0] = 2 ; return g0, g1" self.run_test(code, 10, test_copy2=[int]) def test_copy3(self): code="def test_copy3(n): import numpy as np ; r = [[1]*n for _ in range(n)]; g = np.copy(r) ; return g" self.run_test(code, 10, test_copy3=[int]) def test_copy4(self): code="def test_copy4(n): import numpy as np ; r = n; g = np.copy(r) ; return g" self.run_test(code, 10, test_copy4=[int]) def test_copy5(self): code="def test_copy5(n): return n[:-1].copy()" self.run_test(code, numpy.array([1,2,3]), test_copy5=[NDArray[int,:]]) def test_copy6(self): code="def test_copy6(n): return n[-1].copy()" self.run_test(code, numpy.array([[1],[2],]), test_copy6=[NDArray[int,:,:]]) def test_clip0(self): self.run_test("def np_clip0(a): return a.clip(1,8)", numpy.arange(10), np_clip0=[NDArray[int,:]]) def test_clip1(self): self.run_test("def np_clip1(a): from numpy import clip ; return clip(a,3,6)", numpy.arange(10), np_clip1=[NDArray[int,:]]) def test_concatenate0(self): self.run_test("def np_concatenate0(a): from numpy import array, concatenate ; b = array([[5, 6]]) ; return concatenate((a,b))", numpy.array([[1, 2], [3, 4]]), np_concatenate0=[NDArray[int,:,:]]) def test_concatenate1(self): self.run_test("def np_concatenate1(a): from numpy import array, concatenate ; return concatenate([a,a])", numpy.array([[1, 2], [3, 4]]), np_concatenate1=[NDArray[int,:,:]]) def test_concatenate2(self): self.run_test( "def np_concatenate2(a): from numpy import array, concatenate ; b = array([[5, 6]]).T ; return concatenate((a,b, b), axis=1)", numpy.array([[1, 2], [3, 4]]), np_concatenate2=[NDArray[int,:,:]]) def test_concatenate3(self): self.run_test("def np_concatenate3(a): from numpy import array, concatenate ; return concatenate([[1],a + a])", numpy.array([1, 2]), np_concatenate3=[NDArray[int,:]]) def test_hstack_empty(self): code = 'def 
np_test_stack_empty(a): import numpy as np;return np.stack(a)' with self.assertRaises(ValueError): self.run_test(code, [], np_test_stack_empty=[List[NDArray[float,:]]]) def test_hstack0(self): self.run_test("def np_hstack0(a,b): import numpy as np; return np.hstack((a,b))", numpy.array((1,2,3)), numpy.array((2,3,4)), np_hstack0=[NDArray[int,:],NDArray[int,:]]) def test_hstack1(self): self.run_test("def np_hstack1(a,b): import numpy as np; return np.hstack((a+1,b))", numpy.array(([1],[2],[3])), numpy.array(([2],[3],[4])), np_hstack1=[NDArray[int,:,:],NDArray[int,:,:]]) def test_vstack0(self): self.run_test("def np_vstack0(a,b): import numpy as np; return np.vstack((a,b))", numpy.array((1,2,3)), numpy.array((2,3,4)), np_vstack0=[NDArray[int,:],NDArray[int,:]]) def test_vstack1(self): self.run_test("def np_vstack1(a,b): import numpy as np; return np.vstack((a,b+b))", numpy.array(([1],[2],[3])), numpy.array(([2],[3],[4])), np_vstack1=[NDArray[int,:,:],NDArray[int,:,:]]) def test_stack0(self): self.run_test("def np_stack0(A): import numpy as np; return np.stack(A,axis=0)", [numpy.ones((2,2)), 2*numpy.ones((2,2))], np_stack0=[List[NDArray[float,:,:],NDArray[float,:,:]]]) def test_stack1(self): self.run_test("def np_stack1(A): import numpy as np; return np.stack(A,axis=1)", [numpy.ones((2,2,2)), 2*numpy.ones((2,2,2))], np_stack1=[List[NDArray[float,:,:,:],NDArray[float,:,:,:]]]) def test_stack2(self): self.run_test("def np_stack2(a,b): import numpy as np; return np.stack([a,b],axis=0)", numpy.ones((2,2)), 2*numpy.ones((2,2)), np_stack2=[NDArray[float,:,:],NDArray[float,:,:]]) def test_stack3(self): self.run_test("def np_stack3(a,b): import numpy as np; return np.stack((a,b),axis=0)", numpy.ones((2,2)), 2*numpy.ones((2,2)), np_stack3=[NDArray[float,:,:],NDArray[float,:,:]]) def test_stack4(self): self.run_test("def np_stack4(a,b): import numpy as np; return np.stack((2 * a,b),axis=0)", numpy.ones((4,2,3)), 2*numpy.ones((4,2,3)), 
np_stack4=[NDArray[float,:,:,:],NDArray[float,:,:,:]]) def test_stack5(self): self.run_test("def np_stack5(a): import numpy as np; return np.stack((a[:,1:],a[:,1:]),axis=1)", numpy.ones((4,3,3)), np_stack5=[NDArray[float,:,:,:]]) def test_bincount0(self): self.run_test("def np_bincount0(a): from numpy import bincount ; return bincount(a)", numpy.arange(5), np_bincount0=[NDArray[int,:]]) def test_bincount1(self): self.run_test("def np_bincount1(a, w): from numpy import bincount; return bincount(a,w)", numpy.array([0, 1, 1, 2, 2, 2]), numpy.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]), np_bincount1=[NDArray[int,:], NDArray[float,:]]) def test_bincount2(self): self.run_test("def np_bincount2(a, w): from numpy import bincount; return bincount(a + 1,w)", numpy.array([0, 1, 1, 2, 2, 2]), numpy.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]), np_bincount2=[NDArray[int,:], NDArray[float,:]]) def test_bincount3(self): self.run_test("def np_bincount3(a): from numpy import bincount,ones; x=ones((a.shape[0]+2), dtype=int); return bincount(a, x[:-2])", numpy.array([0, 1, 1, 2, 2, 2]), np_bincount3=[NDArray[int,:]]) def test_binary_repr0(self): self.run_test("def np_binary_repr0(a): from numpy import binary_repr ; return binary_repr(a)", 3, np_binary_repr0=[int]) def test_binary_repr1(self): self.run_test("def np_binary_repr1(a): from numpy import binary_repr ; return binary_repr(a)", -3, np_binary_repr1=[int]) def test_binary_repr2(self): self.run_test("def np_binary_repr2(a): from numpy import binary_repr ; return binary_repr(a,4)", 3, np_binary_repr2=[int]) def test_binary_repr3(self): self.run_test("def np_binary_repr3(a): from numpy import binary_repr ; return binary_repr(a,4)", -3, np_binary_repr3=[int]) def test_base_repr0(self): self.run_test("def np_base_repr0(a): from numpy import base_repr ; return base_repr(a)", 5, np_base_repr0=[int]) def test_base_repr1(self): self.run_test("def np_base_repr1(a): from numpy import base_repr ; return base_repr(a,5)", 6, np_base_repr1=[int]) def 
test_base_repr2(self): self.run_test("def np_base_repr2(a): from numpy import base_repr ; return base_repr(a,5,3)", 7, np_base_repr2=[int]) def test_base_repr3(self): self.run_test("def np_base_repr3(a): from numpy import base_repr ; return base_repr(a, 16)", 10, np_base_repr3=[int]) def test_base_repr4(self): self.run_test("def np_base_repr4(a): from numpy import base_repr ; return base_repr(a, 16)", 32, np_base_repr4=[int]) def test_base_repr5(self): self.run_test("def np_base_repr5(a): from numpy import base_repr ; return base_repr(a)", -5, np_base_repr5=[int]) def test_base_repr6(self): self.run_test("def np_base_repr6(a): from numpy import base_repr ; return base_repr(a)", 0, np_base_repr6=[int]) def test_base_repr7(self): self.run_test("def np_base_repr7(a): from numpy import base_repr ; return base_repr(a,5)", 0, np_base_repr7=[int]) def test_average0(self): self.run_test("def np_average0(a): from numpy import average ; return average(a)", numpy.arange(10), np_average0=[NDArray[int,:]]) def test_average1(self): self.run_test("def np_average1(a): from numpy import average ; return average(a,1)", numpy.arange(10).reshape(2,5), np_average1=[NDArray[int,:,:]]) def test_average2(self): self.run_test("def np_average2(a): from numpy import average ; return average(a,None, range(10))", numpy.arange(10), np_average2=[NDArray[int,:]]) def test_average3(self): self.run_test("def np_average3(a): from numpy import average ; return average(a,None, a)", numpy.arange(10).reshape(2,5), np_average3=[NDArray[int,:,:]]) def test_atleast_1d0(self): self.run_test("def np_atleast_1d0(a): from numpy import atleast_1d ; return atleast_1d(a)", 1, np_atleast_1d0=[int]) def test_atleast_1d1(self): self.run_test("def np_atleast_1d1(a): from numpy import atleast_1d ; r = atleast_1d(a) ; return r is a", numpy.arange(2), np_atleast_1d1=[NDArray[int,:]]) def test_atleast_2d0(self): self.run_test("def np_atleast_2d0(a): from numpy import atleast_2d ; return atleast_2d(a)", 1, 
np_atleast_2d0=[int]) def test_atleast_2d1(self): self.run_test("def np_atleast_2d1(a): from numpy import atleast_2d ; r = atleast_2d(a) ; return r is a", numpy.arange(2).reshape(1,2), np_atleast_2d1=[NDArray[int,:,:]]) def test_atleast_2d2(self): self.run_test("def np_atleast_2d2(a): from numpy import atleast_2d ; r = atleast_2d(a) ; return r", numpy.arange(2), np_atleast_2d2=[NDArray[int,:]]) def test_atleast_3d0(self): self.run_test("def np_atleast_3d0(a): from numpy import atleast_3d ; return atleast_3d(a)", 1, np_atleast_3d0=[int]) def test_atleast_3d1(self): self.run_test("def np_atleast_3d1(a): from numpy import atleast_3d ; r = atleast_3d(a) ; return r is a", numpy.arange(8).reshape(2,2,2), np_atleast_3d1=[NDArray[int,:,:,:]]) def test_atleast_3d2(self): self.run_test("def np_atleast_3d2(a): from numpy import atleast_3d ; r = atleast_3d(a) ; return r", numpy.arange(8).reshape(2,4), np_atleast_3d2=[NDArray[int,:,:]]) def test_atleast_3d3(self): self.run_test("def np_atleast_3d3(a): from numpy import atleast_3d ; r = atleast_3d(a) ; return r", numpy.arange(8), np_atleast_3d3=[NDArray[int,:]]) def test_asscalar0(self): self.run_test("def np_asscalar0(a): from numpy import asscalar; return asscalar(a)", numpy.array([1], numpy.int32), np_asscalar0=[NDArray[numpy.int32,:]]) def test_asscalar1(self): self.run_test("def np_asscalar1(a): from numpy import asscalar; return asscalar(a)", numpy.array([[1]], numpy.int64), np_asscalar1=[NDArray[numpy.int64,:,:]]) def test_ascontiguousarray0(self): self.run_test("def np_ascontiguousarray0(a):\n from numpy import ascontiguousarray\n return ascontiguousarray(a)", (1,2,3), np_ascontiguousarray0=[Tuple[int, int, int]]) def test_asarray_chkfinite0(self): self.run_test("def np_asarray_chkfinite0(a):\n from numpy import asarray_chkfinite\n return asarray_chkfinite(a)", (1,2,3), np_asarray_chkfinite0=[Tuple[int, int, int]]) def test_asarray_chkfinite1(self): self.run_test("def np_asarray_chkfinite1(a, x):\n from numpy import 
asarray_chkfinite\n try: return asarray_chkfinite(a)\n except ValueError: return asarray_chkfinite(x)", [[1.,2.],[numpy.nan,4.]], [[1.,2.],[3.,4.]], np_asarray_chkfinite1=[List[List[float]], List[List[float]]]) def test_asarray0(self): self.run_test("def np_asarray0(a):\n from numpy import asarray\n return asarray(a)", (1,2,3), np_asarray0=[Tuple[int, int, int]]) def test_asarray1(self): self.run_test("def np_asarray1(a):\n from numpy import asarray\n return asarray(a)", [(1,2),(3,4)], np_asarray1=[List[Tuple[int, int]]]) def test_asarray2(self): self.run_test("def np_asarray2(a):\n from numpy import asarray, int8\n return asarray(a, int8)", [1., 2., 3.], np_asarray2=[List[float]]) def test_asarray3(self): self.run_test("def np_asarray3(a):\n from numpy import asarray; b = asarray(a) ; return a is b", numpy.arange(3), np_asarray3=[NDArray[int,:]]) def test_asarray4(self): self.run_test("def np_asarray4(a):\n from numpy import asarray\n return asarray(a[1:])", [(1,2),(3,4)], np_asarray4=[List[Tuple[int, int]]]) def test_asarray5(self): self.run_test("def np_asarray5(a):\n from numpy import asarray\n return asarray(a)", 1., np_asarray5=[float]) def test_asarray6(self): self.run_test("def np_asarray6(a):\n from numpy import asarray\n return asarray(a, dtype=int)", 1.5, np_asarray6=[float]) def test_asfarray0(self): self.run_test("def np_asfarray0(a):\n from numpy import asfarray; b = asfarray(a) ; return a is b", numpy.arange(3.), np_asfarray0=[NDArray[float,:]]) def test_asfarray1(self): self.run_test("def np_asfarray1(a):\n from numpy import asfarray; b = asfarray(a) ; return a is not b", numpy.arange(3), np_asfarray1=[NDArray[int,:]]) def test_astype0(self): self.run_test("def np_astype0(a):\n return a.astype(float)", numpy.arange(3), np_astype0=[NDArray[int,:]]) def test_astype1(self): self.run_test("def np_astype1(a):\n import numpy as jumpy; return a.astype(jumpy.uint8)", numpy.arange(257), np_astype1=[NDArray[int,:]]) def test_array_str0(self): 
self.run_test("def np_array_str0(x): from numpy import array_str ; return array_str(x)", numpy.arange(3), np_array_str0=[NDArray[int,:]]) def test_array_split0(self): self.run_test("def np_array_split0(a): from numpy import array_split, array2string ; return list(map(array2string,array_split(a, 3)))", numpy.arange(12), np_array_split0=[NDArray[int,:]]) def test_array_split1(self): self.run_test("def np_array_split1(a): from numpy import array_split, array2string ; return list(map(array2string,array_split(a, 5)))", numpy.arange(12), np_array_split1=[NDArray[int,:]]) def test_array_split2(self): self.run_test("def np_array_split2(a): from numpy import array_split, array2string ; return list(map(array2string,array_split(a, 4)))", numpy.arange(12).reshape(6,2), np_array_split2=[NDArray[int,:,:]]) def test_array_split3(self): self.run_test("def np_array_split3(a): from numpy import array_split, array2string ; return list(map(array2string,array_split(a, [0,1,5])))", numpy.arange(12).reshape(6,2), np_array_split3=[NDArray[int,:,:]]) def test_array_equiv0(self): self.run_test("def np_array_equiv0(a): from numpy import array_equiv ; b = [1,2] ; return array_equiv(a,b)", [1, 2], np_array_equiv0=[List[int]]) def test_array_equiv1(self): self.run_test("def np_array_equiv1(a): from numpy import array_equiv ; b = [1,3] ; return array_equiv(a,b)", [1, 2], np_array_equiv1=[List[int]]) def test_array_equiv2(self): self.run_test("def np_array_equiv2(a): from numpy import array_equiv ; b = [[1,2],[1,2]] ; return array_equiv(a,b)", [1, 2], np_array_equiv2=[List[int]]) def test_array_equiv3(self): self.run_test("def np_array_equiv3(a): from numpy import array_equiv ; b = [[1,2],[1,3]] ; return array_equiv(a,b)", [1, 2], np_array_equiv3=[List[int]]) def test_array_equal0(self): self.run_test("def np_array_equal0(a): from numpy import array_equal ; b = [1,2] ; return array_equal(a,b)", [1, 2], np_array_equal0=[List[int]]) def test_array_equal1(self): self.run_test("def 
np_array_equal1(a): from numpy import array, array_equal ; b = array([1,2], dtype=int) ; return array_equal(a,b)", numpy.array([1,2]), np_array_equal1=[NDArray[int,:]]) def test_array_equal2(self): self.run_test("def np_array_equal2(a): from numpy import array, array_equal ; b = array([[1,2],[3,5]]) ; return array_equal(a,b)", numpy.array([[1,2],[3,5]]), np_array_equal2=[NDArray[int,:,:]]) def test_array_equal3(self): self.run_test("def np_array_equal3(a): from numpy import array, array_equal ; b = array([[1,2],[4,5]]) ; return array_equal(a,b)", numpy.array([[1,2],[3,5]]), np_array_equal3=[NDArray[int,:,:]]) def test_array_equal4(self): self.run_test("def np_array_equal4(a): from numpy import array, array_equal ; b = array([1,2,3]) ; return array_equal(a,b)", numpy. array([1,2]), np_array_equal4=[NDArray[int,:]]) def test_array2string0(self): self.run_test("def np_array2string0(x): from numpy import array2string ; return array2string(x)", numpy.arange(3), np_array2string0=[NDArray[int,:]]) def test_argwhere0(self): self.run_test("def np_argwhere0(x): from numpy import argwhere ; return argwhere(x)", numpy.arange(6), np_argwhere0=[NDArray[int,:]]) def test_argwhere1(self): self.run_test("def np_argwhere1(x): from numpy import argwhere ; return argwhere(x>8)", numpy.arange(6), np_argwhere1=[NDArray[int,:]]) def test_argwhere2(self): self.run_test("def np_argwhere2(x): from numpy import argwhere ; return argwhere(x>0)", numpy.arange(6).reshape(2,3), np_argwhere2=[NDArray[int,:,:]]) def test_around0(self): self.run_test("def np_around0(x): from numpy import around ; return around(x)", [0.37, 1.64], np_around0=[List[float]]) def test_around1(self): self.run_test("def np_around1(x): from numpy import around ; return around(x, 1)", [0.37, 1.64], np_around1=[List[float]]) def test_around2(self): """ Check rounding on the left side of comma. 
""" self.run_test(""" def np_around2(x): from numpy import around return around(x, -1)""", [37.4, 164.65], np_around2=[List[float]]) def test_around3(self): self.run_test("def np_around3(x): from numpy import around ; return around(x)", [.5, 1.5, 2.5, 3.5, 4.5], np_around3=[List[float]]) def test_around4(self): self.run_test("def np_around4(x): from numpy import around ; return around(x,1)", [1,2,3,11], np_around4=[List[int]]) def test_around5(self): self.run_test("def np_around5(x): from numpy import around ; return around(x,-1)", [1,2,3,11], np_around5=[List[int]]) def test_argsort0(self): self.run_test("def np_argsort0(x): from numpy import argsort ; return argsort(x)", numpy.array([3, 1, 2]), np_argsort0=[NDArray[int,:]]) def test_argsort1(self): self.run_test("def np_argsort1(x): return x.argsort()", numpy.array([[3, 1, 2], [1 , 2, 3]]), np_argsort1=[NDArray[int,:,:]]) def test_argmax0(self): self.run_test("def np_argmax0(a): return a.argmax()", numpy.arange(6).reshape(2,3), np_argmax0=[NDArray[int,:,:]]) def test_argmax1(self): self.run_test("def np_argmax1(a): from numpy import argmax ; return argmax(a+a)", numpy.arange(6).reshape(2,3), np_argmax1=[NDArray[int,:,:]]) def test_argmax2(self): self.run_test("def np_argmax2(a): from numpy import argmax ; return argmax(a, 0)", numpy.arange(30).reshape(2,3,5), np_argmax2=[NDArray[int,:,:,:]]) def test_argmin0(self): self.run_test("def np_argmin0(a): return a.argmin()", numpy.arange(6).reshape(2,3), np_argmin0=[NDArray[int,:,:]]) def test_argmin1(self): self.run_test("def np_argmin1(a): from numpy import argmin ; return argmin(a)", [1,2,3], np_argmin1=[List[int]]) def test_argmin2(self): self.run_test("def np_argmin2(a): from numpy import argmin ; return argmin(a, 0)", numpy.arange(30).reshape(2,3,5), np_argmin2=[NDArray[int,:,:,:]]) def test_argmin3(self): self.run_test("def np_argmin3(a): from numpy import argmin ; return argmin(a, 1)", numpy.arange(30).reshape(2,3,5), np_argmin3=[NDArray[int,:,:,:]]) def 
test_argmin4(self): self.run_test("def np_argmin4(a): from numpy import argmin ; return argmin(a, 2)", numpy.arange(30).reshape(2,3,5), np_argmin4=[NDArray[int,:,:,:]]) def test_argmin5(self): self.run_test("def np_argmin5(a): from numpy import argmin ; return argmin(a[None, :] + a[:, None], 0)", numpy.arange(30), np_argmin5=[NDArray[int,:]]) def test_argmin6(self): self.run_test("def np_argmin6(a): from numpy import argmin ; return argmin(a[None, :] + a[:, None], 1)", numpy.arange(30), np_argmin6=[NDArray[int,:]]) def test_append0(self): self.run_test("def np_append0(a): from numpy import append ; b = [[4, 5, 6], [7, 8, 9]] ; return append(a,b)", [1, 2, 3], np_append0=[List[int]]) def test_append1(self): self.run_test("def np_append1(a): from numpy import append,array ; b = array([[4, 5, 6], [7, 8, 9]]) ; return append(a,b)", [1, 2, 3], np_append1=[List[int]]) def test_append2(self): self.run_test("def np_append2(a): from numpy import append,array ; b = array([[4, 5, 6], [7, 8, 9]]) ; return append(a,b)", numpy.array([1, 2, 3]), np_append2=[NDArray[int,:]]) def test_append3(self): self.run_test("def np_append3(a): from numpy import append,array ; return append(a, 1)", numpy.array([1, 2, 3]), np_append3=[NDArray[int,:]]) def test_append4(self): self.run_test("def np_append4(a): from numpy import append ; b = 4 ; return append(a,b)", [1, 2, 3], np_append4=[List[int]]) def test_append5(self): self.run_test("def np_append5(a): from numpy import append,array ; return append(a, 1)", numpy.array([[1], [2], [3]]), np_append5=[NDArray[int,:, :]]) def test_append6(self): self.run_test("def np_append6(a): from numpy import append,array ; b = array([[4, 5, 6], [7, 8, 9]]) ; return append(a.T, b)", numpy.array([[1], [2], [3]]), np_append6=[NDArray[int,:, :]]) def test_append7(self): self.run_test("def np_append7(a): from numpy import append,array ; b = array([[4, 5, 6], [7, 8, 9]]) ; return append(a[0], b)", numpy.array([[1], [2], [3]]), np_append7=[NDArray[int,:, :]]) def 
test_append8(self): self.run_test("def np_append8(a): from numpy import append,array ; b = array([[4, 5, 6], [7, 8, 9]]) ; return append(a[:], b)", numpy.array([[1], [2], [3]]), np_append8=[NDArray[int,:, :]]) def test_angle0(self): self.run_test("def np_angle0(a): from numpy import angle ; return angle(a)", [1.0+0j, 1.0j, 1+1j, -1.0+0j, -1.0j, -1-1j], np_angle0=[List[complex]]) def test_angle1(self): self.run_test("def np_angle1(a): from numpy import angle ; return angle(a)", numpy.array([1.0+0j, 1.0j, 1+1j, -1.0+0j, -1.0j, -1-1j]), np_angle1=[NDArray[complex,:]]) def test_angle2(self): self.run_test("def np_angle2(a): from numpy import angle ; return angle(a,True)", 1 + 1j, np_angle2=[complex]) def test_angle3(self): self.run_test("def np_angle3(a): from numpy import angle ; return angle(a,True)", 1, np_angle3=[int]) def test_any0(self): self.run_test("def np_any0(a): from numpy import any ; return any(a)", numpy.array([[True, False], [True, True]]), np_any0=[NDArray[bool,:,:]]) def test_any1(self): self.run_test("def np_any1(a): from numpy import any ; return any(a, 0)", numpy.array([[True, False], [False, False]]), np_any1=[NDArray[bool,:,:]]) def test_any2(self): self.run_test("def np_any2(a): from numpy import any ; return any(a)", [-1, 0, 5], np_any2=[List[int]]) def test_any3(self): self.run_test("def np_any3(a): from numpy import any ; return any(a).any(0)", [-1, 0, 5], np_any3=[List[int]]) def test_any4(self): self.run_test("def np_any4(a): from numpy import any ; return any(a > 10)", numpy.array([-1, 0, 5]), np_any4=[NDArray[int, :]]) def test_any5(self): self.run_test("def np_any5(a): from numpy import any ; return any(a > 10.0)", numpy.array([-1.0, 0.0, 5.0]), np_any5=[NDArray[float,:]]) def test_any6(self): self.run_test("def np_any6(a): from numpy import any ; return any(a > 10.0)", numpy.array([[-1.0, 0.0], [5.0, 3.0]]), np_any6=[NDArray[float,:,:]]) def test_any7(self): self.run_test("def np_any7(a): from numpy import any ; return any(a)", 
numpy.array([[False, False], [False, False]]), np_any7=[NDArray[bool,:,:]]) def test_array1D_(self): self.run_test("def np_array1D_(a):\n from numpy import array\n return array(a)", [1,2,3], np_array1D_=[List[int]]) def test_array2D_(self): self.run_test("def np_array2D_(a):\n from numpy import array\n return array(a)", [[1,2],[3,4]], np_array2D_=[List[List[int]]]) def test_array_typed(self): self.run_test("def np_array_typed(a):\n from numpy import array, int64\n return array(a, int64)", [1.,2.,3.], np_array_typed=[List[float]]) def test_zeros_(self): self.run_test("def np_zeros_(a): from numpy import zeros; return zeros(a)", (10, 5), np_zeros_=[Tuple[int, int]]) def test_ones_(self): self.run_test("def np_ones_(a): from numpy import ones; return ones(a)", (10, 5), np_ones_=[Tuple[int, int]]) def test_full_0(self): self.run_test("def np_full_0(a): from numpy import full; return full(a, 1.5)", (10, 5), np_full_0=[Tuple[int, int]]) def test_full_1(self): self.run_test("def np_full_1(a): from numpy import full; return full(a, 1.5, dtype=int)", (10, 5), np_full_1=[Tuple[int, int]]) def test_full_2(self): self.run_test("def np_full_2(a): from numpy import full; return full(a, 1.5)", 10, np_full_2=[int]) def test_full_3(self): self.run_test("def np_full_3(a): from numpy import full; return full(a, 1.5, dtype=int)", 10, np_full_3=[int]) def test_flat_zeros_(self): self.run_test("def np_flat_zeros_(a): from numpy import zeros; return zeros(a)", 10, np_flat_zeros_=[int]) def test_flat_ones_(self): self.run_test("def np_flat_ones_(a): from numpy import ones; return ones(a)", 5, np_flat_ones_=[int]) def test_input_array_(self): self.run_test("import numpy\n\ndef input_array_(a):\n return a.shape", numpy.array([[1,2],[3,4]]), input_array_=[NDArray[int,:,:]]) def test_change_array1D_(self): """ Assign to lowest dimension of an array. 
""" self.run_test(""" def np_change_array1D_(a): a[0, 0, 0] = 36 return a""", numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]), np_change_array1D_=[NDArray[int,:,:,:]]) def test_str_(self): self.run_test("def np_str_(a): return str(a)", numpy.array([[[1,2],[3,4]],[[5,6],[7,8]]]), np_str_=[NDArray[int,:,:,:]]) def test_len_(self): self.run_test("def np_len_(a): return len(a)", numpy.array([[[1,2],[3,4]],[[5,6],[7,8]]]), np_len_=[NDArray[int,:,:,:]]) def test_empty_(self): self.run_test("def np_empty_(a):\n from numpy import empty\n a = empty(a)\n return a.strides, len(a)", (3, 2), np_empty_=[Tuple[int, int]]) def test_empty_uint_shape(self): self.run_test("def np_empty_uint_shape(a):\n from numpy import empty, uint8\n a = empty((uint8(a), uint8(a)))\n return a.strides, len(a)", 3, np_empty_uint_shape=[int]) def test_empty_integral_shape(self): self.run_test("def np_empty_integral_shape(n):\n from numpy import empty, uint8\n a = empty(5)\n return a.strides, len(a), n", 3, np_empty_integral_shape=[int]) def test_ones_uint_shape(self): self.run_test("def np_ones_uint_shape(a):\n from numpy import ones, uint32\n a = ones((uint32(a), uint32(a)))\n return a.strides, len(a)", 3, np_ones_uint_shape=[int]) def test_ones_integral_shape(self): self.run_test("def np_ones_integral_shape(n):\n from numpy import ones, uint8\n a = ones(5)\n return a.strides, len(a), n", 3, np_ones_integral_shape=[int]) def test_zeros_uint_shape(self): self.run_test("def np_zeros_uint_shape(a):\n from numpy import zeros, int32\n a = zeros((int32(a), int32(a)))\n return a.strides, len(a)", 3, np_zeros_uint_shape=[int]) def test_zeros_integral_shape(self): self.run_test("def np_zeros_integral_shape(n):\n from numpy import zeros, uint8\n a = zeros(5)\n return a.strides, len(a), n", 3, np_zeros_integral_shape=[int]) def test_empty_kwargs(self): self.run_test("def np_empty_kwargs(a):\n from numpy import empty\n a = empty(a, dtype=int)\n return a.strides, len(a)", (3, 2), np_empty_kwargs=[Tuple[int, 
int]]) def test_empty_kwargs2(self): self.run_test("def np_empty_kwargs2(a):\n from numpy import empty \n a = empty(a, dtype=float)\n return a.strides, a.shape", 3, np_empty_kwargs2=[int]) def test_arange(self): self.run_test("def np_arange_(a):\n from numpy import arange\n return arange(a, dtype=float)", 10, np_arange_=[int]) def test_arange1(self): self.run_test("def np_arange1_(a):\n from numpy import arange\n return arange(a, 10)", 1, np_arange1_=[int]) def test_arange2(self): self.run_test("def np_arange2_(a):\n from numpy import arange\n return arange(a, 10)", 0.5, np_arange2_=[float]) def test_arange3(self): self.run_test("def np_arange3_(a):\n from numpy import arange\n return arange(a, 9.5)", 0.5, np_arange3_=[float]) def test_arange4(self): self.run_test("def np_arange4_(a):\n from numpy import arange\n return arange(a, 9.3, 1)", 0.2, np_arange4_=[float]) def test_arange5(self): self.run_test("def np_arange5_(a):\n from numpy import arange\n return arange(a, 2, 0.3)", 1, np_arange5_=[int]) def test_arange6(self): self.run_test("def np_arange6_(a):\n from numpy import arange\n return arange(a, 3.3, 0.5)", 0.2, np_arange6_=[float]) def test_arange7(self): self.run_test("def np_arange7_(a):\n from numpy import arange\n return arange(a, 4.5, -0.2)", 1, np_arange7_=[int]) def test_arange8(self): self.run_test("def np_arange8_(a):\n from numpy import arange\n return arange(a, 1, -0.1)", 4.5, np_arange8_=[float]) def test_arange9(self): self.run_test("def np_arange9_(a):\n from numpy import arange\n return arange(a, -12, -3.2)", 4.5, np_arange9_=[float]) def test_arange10(self): self.run_test("def np_arange10_(a):\n from numpy import arange\n return arange(a, -5.5, -0.1)", -5, np_arange10_=[int]) def test_arange11(self): self.run_test("def np_arange11_(a):\n from numpy import arange, uint8\n return arange(a, 255, 1, uint8)", 0, np_arange11_=[int]) def test_arange12(self): self.run_test("def np_arange12_(a):\n from numpy import arange, float32\n return arange(a, 
25, 1., float32)", 0, np_arange12_=[int]) def test_arange13(self): self.run_test("def np_arange12_(a):\n from numpy import arange, float32\n return arange(a, 25, dtype=float32)", 0, np_arange12_=[int]) def test_arange14(self): self.run_test("def np_arange14_(a):\n from numpy import arange, float32\n return 50000 * arange(a, 25, dtype=float32)", 0, np_arange14_=[int]) def test_arange15(self): self.run_test("def np_arange15_(a):\n from numpy import arange\n return arange(-4 * a, 1, 4)", 1, np_arange15_=[int]) def test_arange16(self): self.run_test("def np_arange16_(a):\n from numpy import arange\n return arange(4 * a, -1, -4)", 1, np_arange16_=[int]) def test_arange17(self): self.run_test("def np_arange17_(a):\n from numpy import arange\n return arange(4 * a, -1, -4)[:,None]", 1, np_arange17_=[int]) def test_linspace(self): self.run_test("def np_linspace_(a):\n from numpy import linspace\n return linspace(a,4,32)", 1, np_linspace_=[int]) def test_linspace1(self): self.run_test("def np_linspace1_(a):\n from numpy import linspace\n return linspace(a,32.5,2)", 0.4, np_linspace1_=[float]) def test_linspace2(self): self.run_test("def np_linspace2_(a):\n from numpy import linspace\n return linspace(a,32.5,32, False)", 0.4, np_linspace2_=[float]) def test_linspace3(self): self.run_test("def np_linspace3_(a):\n from numpy import linspace\n return linspace(1,a)", 4, np_linspace3_=[int]) def test_linspace4(self): self.run_test("def np_linspace4_(a):\n from numpy import linspace\n return linspace(1,a,dtype=int)", 4, np_linspace4_=[int]) def test_sin(self): self.run_test("def np_sin_(a):\n from numpy import sin\n return sin(a)", numpy.linspace(0,6), np_sin_=[NDArray[float,:]]) def test_pi(self): self.run_test("def np_pi_():\n from numpy import pi\n return pi", np_pi_=[]) def test_e(self): self.run_test("def np_e_():\n from numpy import e\n return e", np_e_=[]) def test_ones_like_(self): self.run_test("def np_ones_like_(a):\n from numpy import ones_like, array\n return 
ones_like(array(a))", [[i,j,k,l] for i in range(5) for j in range(4) for k in range(6) for l in range(8)], np_ones_like_=[List[List[int]]]) def test_full_like_0(self): self.run_test("def np_full_like_0(a):\n from numpy import full_like, array\n return full_like(array(a), fill_value=12.5)", [[i,j,k,l] for i in range(5) for j in range(4) for k in range(6) for l in range(8)], np_full_like_0=[List[List[int]]]) def test_full_like_1(self): self.run_test("def np_full_like_1(a):\n from numpy import full_like, array\n return full_like(array(a), fill_value=12.5, dtype=float)", [[i,j,k,l] for i in range(5) for j in range(4) for k in range(6) for l in range(8)], np_full_like_1=[List[List[int]]]) def test_zeros_like_(self): self.run_test("def np_zeros_like_(a):\n from numpy import zeros_like, array\n return zeros_like(array(a))", [[i,j,k,l] for i in range(5) for j in range(4) for k in range(6) for l in range(8)], np_zeros_like_=[List[List[int]]]) def test_empty_like_1(self): """ Check empty_like numpy function without specified dtype. """ code = """ def np_empty_like_1(a): from numpy import empty_like b = empty_like(a) for i in range(2): for j in range(3): for k in range(4): b[i, j, k] = i + j + k return b""" self.run_test(code, numpy.arange(2 * 3 * 4).reshape(2, 3, 4), np_empty_like_1=[NDArray[int,:,:,:]]) def test_empty_like_2(self): """ Check empty_like numpy function with specific dtype argument. 
""" code = """ def np_empty_like_2(a): from numpy import empty_like b = empty_like(a, dtype=float) for i in range(2): for j in range(3): for k in range(4): b[i, j, k] = i + j + k return b""" self.run_test(code, numpy.arange(2 * 3 * 4).reshape(2, 3, 4), np_empty_like_2=[NDArray[int,:,:,:]]) def test_reshape0(self): self.run_test("def np_reshape0(a): return a.reshape((2,5))", numpy.arange(10), np_reshape0=[NDArray[int,:]]) def test_reshape1(self): """Check reshaping with -1 argument.""" code = """ def np_reshape1(a): return a.reshape((2,-1))""" self.run_test(code, numpy.arange(10), np_reshape1=[NDArray[int,:]]) def test_reshape2(self): self.run_test("def np_reshape2(a): return a.reshape(5, 2)", numpy.arange(10), np_reshape2=[NDArray[int,:]]) def test_reshape3(self): self.run_test("def np_reshape3(a): return (1 + a.reshape(1, a.size)), (1 + a[None])", numpy.arange(10), np_reshape3=[NDArray[int,:]]) def test_reshape4(self): self.run_test("def np_reshape4(a): return (1 + a.reshape(5, -1)), (1 + a[None])", numpy.arange(10), np_reshape4=[NDArray[int,:]]) def test_reshape5(self): self.run_test("def np_reshape5(a): return a.reshape(-1)", numpy.random.random((10,2)), np_reshape5=[NDArray[float,:,:]]) def test_reshape6(self): code = "def test_reshape6(a): return a.reshape((-1,-1))" with self.assertRaises(ValueError): self.run_test(code, numpy.random.random((10,2)), test_reshape6=[NDArray[float,:,:]]) def test_reshape7(self): self.run_test("def np_reshape7(a): return a.reshape(-10)", numpy.random.random((10,2)), np_reshape7=[NDArray[float,:,:]]) def test_expand_dims1(self): code = """ import numpy def np_expand_dims1(a,axis): return numpy.expand_dims(a,axis)""" self.run_test(code, numpy.arange(10,dtype=float),0, np_expand_dims1=[NDArray[float,:],int]) self.run_test(code, numpy.arange(10,dtype=float),1, np_expand_dims1=[NDArray[float,:],int]) def test_expand_dims2(self): code = """ import numpy def np_expand_dims2(a,axis): return numpy.expand_dims(a,axis)""" self.run_test(code, 
numpy.random.randn(10,20),0, np_expand_dims2=[NDArray[float,:,:],int]) self.run_test(code, numpy.random.randn(10,20),1, np_expand_dims2=[NDArray[float,:,:],int]) self.run_test(code, numpy.random.randn(10,20),2, np_expand_dims2=[NDArray[float,:,:],int]) def test_expand_dims3(self): code = """ import numpy def np_expand_dims3(a,axis): return numpy.expand_dims(a,axis)""" self.run_test(code, numpy.random.randn(10,20,30),0, np_expand_dims3=[NDArray[float,:,:,:],int]) self.run_test(code, numpy.random.randn(10,20,30),1, np_expand_dims3=[NDArray[float,:,:,:],int]) self.run_test(code, numpy.random.randn(10,20,30),2, np_expand_dims3=[NDArray[float,:,:,:],int]) self.run_test(code, numpy.random.randn(10,20,30),3, np_expand_dims3=[NDArray[float,:,:,:],int]) def test_expand_dims4(self): code = ''' import numpy as np def test1(x): y = np.expand_dims(x, -1) return y def expand_dims4(x): A = np.array([-1.11312199, -0.99629629]) return test1(x - A)''' self.run_test(code, numpy.random.randn(4,3,2), expand_dims4=[NDArray[float,:,:,:]]) def test_duplicate(self): """Check array forwarded twice doesn't double free. """ code = """ def np_duplicate(a): return a, a""" self.run_test(code, numpy.arange(10), np_duplicate=[NDArray[int,:]]) def test_broadcast(self): """Check that ndarray returned twice doesn't double free. 
""" code = """ def np_broadcast(): import numpy a = numpy.arange(3) return a, a""" self.run_test(code, np_broadcast=[]) def test_broadcast_slice(self): """Check that slicing an expression involving a broadcast works.""" code = """ def np_broadcast_slice(n): import numpy a = numpy.arange(n).reshape(2, n//2) return (a + 1)[:,1:]""" self.run_test(code, 20, np_broadcast_slice=[int]) def test_broadcast_slice_again(self): """Check that slicing an expression involving a broadcast works.""" code = """ def np_broadcast_slice_again(n): import numpy a = numpy.arange(n).reshape(2, n//2) b = numpy.arange(n//2) return (a + b)[:,1:]""" self.run_test(code, 20, np_broadcast_slice_again=[int]) def test_broadcast_dup(self): """Check that ndarray returned twice doesn't double free (reshaping).""" code = """ def np_broadcast_dup(): import numpy a = numpy.arange(10) return a, a.reshape((2,5))""" self.run_test(code, np_broadcast_dup=[]) def test_reshape_expr(self): self.run_test("def np_reshape_expr(a): return (a + a).reshape((2,5))", numpy.ones(10), np_reshape_expr=[NDArray[float,:]]) def test_cumsum_(self): self.run_test("def np_cumsum_(a): return a.cumsum()", numpy.arange(10), np_cumsum_=[NDArray[int,:]]) def test_cumsum2_(self): self.run_test("def np_cumsum2_(a): return a.cumsum()", numpy.arange(10).reshape(2,5), np_cumsum2_=[NDArray[int,:,:]]) def test_cumsum3_(self): self.run_test("def np_cumsum3_(a): return a.cumsum(1)", numpy.arange(10).reshape(2,5), np_cumsum3_=[NDArray[int,:,:]]) def test_cumsum4_(self): self.run_test("def np_cumsum4_(a): return a.cumsum(0)", numpy.arange(10).reshape(2,5), np_cumsum4_=[NDArray[int,:,:]]) def test_cumsum5_(self): self.run_test("def np_cumsum5_(a): return a.cumsum(0)", numpy.arange(10), np_cumsum5_=[NDArray[int,:]]) def test_sum_(self): self.run_test("def np_sum_(a): return a.sum()", numpy.arange(10), np_sum_=[NDArray[int,:]]) def test_sum_of_bool(self): self.run_test("def np_sum_of_bool(a): return (a > 2).sum()", numpy.arange(10), 
np_sum_of_bool=[NDArray[int,:]]) def test_sum_of_bool2(self): self.run_test("def np_sum_of_bool2(a): return (a > 2).sum(axis=1)", numpy.arange(10).reshape(5,2), np_sum_of_bool2=[NDArray[int,:,:]]) def test_sum_scalar0(self): self.run_test("def np_sum_scalar0(a): return a.sum().sum()", numpy.arange(10), np_sum_scalar0=[NDArray[int,:]]) def test_sum_scalar1(self): self.run_test("def np_sum_scalar1(a): return a.sum().sum(0)", numpy.arange(10), np_sum_scalar1=[NDArray[int,:]]) def test_sum_neg_shape(self): self.run_test("def np_sum_neg_shape(a): return a.sum(axis=-1)", numpy.arange(10).reshape(5,2), np_sum_neg_shape=[NDArray[int,:,:]]) def test_matrix_power0(self): self.run_test( "def np_matrix_power0(a): from numpy.linalg import matrix_power; return matrix_power(a, 0)", numpy.ones((10,10)), np_matrix_power0=[NDArray[float,:,:]] ) def test_matrix_power1(self): self.run_test( "def np_matrix_power1(a): from numpy.linalg import matrix_power; return matrix_power(a, 1)", numpy.ones((10,10)), np_matrix_power1=[NDArray[float,:,:]] ) def test_matrix_power2(self): self.run_test( "def np_matrix_power2(a): from numpy.linalg import matrix_power; return matrix_power(a, 5)", numpy.ones((10,10)), np_matrix_power2=[NDArray[float,:,:]] ) pythran-0.10.0+ds2/pythran/tests/test_numpy_func3.py000066400000000000000000001074361416264035500224700ustar00rootroot00000000000000""" Tests for part of the numpy module. """ import unittest import numpy from pythran.typing import List, NDArray, Tuple from pythran.tests import TestEnv @TestEnv.module class TestNumpyFunc3(TestEnv): """ This module includes tests for multiple numpy module function. Tested functions are: - numpy.dot - numpy.digitize - numpy.diff - numpy.trace - numpy.tri - numpy.trim_zeros - numpy.triu - numpy.tril - numpy.unique - numpy.unwrap and various combinations of +/-/** and trigonometric operations. 
""" def test_dot0(self): self.run_test("def np_dot0(x, y): from numpy import dot; return dot(x, y)", 2, 3, np_dot0=[int, int]) def test_dot1(self): self.run_test("def np_dot1(x): from numpy import dot ; y = [2, 3] ; return dot(x,y)", [2, 3], np_dot1=[List[int]]) def test_dot2(self): self.run_test("def np_dot2(x): from numpy import dot ; y = [2j, 3j] ; return dot(x,y)", [2j, 3j], np_dot2=[List[complex]]) def test_dot3(self): self.run_test("def np_dot3(x): from numpy import array ; y = array([2, 3]) ; return y.dot(x+x)", numpy.array([2, 3]), np_dot3=[NDArray[int,:]]) def test_dot4a(self): self.run_test("def np_dot4a(x): from numpy import dot ; y = [2, 3] ; return dot(x,y)", numpy.array([2, 3]), np_dot4a=[NDArray[int,:]]) def test_dot4b(self): self.run_test("def np_dot4b(x): from numpy import dot ; y = [2., 3.] ; return dot(x[1:],y)", numpy.array([2, 3, 4], dtype=numpy.float32), np_dot4b=[NDArray[numpy.float32,:]]) def test_dot4c(self): self.run_test("def np_dot4c(x): from numpy import dot ; return dot(x[1:],x[:-1])", numpy.array([2, 3, 4], dtype=numpy.float64), np_dot4c=[NDArray[float,:]]) def test_dot4d(self): self.run_test("def np_dot4d(x): from numpy import dot ; return dot(x, x)", numpy.array([2j, 3j, 4.]), np_dot4d=[NDArray[complex,:]]) def test_dot4e(self): self.run_test("def np_dot4e(x): from numpy import dot ; y = (2.j, 3.j) ; return dot(x[:-1],y)", numpy.array([2.j, 3.j, 4.j], dtype=numpy.complex64), np_dot4e=[NDArray[numpy.complex64,:]]) def test_dot4f(self): self.run_test("def np_dot4f(x): from numpy import dot ; y = (1., 2., 3.) ; return dot(2*x, y)", numpy.array([2., 3., 4.]), np_dot4f=[NDArray[float,:]]) def test_dot5(self): """ Check for dgemm version of dot. """ self.run_test(""" def np_dot5(x, y): from numpy import dot return dot(x,y)""", [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]], [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]], np_dot5=[List[List[float]], List[List[float]]]) def test_dot6(self): """ Check for dot with "no blas type". 
""" self.run_test(""" def np_dot6(x, y): from numpy import dot return dot(x,y)""", numpy.arange(9).reshape(3, 3), numpy.arange(9, 18).reshape(3, 3), np_dot6=[NDArray[int,:,:], NDArray[int,:,:]]) def test_dot7(self): """ Check for dgemm version of dot with rectangular shape. """ self.run_test(""" def np_dot7(x, y): from numpy import dot return dot(x,y)""", [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]], [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]], np_dot7=[List[List[float]], List[List[float]]]) def test_dot8(self): """ Check for dot with "no blas type" with rectangulare shape. """ self.run_test(""" def np_dot8(x, y): from numpy import dot return dot(x,y)""", numpy.arange(6).reshape(3, 2), numpy.arange(6, 12).reshape(2, 3), np_dot8=[NDArray[int,:,:], NDArray[int,:,:]]) def test_dot9(self): """ Check for gemv version of dot. """ self.run_test(""" def np_dot9(x, y): from numpy import dot return dot(x,y)""", numpy.arange(9.).reshape(3, 3).tolist(), [float(x) for x in range(9, 12)], np_dot9=[List[List[float]], List[float]]) def test_dot10(self): """ Check for dot gemv with "no blas type". """ self.run_test(""" def np_dot10(x, y): from numpy import dot return dot(x,y)""", numpy.arange(9).reshape(3, 3), numpy.arange(9, 12), np_dot10=[NDArray[int,:,:], NDArray[int,:]]) def test_dot11(self): """ Check for gemv version of dot with rectangular shape. """ self.run_test(""" def np_dot11(x, y): from numpy import dot return dot(x,y)""", numpy.arange(6.).reshape(3, 2).tolist(), [float(x) for x in range(6, 8)], np_dot11=[List[List[float]], List[float]]) def test_dot12(self): """ Check for dot gemv with "no blas type" with rectangulare shape. """ self.run_test(""" def np_dot12(x, y): from numpy import dot return dot(x,y)""", numpy.arange(6).reshape(3, 2), numpy.arange(6, 8), np_dot12=[NDArray[int,:,:], NDArray[int,:]]) def test_dot13(self): """ Check for gevm version of dot. 
""" self.run_test(""" def np_dot13(x, y): from numpy import dot return dot(x,y)""", [float(x) for x in range(9, 12)], numpy.arange(9.).reshape(3, 3).tolist(), np_dot13=[List[float], List[List[float]]]) def test_dot14(self): """ Check for dot gevm with "no blas type". """ self.run_test(""" def np_dot14(x, y): from numpy import dot return dot(x,y)""", numpy.arange(9, 12), numpy.arange(9).reshape(3, 3), np_dot14=[NDArray[int,:], NDArray[int,:,:]]) def test_dot15(self): """ Check for gevm version of dot with rectangular shape. """ self.run_test(""" def np_dot15(x, y): from numpy import dot return dot(x,y)""", [float(x) for x in range(6, 9)], numpy.arange(6.).reshape(3, 2).tolist(), np_dot15=[List[float], List[List[float]]]) def test_dot16(self): """ Check for dot gevm with "no blas type" with rectangular shape. """ self.run_test(""" def np_dot16(x, y): from numpy import dot return dot(x,y)""", numpy.arange(6.).reshape(2, 3), numpy.arange(18.).reshape(3,6), np_dot16=[NDArray[float,:,:], NDArray[float,:,:]]) def test_dot17(self): """ Check for dot gevm with "no blas type" with rectangular shape, first arg transposed.""" self.run_test(""" def np_dot17(x, y): from numpy import dot return dot(x.T,y)""", numpy.arange(6.).reshape(3, 2), numpy.arange(18.).reshape(3,6), np_dot17=[NDArray[float,:,:], NDArray[float,:,:]]) def test_dot18(self): """ Check for dot gevm with "no blas type" with rectangular shape, second arg transposed""" self.run_test(""" def np_dot18(x, y): from numpy import dot return dot(x,y.T)""", numpy.arange(6.).reshape(2, 3), numpy.arange(18.).reshape(6,3), np_dot18=[NDArray[float,:,:], NDArray[float,:,:]]) def test_dot19(self): """ Check for dot gevm with "no blas type" with rectangular shape, both args transposed""" self.run_test(""" def np_dot19(x, y): from numpy import dot return dot(x.T,y.T)""", numpy.array(numpy.arange(6.).reshape(3, 2)), numpy.array(numpy.arange(18.).reshape(6,3)), np_dot19=[NDArray[float,:,:], NDArray[float,:,:]]) def test_dot20(self): 
''' Mixed type: matrix x matrix''' self.run_test(""" def np_dot20(x, y): from numpy import dot return dot(x, y)""", numpy.array(numpy.arange(6.).reshape(2, 3), dtype=numpy.float32), numpy.array(numpy.arange(18.).reshape(3,6), dtype=numpy.float64), np_dot20=[NDArray[numpy.float32,:,:], NDArray[numpy.float64,:,:]]) def test_dot21(self): ''' Mixed type: matrix x vector''' self.run_test(""" def np_dot21(x, y): from numpy import dot return dot(x, y)""", numpy.array(numpy.arange(6.).reshape(2, 3), dtype=numpy.float32), numpy.array(numpy.arange(3.).reshape(3), dtype=numpy.float64), np_dot21=[NDArray[numpy.float32,:,:], NDArray[numpy.float64,:]]) def test_dot22(self): ''' Mixed type: matrix x vector''' self.run_test(""" def np_dot22(x, y): from numpy import dot return dot(y, x)""", numpy.array(numpy.arange(6.).reshape(3, 2), dtype=numpy.float32), numpy.array(numpy.arange(3.).reshape(3), dtype=numpy.float64), np_dot22=[NDArray[numpy.float32,:,:], NDArray[numpy.float64,:]]) def test_dot23(self): ''' Nd x 1d, N > 2''' self.run_test(""" def np_dot23(x, y): from numpy import dot return dot(x, y)""", numpy.array(numpy.arange(24.).reshape(4, 3, 2), dtype=numpy.float32), numpy.array(numpy.arange(2.).reshape(2), dtype=numpy.float64), np_dot23=[NDArray[numpy.float32,:,:,:], NDArray[numpy.float64,:]]) @unittest.skip("not implemented yet") def test_dot24(self): ''' Nd x 1d, N > 2''' self.run_test(""" def np_dot24(x, y): from numpy import dot return dot(x, y)""", numpy.array(numpy.arange(24.).reshape(4, 3, 2), dtype=numpy.float32), numpy.array(numpy.arange(24.).reshape(2,3,2,2), dtype=numpy.float64), np_dot24=[NDArray[numpy.float32,:,:,:], NDArray[numpy.float64,:,:,:,:]]) def test_vdot0(self): self.run_test(""" def np_vdot0(x, y): from numpy import vdot return vdot(x, y)""", numpy.array(numpy.arange(6.).reshape(3, 2), dtype=numpy.float32), numpy.array(numpy.arange(6.).reshape(6), dtype=numpy.float32), np_vdot0=[NDArray[numpy.float32,:,:], NDArray[numpy.float32,:]]) def 
test_vdot1(self): self.run_test(""" def np_vdot1(x, y): from numpy import vdot return vdot(x, y)""", numpy.array(numpy.arange(6.).reshape(3, 2), dtype=numpy.float32), numpy.array(numpy.arange(6.).reshape(6), dtype=numpy.float64), np_vdot1=[NDArray[numpy.float32,:,:], NDArray[numpy.float64,:]]) def test_vdot2(self): self.run_test(""" def np_vdot2(x, y): from numpy import vdot return vdot(x, y)""", numpy.array(numpy.arange(6.).reshape(3, 2), dtype=numpy.complex128), numpy.array(numpy.arange(6.).reshape(6), dtype=numpy.complex128), np_vdot2=[NDArray[numpy.complex128,:,:], NDArray[numpy.complex128,:]]) def test_vdot3(self): self.run_test(""" def np_vdot3(x, y): from numpy import vdot return vdot(x, y)""", numpy.array(numpy.arange(6.), dtype=numpy.complex128), numpy.array(numpy.arange(6.), dtype=numpy.complex128) * -1j, np_vdot3=[NDArray[numpy.complex128,:], NDArray[numpy.complex128,:]]) def test_digitize0(self): self.run_test("def np_digitize0(x): from numpy import array, digitize ; bins = array([0.0, 1.0, 2.5, 4.0, 10.0]) ; return digitize(x, bins)", numpy.array([0.2, 6.4, 3.0, 1.6]), np_digitize0=[NDArray[float,:]]) def test_digitize1(self): self.run_test("def np_digitize1(x): from numpy import array, digitize ; bins = array([ 10.0, 4.0, 2.5, 1.0, 0.0]) ; return digitize(x, bins)", numpy.array([0.2, 6.4, 3.0, 1.6]), np_digitize1=[NDArray[float,:]]) def test_diff0(self): self.run_test("def np_diff0(x): from numpy import diff; return diff(x)", numpy.array([1, 2, 4, 7, 0]), np_diff0=[NDArray[int,:]]) def test_diff1(self): self.run_test("def np_diff1(x): from numpy import diff; return diff(x,2)", numpy.array([1, 2, 4, 7, 0]), np_diff1=[NDArray[int,:]]) def test_diff2(self): self.run_test("def np_diff2(x): from numpy import diff; return diff(x)", numpy.array([[1, 3, 6, 10], [0, 5, 6, 8]]), np_diff2=[NDArray[int,:,:]]) def test_diff3(self): self.run_test("def np_diff3(x): from numpy import diff; return diff(x,2)", numpy.array([[1, 3, 6, 10], [0, 5, 6, 8]]), 
np_diff3=[NDArray[int,:,:]]) def test_diff4(self): self.run_test("def np_diff4(x): from numpy import diff; return diff(x + x)", numpy.array([1, 2, 4, 7, 0]), np_diff4=[NDArray[int,:]]) def test_diff5(self): self.run_test("def np_diff5(x): from numpy import diff; return diff(x + x, 2, axis=0)", numpy.arange(100).reshape(10, 10)*2, np_diff5=[NDArray[int,:,:]]) def test_diff6(self): self.run_test("def np_diff6(x): from numpy import diff; return diff(x, axis=0)", numpy.arange(100).reshape(10, 10)*2, np_diff6=[NDArray[int,:,:]]) def test_diff7(self): self.run_test("def np_diff7(x): from numpy import diff; return diff(x, axis=0)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff7=[NDArray[int,:,:,:]]) def test_diff8(self): self.run_test("def np_diff8(x): from numpy import diff; return diff(x, axis=1)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff8=[NDArray[int,:,:,:]]) def test_diff9(self): self.run_test("def np_diff9(x): from numpy import diff; return diff(x, axis=2)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff9=[NDArray[int,:,:,:]]) def test_trace0(self): self.run_test("def np_trace0(x): return x.trace()", numpy.arange(9).reshape(3,3), np_trace0=[NDArray[int,:,:]]) def test_trace1(self): self.run_test("def np_trace1(x): from numpy import trace; return trace(x, 1)", numpy.arange(12).reshape(3,4), np_trace1=[NDArray[int,:,:]]) def test_trace2(self): self.run_test("def np_trace2(x): from numpy import trace; return trace(x, 1)", numpy.arange(12).reshape(3,4), np_trace2=[NDArray[int,:,:]]) def test_tri0(self): self.run_test("def np_tri0(a): from numpy import tri; return tri(a)", 3, np_tri0=[int]) def test_tri1(self): self.run_test("def np_tri1(a): from numpy import tri; return tri(a, 4)", 3, np_tri1=[int]) def test_tri2(self): self.run_test("def np_tri2(a): from numpy import tri; return tri(a, 3, -1)", 4, np_tri2=[int]) def test_tri3(self): self.run_test("def np_tri3(a): from numpy import tri, int64; return tri(a, 5, 1, int64)", 3, np_tri3=[int]) def 
test_trim_zeros0(self): self.run_test(""" def np_trim_zeros0(x): from numpy import array, trim_zeros return trim_zeros(x)""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros0=[NDArray[int,:]]) def test_trim_zeros1(self): self.run_test(""" def np_trim_zeros1(x): from numpy import array, trim_zeros return trim_zeros(x, "f")""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros1=[NDArray[int,:]]) def test_trim_zeros2(self): self.run_test(""" def np_trim_zeros2(x): from numpy import trim_zeros return trim_zeros(x, "b")""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros2=[NDArray[int,:]]) def test_triu0(self): self.run_test("def np_triu0(x): from numpy import triu; return triu(x)", numpy.arange(12).reshape(3,4), np_triu0=[NDArray[int,:,:]]) def test_triu1(self): self.run_test("def np_triu1(x): from numpy import triu; return triu(x, 1)", numpy.arange(12).reshape(3,4), np_triu1=[NDArray[int,:,:]]) def test_triu2(self): self.run_test("def np_triu2(x): from numpy import triu; return triu(x, -1)", numpy.arange(12).reshape(3,4), np_triu2=[NDArray[int,:,:]]) def test_tril0(self): self.run_test("def np_tril0(x): from numpy import tril; return tril(x)", numpy.arange(12).reshape(3,4), np_tril0=[NDArray[int,:,:]]) def test_tril1(self): self.run_test("def np_tril1(x): from numpy import tril; return tril(x, 1)", numpy.arange(12).reshape(3,4), np_tril1=[NDArray[int,:,:]]) def test_tril2(self): self.run_test("def np_tril2(x): from numpy import tril; return tril(x, -1)", numpy.arange(12).reshape(3,4), np_tril2=[NDArray[int,:,:]]) def test_union1d(self): self.run_test("def np_union1d(x): from numpy import arange, union1d ; y = arange(1,4); return union1d(x, y)", numpy.arange(-1,2), np_union1d=[NDArray[int,:]]) def test_unique0(self): self.run_test("def np_unique0(x): from numpy import unique ; return unique(x)", numpy.array([1,1,2,2,2,1,5]), np_unique0=[NDArray[int,:]]) def test_unique1(self): self.run_test("def np_unique1(x): from numpy import unique ; 
return unique(x)", numpy.array([[1,2,2],[2,1,5]]), np_unique1=[NDArray[int,:,:]]) def test_unique2(self): self.run_test("def np_unique2(x): from numpy import unique ; return unique(x, True)", numpy.array([1,1,2,2,2,1,5]), np_unique2=[NDArray[int,:]]) def test_unique3(self): self.run_test("def np_unique3(x): from numpy import unique ; return unique(x, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique3=[NDArray[int,:]]) def test_unique4(self): self.run_test("def np_unique4(x): from numpy import unique ; return unique(x, True, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique4=[NDArray[int,:]]) def test_unique5(self): self.run_test("def np_unique5(x): from numpy import unique ; return unique(x, False)", numpy.array([1,1,2,2,2,1,5]), np_unique5=[NDArray[int,:]]) def test_unique6(self): self.run_test("def np_unique6(x): from numpy import unique ; return unique(x, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique6=[NDArray[int,:]]) def test_unique7(self): self.run_test("def np_unique7(x): from numpy import unique ; return unique(x, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique7=[NDArray[int,:]]) def test_unique8(self): self.run_test("def np_unique8(x): from numpy import unique ; return unique(x, return_inverse=True)", numpy.array([1,1,2,2,2,1,5]), np_unique8=[NDArray[int,:]]) def test_unique9(self): self.run_test("def np_unique9(x): from numpy import unique ; return unique(x, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique9=[NDArray[int,:]]) def test_unique10(self): self.run_test("def np_unique10(x): from numpy import unique ; return unique(x, True, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique10=[NDArray[int,:]]) def test_unique11(self): self.run_test("def np_unique11(x): from numpy import unique ; return unique(x, True, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique11=[NDArray[int,:]]) def test_unique12(self): self.run_test("def np_unique12(x): from numpy import unique ; return unique(x, True, False, True)", 
numpy.array([1,1,2,2,2,1,5]), np_unique12=[NDArray[int,:]]) def test_unique13(self): self.run_test("def np_unique13(x): from numpy import unique ; return unique(x, False, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique13=[NDArray[int,:]]) def test_unique14(self): self.run_test("def np_unique14(x): from numpy import unique ; return unique(x, False, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique14=[NDArray[int,:]]) def test_unique15(self): self.run_test("def np_unique15(x): from numpy import unique ; return unique(x, False, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique15=[NDArray[int,:]]) def test_unique16(self): self.run_test("def np_unique16(x): from numpy import unique ; return unique(x, False, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique16=[NDArray[int,:]]) def test_unique17(self): self.run_test("def np_unique17(x): from numpy import unique ; return unique(x, return_counts=1)", numpy.array([1,1,2,2,2,1,5]), np_unique17=[NDArray[int,:]]) def test_unwrap0(self): self.run_test("def np_unwrap0(x): from numpy import unwrap, pi ; x[:3] += 2.6*pi; return unwrap(x)", numpy.arange(6, dtype=float), np_unwrap0=[NDArray[float,:]]) def test_unwrap1(self): self.run_test("def np_unwrap1(x): from numpy import unwrap, pi ; x[:3] += 2*pi; return unwrap(x, 4)", numpy.arange(6, dtype=float), np_unwrap1=[NDArray[float,:]]) def test_unwrap2(self): self.run_test("def np_unwrap2(x): from numpy import unwrap, pi ; x[:3] -= 2*pi; return unwrap(x, 4)", numpy.arange(6, dtype=float), np_unwrap2=[NDArray[float,:]]) def test_unravel_index_0(self): self.run_test("def np_unravel_index0(x, y): from numpy import unravel_index; return unravel_index(x, y)", 1621, (6, 7, 8, 9), np_unravel_index0=[int, Tuple[int, int, int, int]]) def test_unravel_index_1(self): self.run_test("def np_unravel_index1(x, y): from numpy import unravel_index; return unravel_index(x, y, 'F')", 1621, (6, 7, 8, 9), np_unravel_index1=[int, Tuple[int, int, int, int]]) def test_copyto_0(self): 
self.run_test("def np_copyto0(x, y): from numpy import copyto; copyto(x, y); return x", numpy.array([1,2]), numpy.array([3,4]), np_copyto0=[NDArray[int, :], NDArray[int, :]]) def test_copyto_1(self): self.run_test("def np_copyto1(x, y): from numpy import copyto; copyto(x, y); return x", numpy.array([[1,2], [7, 8]]), numpy.array([3,4]), np_copyto1=[NDArray[int, :, :], NDArray[int, :]]) def test_numpy_pow0(self): self.run_test('def numpy_pow0(a): return a ** 2', numpy.arange(100).reshape((10, 10)), numpy_pow0=[NDArray[int,:,:]]) def test_numpy_pow1(self): self.run_test('def numpy_pow1(a): return a ** 2', numpy.arange(100, dtype=float).reshape((10, 10)), numpy_pow1=[NDArray[float,:,:]]) def test_numpy_pow2(self): self.run_test('def numpy_pow2(a): return a ** 2.2', numpy.arange(100, dtype=float).reshape((10, 10)), numpy_pow2=[NDArray[float,:,:]]) def test_numpy_pow3(self): self.run_test('def numpy_pow3(a): return a ** -0.2', numpy.arange(100, dtype=int), numpy_pow3=[NDArray[int,:]]) def test_add0(self): self.run_test("def np_add0(a, b): return a + b", numpy.ones(10), numpy.ones(10), np_add0=[NDArray[float,:], NDArray[float,:]]) def test_add1(self): self.run_test("def np_add1(a, b): return a + b + a", numpy.ones(10), numpy.ones(10), np_add1=[NDArray[float,:], NDArray[float,:]]) def test_add2(self): self.run_test("def np_add2(a, b): return a + b + 1", numpy.ones(10), numpy.ones(10), np_add2=[NDArray[float,:], NDArray[float,:]]) def test_add3(self): self.run_test("def np_add3(a, b): return 1. 
+ a + b + 1.", numpy.ones(10), numpy.ones(10), np_add3=[NDArray[float,:], NDArray[float,:]]) def test_add4(self): self.run_test("def np_add4(a, b): return ( a + b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_add4=[NDArray[float,:], NDArray[float,:]]) def test_add5(self): self.run_test("def np_add5(a, b): return (-a) + (-b)", numpy.ones(10), numpy.ones(10), np_add5=[NDArray[float,:], NDArray[float,:]]) def test_sub0(self): self.run_test("def np_sub0(a, b): return a - b", numpy.ones(10), numpy.ones(10), np_sub0=[NDArray[float,:], NDArray[float,:]]) def test_sub1(self): self.run_test("def np_sub1(a, b): return a - b - a", numpy.ones(10), numpy.ones(10), np_sub1=[NDArray[float,:], NDArray[float,:]]) def test_sub2(self): self.run_test("def np_sub2(a, b): return a - b - 1", numpy.ones(10), numpy.ones(10), np_sub2=[NDArray[float,:], NDArray[float,:]]) def test_sub3(self): self.run_test("def np_sub3(a, b): return 1. - a - b - 1.", numpy.ones(10), numpy.ones(10), np_sub3=[NDArray[float,:], NDArray[float,:]]) def test_sub4(self): self.run_test("def np_sub4(a, b): return ( a - b ) - ( a - b )", numpy.ones(10), numpy.ones(10), np_sub4=[NDArray[float,:], NDArray[float,:]]) def test_addsub0(self): self.run_test("def np_addsub0(a, b): return a - b + a", numpy.ones(10), numpy.ones(10), np_addsub0=[NDArray[float,:], NDArray[float,:]]) def test_addsub1(self): self.run_test("def np_addsub1(a, b): return a + b - a", numpy.ones(10), numpy.ones(10), np_addsub1=[NDArray[float,:], NDArray[float,:]]) def test_addsub2(self): self.run_test("def np_addsub2(a, b): return a + b - 1", numpy.ones(10), numpy.ones(10), np_addsub2=[NDArray[float,:], NDArray[float,:]]) def test_addsub3(self): self.run_test("def np_addsub3(a, b): return 1. 
+ a - b + 1.", numpy.ones(10), numpy.ones(10), np_addsub3=[NDArray[float,:], NDArray[float,:]]) def test_addsub4(self): self.run_test("def np_addsub4(a, b): return ( a - b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_addsub4=[NDArray[float,:], NDArray[float,:]]) def test_addcossub0(self): self.run_test("def np_addcossub0(a, b): from numpy import cos ; return a - b + cos(a)", numpy.ones(10), numpy.ones(10), np_addcossub0=[NDArray[float,:], NDArray[float,:]]) def test_addcossub1(self): self.run_test("def np_addcossub1(a, b): from numpy import cos ; return a + cos(b - a)", numpy.ones(10), numpy.ones(10), np_addcossub1=[NDArray[float,:], NDArray[float,:]]) def test_addcossub2(self): self.run_test("def np_addcossub2(a, b): from numpy import cos ; return a + cos(b - 1)", numpy.ones(10), numpy.ones(10), np_addcossub2=[NDArray[float,:], NDArray[float,:]]) def test_addcossub3(self): self.run_test("def np_addcossub3(a, b): from numpy import cos ; return cos(1. + a - b + cos(1.))", numpy.ones(10), numpy.ones(10), np_addcossub3=[NDArray[float,:], NDArray[float,:]]) def test_addcossub4(self): self.run_test("def np_addcossub4(a, b): from numpy import cos ; return cos( a - b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_addcossub4=[NDArray[float,:], NDArray[float,:]]) def test_sin0(self): self.run_test("def np_sin0(a, b): from numpy import sin ; return sin(a) + b", numpy.ones(10), numpy.ones(10), np_sin0=[NDArray[float,:], NDArray[float,:]]) def test_tan0(self): self.run_test("def np_tan0(a, b): from numpy import tan ; return tan(a - b)", numpy.ones(10), numpy.ones(10), np_tan0=[NDArray[float,:], NDArray[float,:]]) def test_arccos0(self): self.run_test("def np_arccos0(a, b): from numpy import arccos ; return arccos(a - b) + 1", numpy.ones(10), numpy.ones(10), np_arccos0=[NDArray[float,:], NDArray[float,:]]) def test_arcsin0(self): self.run_test("def np_arcsin0(a, b): from numpy import arcsin ; return arcsin(a + b - a + -b) + 1.", numpy.ones(10), numpy.ones(10), 
np_arcsin0=[NDArray[float,:], NDArray[float,:]]) def test_arctan0(self): self.run_test("def np_arctan0(a, b): from numpy import arctan ; return arctan(a -0.5) + a", numpy.ones(10), numpy.ones(10), np_arctan0=[NDArray[float,:], NDArray[float,:]]) def test_arctan20(self): self.run_test("def np_arctan20(a, b): from numpy import arctan2 ; return b - arctan2(a , b)", numpy.ones(10), numpy.ones(10), np_arctan20=[NDArray[float,:], NDArray[float,:]]) def test_cos1(self): self.run_test("def np_cos1(a): from numpy import cos; return cos(a)", 5, np_cos1=[int]) def test_sin1(self): self.run_test("def np_sin1(a): from numpy import sin; return sin(a)", 0.5, np_sin1=[float]) def test_tan1(self): self.run_test("def np_tan1(a): from numpy import tan; return tan(a)", 0.5, np_tan1=[float]) def test_arccos1(self): self.run_test("def np_arccos1(a): from numpy import arccos ; return arccos(a)", 1, np_arccos1=[int]) def test_arcsin1(self): self.run_test("def np_arcsin1(a): from numpy import arcsin ; return arcsin(a)", 1, np_arcsin1=[int]) def test_arctan1(self): self.run_test("def np_arctan1(a): from numpy import arctan ; return arctan(a)", 0.5, np_arctan1=[float]) def test_arctan21(self): self.run_test("def np_arctan21(a): from numpy import arctan2 ; b = .5 ; return arctan2(a , b)", 1., np_arctan21=[float]) def test_negative_mod(self): self.run_test("def np_negative_mod(a): return a % 5", numpy.array([-1, -5, -2, 7]), np_negative_mod=[NDArray[int,:]]) def test_binary_op_list0(self): self.run_test("def np_binary_op_list0(n): return n + [1,2,3]", numpy.array([4,5,6]), np_binary_op_list0=[NDArray[int,:]]) def test_binary_op_list1(self): self.run_test("def np_binary_op_list1(n): return [1,2,3] + n", numpy.array([4,5,6]), np_binary_op_list1=[NDArray[int,:]]) def test_binary_op_list2(self): self.run_test("def np_binary_op_list2(n): return [[1],[2],[3]] + n", numpy.array([[4],[5],[6]]), np_binary_op_list2=[NDArray[int,:,:]]) def test_binary_op_array0(self): self.run_test("def 
np_binary_op_array0(n): return n + (1,2,3)", numpy.array([4,5,6]), np_binary_op_array0=[NDArray[int,:]]) def test_binary_op_array1(self): self.run_test("def np_binary_op_array1(n): return (1,2,3) + n", numpy.array([4,5,6]), np_binary_op_array1=[NDArray[int,:]]) def test_binary_op_array2(self): self.run_test("def np_binary_op_array2(n): return ((1,2,3),) + n", numpy.array([[4,5,6]]), np_binary_op_array2=[NDArray[int,:,:]]) def test_round_method(self): self.run_test("def np_round_method(a): return a.round()", numpy.array([[4.3,5.5,6.1]]), np_round_method=[NDArray[float,:,:]]) def test_list_imag0(self): self.run_test("def list_imag0(a): import numpy ; return numpy.imag(a)", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_imag0=[NDArray[complex,:]]) def test_list_imag1(self): self.run_test("def list_imag1(a): import numpy ; numpy.imag(a)[0] = 1; return a", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_imag1=[NDArray[complex,:]]) def test_list_real0(self): self.run_test("def list_real0(a): import numpy ; return numpy.real(a)", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_real0=[NDArray[complex,:]]) def test_list_real1(self): self.run_test("def list_real1(a): import numpy ; numpy.real(a)[0] = 1; return a", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_real1=[NDArray[complex,:]]) def test_fill_diagonal_0(self): self.run_test("def fill_diagonal_0(a): import numpy ; numpy.fill_diagonal(a, 0); return a", numpy.ones((4,4)), fill_diagonal_0=[NDArray[float,:,:]]) def test_fill_diagonal_1(self): self.run_test("def fill_diagonal_1(a): import numpy ; numpy.fill_diagonal(a, 0); return a", numpy.ones((4,6)), fill_diagonal_1=[NDArray[float,:,:]]) def test_fill_diagonal_2(self): self.run_test("def fill_diagonal_2(n): import numpy ; a = numpy.ones((n,n, 5));numpy.fill_diagonal(a[0], 0); return a", 4, fill_diagonal_2=[int]) def test_fill_diagonal_3(self): self.run_test("def fill_diagonal_3(n): import numpy ; a = numpy.ones((n, n, 2, 
2));numpy.fill_diagonal(a[0,1:3], 0); return a", 4, fill_diagonal_3=[int]) def test_interp_0(self): self.run_test('def interp0(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)', numpy.random.randn(100), numpy.sort(numpy.random.randn(1000)), numpy.random.randn(1000), interp0=[NDArray[float,:],NDArray[float,:],NDArray[float,:]]) def test_interp_1(self): self.run_test('def interp1(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10.,10.)', numpy.random.randn(100), numpy.sort(numpy.random.randn(1000)), numpy.random.randn(1000), interp1=[NDArray[float,:],NDArray[float,:],NDArray[float,:]]) def test_interp_2(self): self.run_test('def interp2(x,xp,fp): import numpy as np; return np.interp(x,xp[::2],fp[::2],-10.,10.)', numpy.random.randn(100), numpy.sort(numpy.random.randn(1000)), numpy.random.randn(1000), interp2=[NDArray[float,:],NDArray[float,:],NDArray[float,:]]) def test_interp_3(self): self.run_test('def interp3(x,xp,fp): import numpy as np; return np.interp(x[::3],xp[::2],fp[::2],-10.)', numpy.random.randn(100), numpy.sort(numpy.random.randn(1000)), numpy.random.randn(1000), interp3=[NDArray[float,:],NDArray[float,:],NDArray[float,:]]) def test_interp_4(self): self.run_test('def interp4(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,period=1.1)', numpy.random.randn(100), numpy.sort(numpy.random.randn(1000)), numpy.random.randn(1000), interp4=[NDArray[float,:],NDArray[float,:],NDArray[float,:]]) def test_interp_5(self): self.run_test('def interp5(x,factor): N = len(x); import numpy as np; return np.interp(np.arange(0, N - 1, factor), np.arange(N), x, None, None, None)', numpy.random.randn(100), 10., interp5=[NDArray[float,:],float]) def test_setdiff1d0(self): self.run_test('def setdiff1d0(x,y): import numpy as np; return np.setdiff1d(x,y)', numpy.random.randn(100), numpy.random.randn(1000), setdiff1d0=[NDArray[float,:],NDArray[float,:]]) def test_setdiff1d1(self): self.run_test('def setdiff1d0(x,y): import numpy as np; return np.setdiff1d(x,y, 
True)', numpy.unique(numpy.random.randn(1000)), numpy.unique(numpy.random.randn(1000)), setdiff1d0=[NDArray[float,:],NDArray[float,:]]) def test_setdiff1d2(self): self.run_test('def setdiff1d2(x,y): import numpy as np; return np.setdiff1d(x,y)', numpy.random.randn(100), numpy.array([[1,2],[2,4]]), setdiff1d2=[NDArray[float,:],NDArray[int,:,:]]) def test_setdiff1d3(self): self.run_test('def setdiff1d3(x,y): import numpy as np; return np.setdiff1d(x,y, True)', numpy.unique(numpy.random.randn(1000)), numpy.array([[3,2],[5,4]]), setdiff1d3=[NDArray[float,:],NDArray[int,:,:]]) pythran-0.10.0+ds2/pythran/tests/test_numpy_linalg.py000066400000000000000000000061061416264035500227100ustar00rootroot00000000000000import unittest from pythran.tests import TestEnv import numpy from pythran.typing import NDArray @TestEnv.module class TestNumpyLinalg(TestEnv): def test_linalg_norm0(self): self.run_test("def linalg_norm0(x): from numpy.linalg import norm ; return norm(x)", numpy.arange(6.), linalg_norm0=[NDArray[float,:]]) def test_linalg_norm1(self): self.run_test("def linalg_norm1(x): from numpy.linalg import norm ; return norm(x)", numpy.arange(6.).reshape(2,3), linalg_norm1=[NDArray[float,:,:]]) def test_linalg_norm2(self): self.run_test("def linalg_norm2(x): from numpy.linalg import norm ; from numpy import inf ; return norm(x, inf)", numpy.arange(6.), linalg_norm2=[NDArray[float,:]]) def test_linalg_norm3(self): self.run_test("def linalg_norm3(x): from numpy.linalg import norm ; from numpy import inf ; return norm(x, -inf)", numpy.arange(6.), linalg_norm3=[NDArray[float,:]]) def test_linalg_norm4(self): self.run_test("def linalg_norm4(x): from numpy.linalg import norm ; from numpy import inf ; return norm(x, 0)", numpy.arange(6.), linalg_norm4=[NDArray[float,:]]) def test_linalg_norm5(self): self.run_test("def linalg_norm5(x): from numpy.linalg import norm ; from numpy import inf ; return norm(x, ord=inf, axis=1)", (numpy.arange(9) - 4).reshape((3,3)), 
linalg_norm5=[NDArray[int,:,:]]) def test_linalg_norm6(self): self.run_test("def linalg_norm6(x): from numpy.linalg import norm ; from numpy import inf ; return norm(x, ord=5, axis=(0,))", (numpy.arange(9) - 4).reshape((3,3)), linalg_norm6=[NDArray[int,:,:]]) def test_linalg_norm7(self): self.run_test("def linalg_norm7(x): from numpy.linalg import norm ; return norm(x)", numpy.arange(6).reshape(2,3), linalg_norm7=[NDArray[int,:,:]]) def test_linalg_norm8(self): self.run_test("def linalg_norm8(x): from numpy.linalg import norm ; return norm(x)", numpy.arange(6).reshape(2,3) * 1j + 1, linalg_norm8=[NDArray[complex,:,:]]) def test_linalg_norm_pydoc(self): self.run_test(''' def linalg_norm_pydoc(x): import numpy as np from numpy import linalg as LA a = np.arange(9) - x b = a.reshape((3, 3)) c = np.array([[ 1, 2, 3], [-1, 1, x]]) return (LA.norm(a), LA.norm(b), LA.norm(a, np.Inf), #LA.norm(b, np.inf), LA.norm(a, -np.inf), #LA.norm(b, -np.inf), LA.norm(a, 1), #LA.norm(b, 1), LA.norm(a, -1), #LA.norm(b, -1), LA.norm(a, 2), #LA.norm(b, 2), LA.norm(a, -2), #LA.norm(b, -2), LA.norm(a, 3), LA.norm(a, -3), LA.norm(c, axis=0), LA.norm(c, axis=1), LA.norm(c, ord=1, axis=1), )''', 10, linalg_norm_pydoc=[int]) pythran-0.10.0+ds2/pythran/tests/test_numpy_random.py000066400000000000000000001766731416264035500227430ustar00rootroot00000000000000from pythran.tests import TestEnv import warnings warnings.filterwarnings("ignore",category=DeprecationWarning) @TestEnv.module class TestNumpyRandom(TestEnv): ########################################################################### #Tests for numpy.random.random ########################################################################### def test_numpy_random0(self): """ Check numpy random without arguments. 
""" self.run_test(""" def numpy_random0(n): from numpy.random import random s = sum(random() for x in range(n)) return (abs(s / n - .5) < .05)""", 10 ** 5, numpy_random0=[int]) def test_numpy_random1(self): """ Check numpy random with size argument. """ self.run_test(""" def numpy_random1(n): from numpy.random import random s = sum(random(size=n)) return (abs(s / n - .5) < .05)""", 10 ** 5, numpy_random1=[int]) def test_numpy_random2(self): """ Check numpy random with shape argument. """ self.run_test(""" def numpy_random2(n): from numpy.random import random from numpy import sum s = sum(random((n, n))) return (abs(s / (n * n) - .5) < .05)""", 10 ** 3, numpy_random2=[int]) def test_numpy_random3(self): """ Check numpy random with constant size argument. """ self.run_test(""" def numpy_random3(n): from numpy.random import random s = sum(random(10 ** 5)) return (abs(s / n - .5) < .05)""", 10 ** 5, numpy_random3=[int]) ########################################################################### #Tests for numpy.random.random_sample ########################################################################### def test_numpy_random_sample0(self): """ Check numpy random_sample without arguments. """ self.run_test(""" def numpy_random_sample0(n): from numpy.random import random_sample s = sum(random_sample() for x in range(n)) return (abs(s / n - .5) < .05)""", 10 ** 5, numpy_random_sample0=[int]) def test_numpy_random_sample1(self): """ Check numpy random_sample with size argument. """ self.run_test(""" def numpy_random_sample1(n): from numpy.random import random_sample s = sum(random_sample(size=n)) return (abs(s / n - .5) < .05)""", 10 ** 5, numpy_random_sample1=[int]) def test_numpy_random_sample2(self): """ Check numpy random_sample with shape argument. 
""" self.run_test(""" def numpy_random_sample2(n): from numpy.random import random_sample from numpy import sum s = sum(random_sample((n, n))) return (abs(s / (n * n) - .5) < .05)""", 10 ** 3, numpy_random_sample2=[int]) ########################################################################### #Tests for numpy.random.ranf ########################################################################### def test_numpy_ranf0(self): """ Check numpy ranf without arguments. """ self.run_test(""" def numpy_ranf0(n): from numpy.random import ranf from numpy import mean s = [ranf() for x in range(n)] return (abs(mean(s) - .5) < .05)""", 10 ** 5, numpy_ranf0=[int]) def test_numpy_ranf1(self): """ Check numpy ranf with size argument. """ self.run_test(""" def numpy_ranf1(n): from numpy.random import ranf from numpy import mean a = ranf(size=n) return (abs(mean(a) - .5) < .05)""", 10 ** 5, numpy_ranf1=[int]) def test_numpy_ranf2(self): """ Check numpy ranf with shape argument. """ self.run_test(""" def numpy_ranf2(n): from numpy.random import ranf from numpy import mean a = ranf((n, n)) return (abs(mean(a) - .5) < .05)""", 10 ** 3, numpy_ranf2=[int]) ########################################################################### #Tests for numpy.random.sample ########################################################################### def test_numpy_sample0(self): """ Check numpy sample without arguments. """ self.run_test(""" def numpy_sample0(n): from numpy.random import sample from numpy import mean s = [sample() for x in range(n)] return (abs(mean(s) - .5) < .05)""", 10 ** 5, numpy_sample0=[int]) def test_numpy_sample1(self): """ Check numpy sample with size argument. """ self.run_test(""" def numpy_sample1(n): from numpy.random import sample from numpy import mean s = sample(size=n) return (abs(mean(s) - .5) < .05)""", 10 ** 5, numpy_sample1=[int]) def test_numpy_sample2(self): """ Check numpy sample with shape argument. 
""" self.run_test(""" def numpy_sample2(n): from numpy.random import sample from numpy import mean s = sample((n, n)) return (abs(mean(s) - .5) < .05)""", 10 ** 3, numpy_sample2=[int]) ########################################################################### #Tests for numpy.random.rand ########################################################################### def test_numpy_rand0(self): """ Check numpy rand without arguments. """ self.run_test(""" def numpy_rand0(n): from numpy.random import rand s = sum(rand() for x in range(n)) return (abs(s / n - .5) < .05)""", 10 ** 5, numpy_rand0=[int]) def test_numpy_rand1(self): """ Check numpy rand with multiple arguments. """ self.run_test(""" def numpy_rand1(n): from numpy.random import rand from numpy import sum s = sum(rand(n, n)) return (abs(s / (n * n) - .5) < .05)""", 10 ** 3, numpy_rand1=[int]) ########################################################################### #Tests for numpy.random.binomial ########################################################################### def test_numpy_binomial0(self): code = """ def numpy_binomial0(n, p, size): from numpy.random import binomial from numpy import var a = [binomial(n, p) for x in range(size)] return (abs(float(sum(a))/size - n * p) < .05 and abs(var(a) - n*p*(1-p)) < .05) """ self.run_test(code, 10., .2, 10**5, numpy_binomial0=[float, float, int]) def test_numpy_binomial1(self): code = """ def numpy_binomial1(n, p, size): from numpy.random import binomial from numpy import var a=binomial(n, p, size) return (abs(float(sum(a))/size - n * p) < .05 and abs(var(a) - n*p*(1-p)) < .05) """ self.run_test(code, 7., .2, 10**5, numpy_binomial1=[float, float, int]) def test_numpy_binomial2(self): code = """ def numpy_binomial2(n, p, size): from numpy.random import binomial from numpy import sum, var a=binomial(n, p, (size, size)) return (abs(float(sum(a))/(size*size) - n * p) < .05 and abs(var(a) - n*p*(1-p)) < .05) """ self.run_test(code, 9., .2, 10**3, 
numpy_binomial2=[float, float, int]) def test_numpy_binomial_exception(self): code = """ def numpy_binomial_exception(): from numpy.random import binomial c = 0; try : binomial(-1, 0.1) except ValueError : c += 1 try : binomial(1, -1) except ValueError : c += 1 try : binomial(1, 10) except ValueError : c += 1 return c """ self.run_test(code, numpy_binomial_exception=[]) ########################################################################### #Tests for numpy.random.standard_normal ########################################################################### def test_numpy_standard_normal0(self): """ Check standard_normal without argument with mean and variance. """ code = """ def numpy_standard_normal0(size): from numpy.random import standard_normal from numpy import var, mean a = [standard_normal() for x in range(size)] print(mean(a)) return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_standard_normal0=[int]) def test_numpy_standard_normal1(self): """ Check standard_normal with size argument with mean and variance.""" code = """ def numpy_standard_normal1(size): from numpy.random import standard_normal from numpy import var, mean a = standard_normal(size) print(mean(a)) return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_standard_normal1=[int]) def test_numpy_standard_normal2(self): """Check standard_normal with shape argument with mean and variance.""" code = """ def numpy_standard_normal2(size): from numpy.random import standard_normal from numpy import mean, var a = standard_normal((size, size)) print(mean(a)) return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 3, numpy_standard_normal2=[int]) ########################################################################### #Tests for numpy.random.normal ########################################################################### def test_numpy_normal0(self): """ Check normal without argument with mean 
and variance. """ code = """ def numpy_normal0(size): from numpy.random import normal from numpy import var, mean a = [normal() for x in range(size)] print(mean(a)) return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_normal0=[int]) def test_numpy_normal0a(self): """ Check normal with 1 argument with mean and variance. """ code = """ def numpy_normal0a(size): from numpy.random import normal from numpy import var, mean a = [normal(3.) for x in range(size)] print(mean(a)) return (abs(mean(a)-3) < 0.05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_normal0a=[int]) def test_numpy_normal0b(self): """ Check normal with 2 argument with mean and variance. """ code = """ def numpy_normal0b(size): from numpy.random import normal from numpy import var, mean, sqrt mu, sigma = 0, 0.1 a = normal(mu, sigma, size) print(mean(a)) return (abs(mu - mean(a)) < 0.05 and abs(sigma - sqrt(var(a,ddof=1))) < .05) """ self.run_test(code, 10 ** 5, numpy_normal0b=[int]) def test_numpy_normal1(self): """ Check normal with size argument with mean and variance.""" code = """ def numpy_normal1(size): from numpy.random import normal from numpy import var, mean a = normal(size=size) print(mean(a)) return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_normal1=[int]) def test_numpy_normal2(self): """Check normal with shape argument with mean and variance.""" code = """ def numpy_normal2(size): from numpy.random import normal from numpy import mean, var a = normal(size=(size, size)) print(mean(a)) return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 3, numpy_normal2=[int]) ########################################################################### #Tests for numpy.random.poisson ########################################################################### def test_numpy_poisson0(self): """ Check poisson without argument with mean and variance. 
""" code = """ def numpy_poisson0(size): from numpy.random import poisson from numpy import var, mean a = [poisson() for x in range(size)] print(mean(a)) return (abs(mean(a)-1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_poisson0=[int]) def test_numpy_poisson0a(self): """ Check poisson with 1 argument with mean and variance. """ code = """ def numpy_poisson0a(size): from numpy.random import poisson from numpy import var, mean a = [poisson(3.) for x in range(size)] print(mean(a)) return (abs(mean(a)-3) < .05 and abs(var(a) - 3) < .05) """ self.run_test(code, 10 ** 5, numpy_poisson0a=[int]) def test_numpy_poisson0b(self): """ Check poisson with 2 argument with mean and variance. """ code = """ def numpy_poisson0b(size): from numpy.random import poisson from numpy import var, mean, sqrt lam = 10 a = poisson(lam, size) print(mean(a)) return (abs(mean(a)-lam) < 0.05 and abs(sqrt(lam) - sqrt(var(a,ddof=1))) < .05) """ self.run_test(code, 10 ** 5, numpy_poisson0b=[int]) def test_numpy_poisson1(self): """ Check poisson with size argument with mean and variance.""" code = """ def numpy_poisson1(size): from numpy.random import poisson from numpy import var, mean a = poisson(size=size) print(mean(a)) return (abs(mean(a)-1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_poisson1=[int]) def test_numpy_poisson2(self): """Check poisson with shape argument with mean and variance.""" code = """ def numpy_poisson2(size): from numpy.random import poisson from numpy import mean, var a = poisson(size=(size, size)) print(mean(a)) return (abs(mean(a)-1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 3, numpy_poisson2=[int]) ########################################################################### #Tests for numpy.random.randn ########################################################################### def test_numpy_randn0(self): """ Check numpy randn without arguments. 
""" self.run_test(""" def numpy_randn0(n): from numpy.random import randn from numpy import mean, var a = [randn() for x in range(n)] return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05)""", 10 ** 5, numpy_randn0=[int]) def test_numpy_randn1(self): """ Check numpy randn with multiple arguments. """ self.run_test(""" def numpy_randn1(n): from numpy.random import randn from numpy import mean, var a = randn(n, n) return (abs(mean(a)) < .05 and abs(var(a) - 1) < .05)""", 10 ** 3, numpy_randn1=[int]) ########################################################################### #Tests for numpy.random.randint ########################################################################### def test_numpy_randint0(self): """ Check numpy randint with one argument. """ self.run_test(""" def numpy_randint0(n): from numpy.random import randint from numpy import mean, var a = [randint(11) for x in range(n)] return (abs(mean(a) - 5) < .05)""", 10 ** 5, numpy_randint0=[int]) def test_numpy_randint1(self): """ Check numpy randint with min/max argument. """ self.run_test(""" def numpy_randint1(n): from numpy.random import randint from numpy import mean, var a = [randint(10, 21) for x in range(n)] return (abs(mean(a) - 15) < .05)""", 10 ** 5, numpy_randint1=[int]) def test_numpy_randint2(self): """ Check numpy randint with size argument. """ self.run_test(""" def numpy_randint2(n): from numpy.random import randint from numpy import mean, var a = randint(10, 21, n) return (abs(mean(a) - 15) < .05)""", 10 ** 5, numpy_randint2=[int]) def test_numpy_randint3(self): """ Check numpy randint with shape argument. """ self.run_test(""" def numpy_randint3(n): from numpy.random import randint from numpy import mean, var a = randint(10, 21, (n, n)) return (abs(mean(a) - 15) < .05)""", 10 ** 3, numpy_randint3=[int]) def test_numpy_randint4(self): """ Check numpy randint with shape argument. 
""" self.run_test(""" def numpy_randint4(n): from numpy.random import randint from numpy import mean, var a = randint(10, size=n) return (abs(mean(a) - 5) < .05)""", 10 ** 3, numpy_randint4=[int]) ########################################################################### #Tests for numpy.random.random_integer ########################################################################### def test_numpy_random_integers0(self): """ Check numpy random_integers with one argument. """ self.run_test(""" def numpy_random_integers0(n): from numpy.random import random_integers from numpy import mean, var a = [random_integers(9) for x in range(n)] return all(0<=r<=9 for r in a)""", 3, numpy_random_integers0=[int]) def test_numpy_random_integers1(self): """ Check numpy random_integers with min/max argument. """ self.run_test(""" def numpy_random_integers1(n): from numpy.random import random_integers from numpy import mean, var a = [random_integers(10, 20) for x in range(n)] return all(10<=r<=20 for r in a)""", 3, numpy_random_integers1=[int]) def test_numpy_random_integers2(self): """ Check numpy random_integers with size argument. """ self.run_test(""" def numpy_random_integers2(n): from numpy.random import random_integers from numpy import mean, var a = random_integers(10, 20, n) return all(10<=r<=20 for r in a)""", 3, numpy_random_integers2=[int]) def test_numpy_random_integers3(self): """ Check numpy random_integers with shape argument. """ self.run_test(""" def numpy_random_integers3(n): from numpy.random import random_integers from numpy import mean, var a = random_integers(10, 20, (n, n)) return all(10<=r<=20 for r in a.flat)""", 3, numpy_random_integers3=[int]) ########################################################################### #Tests for numpy.random.choice ########################################################################### def test_numpy_random_choice0(self): """ Check numpy.random.choice with one int argument. 
""" self.run_test(""" def numpy_random_choice0(n): from numpy.random import choice from numpy import mean, var a = [choice(11) for _ in range(n)] return (abs(mean(a) - 5) < .05)""", 10 ** 5, numpy_random_choice0=[int]) def test_numpy_random_choice1(self): """ Check numpy.random.choice with one ndarray argument. """ self.run_test(""" def numpy_random_choice1(n): from numpy.random import choice from numpy import mean, var, arange a = [choice(arange(11)) for _ in range(n)] return (abs(mean(a) - 5) < .05)""", 10 ** 5, numpy_random_choice1=[int]) def test_numpy_random_choice2(self): """ Check numpy.random.choice with one numpy_expr argument. """ self.run_test(""" def numpy_random_choice2(n, x): from numpy.random import choice from numpy import mean, var, arange a = [choice(arange(11) + n) for _ in range(n)] return (abs(mean(a) - (5 + x)) < .05)""", 10 ** 5, 1, numpy_random_choice2=[int, int]) def test_numpy_random_choice3(self): """ Check numpy.random.choice with int and int argument. """ self.run_test(""" def numpy_random_choice3(n): from numpy.random import choice from numpy import mean, var, arange a = choice(11, n) return (abs(mean(a) - 5) < .05)""", 10 ** 5, numpy_random_choice3=[int]) def test_numpy_random_choice4(self): """ Check numpy.random.choice with int and tuple argument. """ self.run_test(""" def numpy_random_choice4(n): from numpy.random import choice from numpy import mean, var, arange a = choice(11, (n, n)) return (abs(mean(a) - 5) < .05)""", 10 ** 3, numpy_random_choice4=[int]) def test_numpy_random_choice5(self): """ Check numpy.random.choice with int, tuple and proba argument. """ self.run_test(""" def numpy_random_choice5(n): from numpy.random import choice from numpy import mean, var, arange a = choice(5, (n, n), True, [0.3, 0.3, 0.2, 0.1, 0.1]) return (abs(mean(a) - 1.4) < .05)""", 10 ** 3, numpy_random_choice5=[int]) def test_numpy_random_choice6(self): """ Check numpy.random.choice with int, int and proba argument. 
""" self.run_test(""" def numpy_random_choice6(n): from numpy.random import choice from numpy import mean, var, arange a = choice(5, n, True, [0.3, 0.3, 0.2, 0.1, 0.1]) return (abs(mean(a) - 1.4) < .05)""", 10 ** 5, numpy_random_choice6=[int]) def test_numpy_random_choice7(self): """ Check numpy.random.choice with ndarray and int argument. """ self.run_test(""" def numpy_random_choice7(n): from numpy.random import choice from numpy import mean, var, arange a = choice(arange(11), n) return (abs(mean(a) - 5) < .05)""", 10 ** 5, numpy_random_choice7=[int]) def test_numpy_random_choice8(self): """ Check numpy.random.choice with ndarray and tuple argument. """ self.run_test(""" def numpy_random_choice8(n): from numpy.random import choice from numpy import mean, var, arange a = choice(arange(11), (n, n)) return (abs(mean(a) - 5) < .05)""", 10 ** 3, numpy_random_choice8=[int]) def test_numpy_random_choice9(self): """Check numpy.random.choice with ndarray, tuple and proba argument.""" self.run_test(""" def numpy_random_choice9(n): from numpy.random import choice from numpy import mean, var, arange a = choice(arange(5), (n, n), True, [0.3, 0.3, 0.2, 0.1, 0.1]) return (abs(mean(a) - 1.4) < .05)""", 10 ** 3, numpy_random_choice9=[int]) def test_numpy_random_choice10(self): """ Check numpy.random.choice with ndarray, int and proba argument. """ self.run_test(""" def numpy_random_choice10(n): from numpy.random import choice from numpy import mean, var, arange a = choice(arange(5), n, True, [0.3, 0.3, 0.2, 0.1, 0.1]) return (abs(mean(a) - 1.4) < .05)""", 10 ** 5, numpy_random_choice10=[int]) ########################################################################### #Tests for numpy.random.bytes ########################################################################### def test_numpy_random_bytes1(self): """ Check numpy.random.bytes. 
""" self.run_test(""" def numpy_random_bytes1(n): from numpy.random import bytes from numpy import mean, fromstring, uint8, asarray a = bytes(n) return (abs(mean(asarray(fromstring(a, uint8), dtype=float)) - 127.5) < .05)""", 10 ** 8, numpy_random_bytes1=[int]) ########################################################################### #Tests for numpy.random.exponential ########################################################################### def test_numpy_exponential0(self): """ Check exponential without argument with mean and variance. """ code = """ def numpy_exponential0(size): from numpy.random import exponential from numpy import var, mean a = [exponential() for x in range(size)] return (abs(mean(a) -1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 6, numpy_exponential0=[int]) def test_numpy_exponential0a(self): """ Check exponential with 1 argument with mean and variance. """ code = """ def numpy_exponential0a(size): from numpy.random import exponential from numpy import var, mean scale = 2. a = [exponential(scale) for x in range(size)] return (abs(mean(a) - scale) < 0.05 and abs(var(a) - scale**2) < .05) """ self.run_test(code, 10 ** 6, numpy_exponential0a=[int]) def test_numpy_exponential0b(self): """ Check exponential with 2 argument with mean and variance. 
""" code = """ def numpy_exponential0b(size): from numpy.random import exponential from numpy import var, mean, sqrt scale = 2 a = exponential(scale, size) return (abs(mean(a) - scale) < 0.05 and abs(var(a,ddof=1) - scale**2 ) < .05) """ self.run_test(code, 10 ** 6, numpy_exponential0b=[int]) def test_numpy_exponential1(self): """ Check exponential with size argument with mean and variance.""" code = """ def numpy_exponential1(size): from numpy.random import exponential from numpy import var, mean a = exponential(size=size) return (abs(mean(a) -1 )< .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 6, numpy_exponential1=[int]) def test_numpy_exponential2(self): """Check exponential with shape argument with mean and variance.""" code = """ def numpy_exponential2(size): from numpy.random import exponential from numpy import mean, var a = exponential(size=(size, size)) return (abs(mean(a)) -1 < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 3, numpy_exponential2=[int]) ########################################################################### #Tests for numpy.random.chisquare ########################################################################### def test_numpy_chisquare0a(self): """ Check chisquare with 1 argument with mean and variance. """ code = """ def numpy_chisquare0a(size): from numpy.random import chisquare from numpy import var, mean df = 3. a = [chisquare(df) for x in range(size)] return (abs(mean(a) - df) < 0.05 and abs(var(a) - 2*df) < .05) """ self.run_test(code, 10 ** 6, numpy_chisquare0a=[int]) def test_numpy_chisquare0b(self): """ Check chisquare with 2 argument with mean and variance. 
""" code = """ def numpy_chisquare0b(size): from numpy.random import chisquare from numpy import var, mean, sqrt df = 2 a = chisquare(df, size) return (abs(mean(a) - df) < 0.05 and abs(var(a) - df*2 ) < .05) """ self.run_test(code, 10 ** 6, numpy_chisquare0b=[int]) def test_numpy_chisquare2(self): """Check chisquare with shape argument with mean and variance.""" code = """ def numpy_chisquare2(size): from numpy.random import chisquare from numpy import mean, var df = 1 a = chisquare(df, size=(size, size)) return (abs(mean(a)) - df < .05 and abs(var(a) - 2*df) < .05) """ self.run_test(code, 10 ** 3, numpy_chisquare2=[int]) ########################################################################### #Tests for numpy.random.gamma ########################################################################### def test_numpy_gamma0a(self): """ Check gamma with 1 argument with mean and variance. """ code = """ def numpy_gamma0a(size): from numpy.random import gamma from numpy import var, mean shape = 1 a = [gamma(3.) for x in range(size)] return (abs(mean(a)- shape) < 0.1 and abs(var(a) - shape) < .1) """ self.run_test(code, 10 ** 6, numpy_gamma0a=[int]) def test_numpy_gamma0b(self): """ Check gamma with 2 argument with mean and variance. 
""" code = """ def numpy_gamma0b(size): from numpy.random import gamma from numpy import var, mean, sqrt shape, scale = 1,2 a = gamma(shape, scale, size) return (abs(mean(a) - shape*scale) < 0.05 and abs(var(a) - shape*scale**2) < .05) """ self.run_test(code, 10 ** 6, numpy_gamma0b=[int]) def test_numpy_gamma2(self): """Check gamma with shape argument with mean and variance.""" code = """ def numpy_gamma2(size): from numpy.random import gamma from numpy import mean, var shape = 2 a = gamma(shape = shape, size=(size, size)) return (abs(mean(a) - shape) < .05 and abs(var(a) - shape) < .05) """ self.run_test(code, 10 ** 3, numpy_gamma2=[int]) ########################################################################### #Tests for numpy.random.weibull ########################################################################### def test_numpy_weibull0a(self): """ Check weibull with 1 argument with mean and variance. """ code = """ def numpy_weibull0a(size): from numpy.random import weibull from numpy import var, mean pa = 3. a = [weibull(pa) for x in range(size)] return (abs(mean(a) - pa) < 0.05 and abs(var(a) - 2*pa) < .05) """ self.run_test(code, 10 ** 6, numpy_weibull0a=[int]) def test_numpy_weibull0b(self): """ Check weibull with 2 argument with mean and variance. 
""" code = """ def numpy_weibull0b(size): from numpy.random import weibull from numpy import var, mean, sqrt pa = 2 a = weibull(pa, size) return (abs(mean(a) - pa) < 0.05 and abs(var(a) - pa*2 ) < .05) """ self.run_test(code, 10 ** 6, numpy_weibull0b=[int]) def test_numpy_weibull2(self): """Check weibull with shape argument with mean and variance.""" code = """ def numpy_weibull2(size): from numpy.random import weibull from numpy import mean, var pa = 1 a = weibull(pa, size=(size, size)) return (abs(mean(a)) - pa < .05 and abs(var(a) - 2*pa) < .05) """ self.run_test(code, 10 ** 3, numpy_weibull2=[int]) ########################################################################### #Tests for numpy.random.lognormal ########################################################################### def test_numpy_lognormal0(self): """ Check lognormal without argument with mean and variance. """ code = """ def numpy_lognormal0(size): from numpy.random import lognormal from numpy import var, mean, e a = [lognormal() for x in range(size)] m = 0 s = 1/2 rmean = e**(m+(s**2/2)) rvar = (e**(s**2) - 1)*e**(2*m+s**2) return (abs(mean(a) - rmean) < .1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_lognormal0=[int]) def test_numpy_lognormal0a(self): """ Check lognormal with 1 argument with mean and variance. """ code = """ def numpy_lognormal0a(size): from numpy.random import lognormal from numpy import var, mean, e m = 0 s = 1/5 a = [lognormal(m) for x in range(size)] rmean = e**(m+(s**2/2)) rvar = (e**(s**2) - 1)*e**(2*m+s**2) return (abs(mean(a)- rmean) < 0.1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_lognormal0a=[int]) def test_numpy_lognormal0b(self): """ Check lognormal with 2 argument with mean and variance. 
""" code = """ def numpy_lognormal0b(size): from numpy.random import lognormal from numpy import var, mean, e m = 1 s = 1/8 a = lognormal(m, s, size) rmean = e**(m+(s**2/2)) rvar = (e**(s**2) - 1)*e**(2*m+s**2) return (abs(mean(a) - rmean) < 0.1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_lognormal0b=[int]) def test_numpy_lognormal1(self): """ Check lognormal with size argument with mean and variance.""" code = """ def numpy_lognormal1(size): from numpy.random import lognormal from numpy import var, mean, e m = 0 s = 1/4 rmean = e**(m+(s**2/2)) rvar = (e**(s**2) - 1)*e**(2*m+s**2) a = lognormal(size=size) return (abs(mean(a) - rmean) < .1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_lognormal1=[int]) def test_numpy_lognormal2(self): """Check lognormal with shape argument with mean and variance.""" code = """ def numpy_lognormal2(size): from numpy.random import lognormal from numpy import mean, var, e m = 2 s = 1/2 rmean = e**(m+(s**2/2)) rvar = (e**(s**2) - 1)*e**(2*m+s**2) a = lognormal(size=(size, size)) return (abs(mean(a) - rmean) < .1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 3, numpy_lognormal2=[int]) ########################################################################### #Tests for numpy.random.geometric ########################################################################### def test_numpy_geometric0a(self): """ Check geometric with 1 argument with mean and variance. """ code = """ def numpy_geometric0a(size): from numpy.random import geometric from numpy import var, mean a = [geometric(0.6) for x in range(size)] return (abs(mean(a)- 2) < .05 and abs(var(a) - 3) < 1/8) """ self.run_test(code, 10 ** 6, numpy_geometric0a=[int]) def test_numpy_geometric0b(self): """ Check geometric with 2 argument with mean and variance. 
""" code = """ def numpy_geometric0b(size): from numpy.random import geometric from numpy import var, mean, sqrt p = 0.25 a = geometric(p, size) return (abs(mean(a)- 4) < 0.05 and abs(sqrt(p) - sqrt(var(a,ddof=1))) < 1/64) """ self.run_test(code, 10 ** 6, numpy_geometric0b=[int]) def test_numpy_geometric2(self): """Check geometric with shape argument with mean and variance.""" code = """ def numpy_geometric2(size): from numpy.random import geometric from numpy import mean, var p = 0.5 a = geometric(p, size=(size, size)) return (abs(mean(a)-2) < .05 and abs(var(a) - 1) < 1/8) """ self.run_test(code, 10 ** 3, numpy_geometric2=[int]) ########################################################################### #Tests for numpy.random.pareto ########################################################################### def test_numpy_pareto0a(self): """ Check pareto with 1 argument with mean and variance. """ code = """ def numpy_pareto0a(size): from numpy.random import pareto from numpy import var, mean alpha = 10 rvar = alpha/((alpha-1)**2*(alpha-2)) a = [pareto(alpha) for x in range(size)] return (abs(mean(a)- 0.5) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_pareto0a=[int]) def test_numpy_pareto0b(self): """ Check pareto with 2 argument with mean and variance. 
""" code = """ def numpy_pareto0b(size): from numpy.random import pareto from numpy import var, mean, sqrt alpha = 6 rvar = alpha/((alpha-1)**2*(alpha-2)) a = pareto(alpha, size) return (abs(mean(a)- 0.5) < 0.05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_pareto0b=[int]) def test_numpy_pareto2(self): """Check pareto with shape argument with mean and variance.""" code = """ def numpy_pareto2(size): from numpy.random import pareto from numpy import mean, var alpha = 5 rvar = alpha/((alpha-1)**2*(alpha-2)) a = pareto(alpha, size=(size, size)) return (abs(mean(a)- 0.5) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 3, numpy_pareto2=[int]) ########################################################################### #Tests for numpy.random.power ########################################################################### def test_numpy_power0a(self): """ Check power with 1 argument with mean and variance. """ code = """ def numpy_power0a(size): from numpy.random import power from numpy import var, mean alpha = 1 rmean = alpha / (alpha + 1) rvar = alpha/((alpha+1)**2*(alpha+2)) a = [power(alpha) for x in range(size)] return (abs(mean(a)- rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_power0a=[int]) def test_numpy_power0b(self): """ Check power with 2 argument with mean and variance. 
""" code = """ def numpy_power0b(size): from numpy.random import power from numpy import var, mean, sqrt alpha = 1 rmean = alpha / (alpha + 1) rvar = alpha/((alpha+1)**2*(alpha+2)) a = power(alpha, size) return (abs(mean(a)- rmean) < 0.05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_power0b=[int]) def test_numpy_power2(self): """Check power with shape argument with mean and variance.""" code = """ def numpy_power2(size): from numpy.random import power from numpy import mean, var alpha = 1 rmean = alpha / (alpha + 1) rvar = alpha/((alpha+1)**2*(alpha+2)) a = power(alpha, size=(size, size)) return (abs(mean(a)- rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 3, numpy_power2=[int]) ########################################################################### #Tests for numpy.random.rayleigh ########################################################################### def test_numpy_rayleigh0(self): """ Check rayleigh without argument with mean and variance. """ code = """ def numpy_rayleigh0(size): from numpy.random import rayleigh from numpy import var, mean, sqrt, pi a = [rayleigh() for x in range(size)] s = 2 rmean = s*sqrt(pi/2) rvar = ((4-pi)/2)*s**2 return (abs(mean(a)-rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 5, numpy_rayleigh0=[int]) def test_numpy_rayleigh0a(self): """ Check rayleigh with 1 argument with mean and variance. """ code = """ def numpy_rayleigh0a(size): from numpy.random import rayleigh from numpy import var, mean, sqrt, pi s = 2 a = [rayleigh(s) for x in range(size)] rmean = s*sqrt(pi/2) rvar = ((4-pi)/2)*s**2 return (abs(mean(a)-rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 5, numpy_rayleigh0a=[int]) def test_numpy_rayleigh0b(self): """ Check rayleigh with 2 argument with mean and variance. 
""" code = """ def numpy_rayleigh0b(size): from numpy.random import rayleigh from numpy import var, mean, sqrt, pi s = 2 a = rayleigh(s, size) rmean = s*sqrt(pi/2) rvar = ((4-pi)/2)*s**2 return (abs(mean(a)-rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 5, numpy_rayleigh0b=[int]) def test_numpy_rayleigh1(self): """ Check rayleigh with size argument with mean and variance.""" code = """ def numpy_rayleigh1(size): from numpy.random import rayleigh from numpy import var, mean, sqrt, pi a = rayleigh(size=size) s = 2 rmean = s*sqrt(pi/2) rvar = ((4-pi)/2)*s**2 return (abs(mean(a)-rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 5, numpy_rayleigh1=[int]) def test_numpy_rayleigh2(self): """Check rayleigh with shape argument with mean and variance.""" code = """ def numpy_rayleigh2(size): from numpy.random import rayleigh from numpy import mean, var, sqrt, pi a = rayleigh(size=(size, size)) s = 2 rmean = s*sqrt(pi/2) rvar = ((4-pi)/2)*s**2 return (abs(mean(a)-rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 3, numpy_rayleigh2=[int]) ########################################################################### #Tests for numpy.random.f ########################################################################### def test_numpy_f0a(self): """ Check f with 2 argument with mean and variance. """ code = """ def numpy_f0a(size): from numpy.random import f from numpy import var, mean dfnum = 50 dfden = 50 rmean = dfden / (dfden - 2) rvar = (2 * dfden**2 *( dfnum + dfden -2))/(dfnum * (dfden -2)**2 * (dfden -4)) a = [f(dfnum, dfden) for x in range(size)] return (abs(mean(a)- rmean) < 0.1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_f0a=[int]) def test_numpy_f0b(self): """ Check f with 2 argument with mean and variance. 
""" code = """ def numpy_f0b(size): from numpy.random import f from numpy import var, mean dfnum = 50 dfden = 50 rmean = dfden / (dfden - 2) rvar = (2 * dfden**2 *( dfnum + dfden -2))/(dfnum * (dfden -2)**2 * (dfden -4)) a = f(dfnum, dfden, size) return (abs(mean(a) - rmean) < 0.1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_f0b=[int]) def test_numpy_f2(self): """Check f with shape argument with mean and variance.""" code = """ def numpy_f2(size): from numpy.random import f from numpy import mean, var dfnum = 50 dfden = 50 rmean = dfden / (dfden - 2) rvar = (2 * dfden**2 *( dfnum + dfden -2))/(dfnum * (dfden -2)**2 * (dfden -4)) a = f(dfnum, dfden, size=(size, size)) return (abs(mean(a) - rmean) < .1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 3, numpy_f2=[int]) ########################################################################### #Tests for numpy.random.negative_binomial ########################################################################### def test_numpy_negative_binomial0a(self): """ Check negative_binomial with 1 argument with mean and variance. """ code = """ def numpy_negative_binomial0a(size): from numpy.random import negative_binomial from numpy import var, mean n = 1 p = 1 rmean = (n*(1-p))/p rvar = (n*(1-p))/p**2 a = [negative_binomial(n,p) for x in range(size)] return (abs(mean(a)- rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_negative_binomial0a=[int]) def test_numpy_negative_binomial0b(self): """ Check negative_binomial with 2 argument with mean and variance. 
""" code = """ def numpy_negative_binomial0b(size): from numpy.random import negative_binomial from numpy import var, mean, sqrt n = 1 p = 1 rmean = (n*(1-p))/p rvar = (n*(1-p))/p**2 a = negative_binomial(n, p, size) return (abs(mean(a)- rmean) < 0.05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_negative_binomial0b=[int]) def test_numpy_negative_binomial2(self): """Check negative_binomial with shape argument with mean and variance.""" code = """ def numpy_negative_binomial2(size): from numpy.random import negative_binomial from numpy import mean, var n = 1 p = 1 rmean = (n*(1-p))/p rvar = (n*(1-p))/p**2 a = negative_binomial(n, p , size=(size, size)) return (abs(mean(a)- rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 3, numpy_negative_binomial2=[int]) ########################################################################### #Tests for numpy.random.standard_exponential ########################################################################### def test_numpy_standard_exponential0(self): """ Check standard_exponential without argument with mean and variance. 
""" code = """ def numpy_standard_exponential0(size): from numpy.random import standard_exponential from numpy import var, mean a = [standard_exponential() for x in range(size)] return (abs(mean(a) - 1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_standard_exponential0=[int]) def test_numpy_standard_exponential1(self): """ Check standard_exponential with size argument with mean and variance.""" code = """ def numpy_standard_exponential1(size): from numpy.random import standard_exponential from numpy import var, mean a = standard_exponential(size) return (abs(mean(a) - 1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_standard_exponential1=[int]) def test_numpy_standard_exponential2(self): """Check standard_exponential with shape argument with mean and variance.""" code = """ def numpy_standard_exponential2(size): from numpy.random import standard_exponential from numpy import mean, var a = standard_exponential((size, size)) return (abs(mean(a) - 1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 3, numpy_standard_exponential2=[int]) ########################################################################### #Tests for numpy.random.standard_gamma ########################################################################### def test_numpy_standard_gamma0(self): """ Check standard_gamma without argument with mean and variance. 
""" code = """ def numpy_standard_gamma0(size): from numpy.random import standard_gamma from numpy import var, mean a = [standard_gamma(1) for x in range(size)] return (abs(mean(a) - 1) < .05 and abs(var(a) - 1) < .05) """ self.run_test(code, 10 ** 5, numpy_standard_gamma0=[int]) def test_numpy_standard_gamma1(self): """ Check standard_gamma with size argument with mean and variance.""" code = """ def numpy_standard_gamma1(size): from numpy.random import standard_gamma from numpy import var, mean a = standard_gamma(2, size) return (abs(mean(a) - 2) < .05 and abs(var(a) - 2) < .05) """ self.run_test(code, 10 ** 5, numpy_standard_gamma1=[int]) def test_numpy_standard_gamma2(self): """Check standard_gamma with shape argument with mean and variance.""" code = """ def numpy_standard_gamma2(size): from numpy.random import standard_gamma from numpy import mean, var a = standard_gamma(3, (size, size)) return (abs(mean(a) - 3) < .05 and abs(var(a) - 3) < .05) """ self.run_test(code, 10 ** 3, numpy_standard_gamma2=[int]) ########################################################################### #Tests for numpy.random.gumbel ########################################################################### def test_numpy_gumbel0(self): """ Check gumbel without argument with mean and variance. """ code = """ def numpy_gumbel0(size): from numpy.random import gumbel from numpy import var, mean, pi u = 0. rmean = u + 0.57721 rvar = (pi**2/6) a = [gumbel() for x in range(size)] return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_gumbel0=[int]) def test_numpy_gumbel0a(self): """ Check gumbel with 1 argument with mean and variance. 
""" code = """ def numpy_gumbel0a(size): from numpy.random import gumbel from numpy import var, mean, pi u = 1 rmean = u + 0.57721 rvar = (pi**2/6) a = [gumbel(u) for x in range(size)] return (abs(mean(a) - rmean ) < 0.05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_gumbel0a=[int]) def test_numpy_gumbel0b(self): """ Check gumbel with 2 argument with mean and variance. """ code = """ def numpy_gumbel0b(size): from numpy.random import gumbel from numpy import var, mean, pi u = 1.5 s = 2 rmean = u + 0.57721*s rvar = (pi**2/6)*s**2 a = gumbel(u, s, size) return (abs(mean(a) - rmean) < 0.05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_gumbel0b=[int]) def test_numpy_gumbel1(self): """ Check gumbel with size argument with mean and variance.""" code = """ def numpy_gumbel1(size): from numpy.random import gumbel from numpy import var, mean from numpy import var, mean, pi u = 0. s = 1 rmean = u + 0.57721*s rvar = (pi**2/6)*s**2 a = gumbel(size=size) return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_gumbel1=[int]) def test_numpy_gumbel2(self): """Check gumbel with shape argument with mean and variance.""" code = """ def numpy_gumbel2(size): from numpy.random import gumbel from numpy import mean, var from numpy import var, mean, pi u = 0 s = 1 rmean = u + 0.57721*s rvar = (pi**2/6)*s**2 a = gumbel(size=(size, size)) return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 3, numpy_gumbel2=[int]) ########################################################################### #Tests for numpy.random.logistic ########################################################################### def test_numpy_logistic0(self): """ Check logistic without argument with mean and variance. """ code = """ def numpy_logistic0(size): from numpy.random import logistic from numpy import var, mean, pi u = 0. 
rmean = u rvar = (pi**2/3) a = [logistic() for x in range(size)] return (abs(mean(a) - rmean) < .1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_logistic0=[int]) def test_numpy_logistic0a(self): """ Check logistic with 1 argument with mean and variance. """ code = """ def numpy_logistic0a(size): from numpy.random import logistic from numpy import var, mean, pi u = 2 rmean = u rvar = (pi**2/3) a = [logistic(u) for x in range(size)] return (abs(mean(a) - rmean ) < 0.1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_logistic0a=[int]) def test_numpy_logistic0b(self): """ Check logistic with 2 argument with mean and variance. """ code = """ def numpy_logistic0b(size): from numpy.random import logistic from numpy import var, mean, pi u = 2. s = 2 rmean = u rvar = ((s**2*pi**2)/3) a = logistic(u, s, size) return (abs(mean(a) - rmean) < 0.1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_logistic0b=[int]) def test_numpy_logistic1(self): """ Check logistic with size argument with mean and variance.""" code = """ def numpy_logistic1(size): from numpy.random import logistic from numpy import var, mean from numpy import var, mean, pi u = 0. 
s = 1 rmean = u rvar = ((s**2*pi**2)/3) a = logistic(size=size) return (abs(mean(a) - rmean) < .1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 6, numpy_logistic1=[int]) def test_numpy_logistic2(self): """Check logistic with shape argument with mean and variance.""" code = """ def numpy_logistic2(size): from numpy.random import logistic from numpy import mean, var from numpy import var, mean, pi u = 0 s = 1 rmean = u rvar = ((s**2*pi**2)/3) a = logistic(size=(size, size)) return (abs(mean(a) - rmean) < .1 and abs(var(a) - rvar) < .1) """ self.run_test(code, 10 ** 3, numpy_logistic2=[int]) ########################################################################### #Tests for numpy.random.laplace ########################################################################### def test_numpy_laplace0(self): """ Check laplace without argument with mean and variance. """ code = """ def numpy_laplace0(size): from numpy.random import laplace from numpy import var, mean, pi u = 0. s = 1 rmean = u rvar = 2*s**2 a = [laplace() for x in range(size)] return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_laplace0=[int]) def test_numpy_laplace0a(self): """ Check laplace with 1 argument with mean and variance. """ code = """ def numpy_laplace0a(size): from numpy.random import laplace from numpy import var, mean, pi u = 2 s = 1 rmean = u rvar = 2*s**2 a = [laplace(u) for x in range(size)] return (abs(mean(a) - rmean ) < 0.05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_laplace0a=[int]) def test_numpy_laplace0b(self): """ Check laplace with 2 argument with mean and variance. """ code = """ def numpy_laplace0b(size): from numpy.random import laplace from numpy import var, mean, pi u = 2. 
s = 2 rmean = u rvar = 2*s**2 a = laplace(u, s, size) return (abs(mean(a) - rmean) < 0.05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_laplace0b=[int]) def test_numpy_laplace1(self): """ Check laplace with size argument with mean and variance.""" code = """ def numpy_laplace1(size): from numpy.random import laplace from numpy import var, mean from numpy import var, mean, pi u = 0. s = 1 rmean = u rvar = 2*s**2 a = laplace(size=size) return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 6, numpy_laplace1=[int]) def test_numpy_laplace2(self): """Check laplace with shape argument with mean and variance.""" code = """ def numpy_laplace2(size): from numpy.random import laplace from numpy import mean, var from numpy import var, mean, pi u = 0 s = 1 rmean = u rvar = 2*s**2 a = laplace(size=(size, size)) return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 3, numpy_laplace2=[int]) ########################################################################### #Tests for numpy.random.logseries ########################################################################### def test_numpy_logseries0(self): """ Check logseries without argument with mean and variance. 
""" code = """ def numpy_logseries0(size): from numpy.random import logseries from numpy import var, mean, log s = 0.5 rmean = s / (log(1 - s)*(s - 1)) rvar = -(s*(s+log(1-s)))/((s - 1)**2*(log(1-s))**2) a = [logseries(s) for x in range(size)] return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 5, numpy_logseries0=[int]) def test_numpy_logseries1(self): """ Check logseries with size argument with mean and variance.""" code = """ def numpy_logseries1(size): from numpy.random import logseries from numpy import var, mean, log s = 0.25 rmean = s / (log(1 - s)*(s - 1)) rvar = -(s*(s+log(1-s)))/((s - 1)**2*(log(1-s))**2) a = logseries(s, size) return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 5, numpy_logseries1=[int]) def test_numpy_logseries2(self): """Check logseries with shape argument with mean and variance.""" code = """ def numpy_logseries2(size): from numpy.random import logseries from numpy import mean, var, log s = 0.2 rmean = s / (log(1 - s)*(s - 1)) rvar = -(s*(s+log(1-s)))/((s - 1)**2*(log(1-s))**2) a = logseries(s, (size, size)) return (abs(mean(a) - rmean) < .05 and abs(var(a) - rvar) < .05) """ self.run_test(code, 10 ** 3, numpy_logseries2=[int]) ########################################################################### # Tests for numpy.random.uniform ########################################################################### def test_numpy_uniform_no_arg(self): """ Check logseries without argument with mean and variance. 
""" code = """ def numpy_uniform_no_arg(size): import numpy as np from numpy.random import uniform low, high = 0.0, 1.0 a = np.array([uniform() for _ in range(size)]) rmean = 0.5 * (low + high) rvar = (high - low) ** 2 / 12 cond_mean = (a.mean() - rmean) / rmean < 0.05 cond_var = (np.var(a) - rvar) / rvar < 0.05 return cond_mean and cond_var """ self.run_test(code, 4000, numpy_uniform_no_arg=[int]) def test_numpy_uniform_size_int(self): """ Check logseries with arguments with mean and variance. """ code = """ def numpy_uniform_size_int(size): import numpy as np from numpy.random import uniform low, high = 0., 1234. rmean = 0.5 * (low + high) rvar = (high - low) ** 2 / 12 a = uniform(low, high, size) cond_mean = (a.mean() - rmean) / rmean < 0.05 cond_var = (np.var(a) - rvar) / rvar < 0.05 return cond_mean and cond_var """ self.run_test(code, 4000, numpy_uniform_size_int=[int]) def test_numpy_uniform_size_tuple(self): """ Check logseries with arguments with mean and variance. """ code = """ def numpy_uniform_size_tuple(size): import numpy as np from numpy.random import uniform low, high = -987., 12345. rmean = 0.5 * (low + high) rvar = (high - low) ** 2 / 12 a = uniform(low, high, (size, size)) cond_mean = (a.mean() - rmean) / rmean < 0.05 cond_var = (np.var(a) - rvar) / rvar < 0.05 return cond_mean and cond_var """ self.run_test(code, 70, numpy_uniform_size_tuple=[int]) pythran-0.10.0+ds2/pythran/tests/test_numpy_ufunc_binary.py000066400000000000000000000153601416264035500241300ustar00rootroot00000000000000""" Check unary functions for the numpy module. 
""" import numpy try: import scipy except ImportError: scipy = None import platform # Scipy special function implementation relies on boost, and boost doesn't # support them on ppc64le if platform.machine().startswith('ppc64'): scipy = None from pythran.tests import TestEnv from pythran.tables import MODULES from pythran.intrinsic import UFunc from pythran.typing import NDArray @TestEnv.module class TestNumpyUFuncBinary(TestEnv): pass #automatic generation of basic test cases for ufunc binary_ufunc = ([("numpy", k) for k, v in MODULES["numpy"].items() if isinstance(v, UFunc)] + [("scipy.special", k) for k, v in MODULES["scipy"]["special"].items() if isinstance(v, UFunc)]) reduced_ufunc = {'add', 'minimum', 'maximum', 'multiply', 'bitwise_or', 'bitwise_and', 'bitwise_xor'} for ns, f in binary_ufunc: if 'bitwise_' in f or 'ldexp' in f or '_shift' in f : setattr(TestNumpyUFuncBinary, 'test_' + f, eval("lambda self: self.run_test('def np_{0}(a): from {1} import {0} ; return {0}(a,a)', numpy.ones(10, numpy.int32), np_{0}=[NDArray[numpy.int32,:]])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_' + f + '_scalar', eval("lambda self: self.run_test('def np_{0}_scalar(a): from {1} import {0} ; return {0}(a, a-1)', 1, np_{0}_scalar=[int])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_' + f + '_matrix', eval("lambda self: self.run_test('def np_{0}_matrix(a): from {1} import {0} ; return {0}(a,a)', numpy.ones((2,5), numpy.int32), np_{0}_matrix=[NDArray[numpy.int32, :, :]])".format(f, ns))) # Tests for accumulation if f != "ldexp": # accumulate doesn't work with ldexp as typing is incorrect for # accumulation. 
setattr(TestNumpyUFuncBinary, 'test_accumulate_' + f, eval("lambda self: self.run_test('def np_{0}_accumulate(a): from {1} import {0} ; return {0}.accumulate(a)', numpy.ones(10, numpy.int32), np_{0}_accumulate=[NDArray[numpy.int32, :]])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_accumulate_' + f + '_matrix', eval("lambda self: self.run_test('def np_{0}_matrix_accumulate(a): from {1} import {0} ; return {0}.accumulate(a)', numpy.ones((2,5), numpy.int32), np_{0}_matrix_accumulate=[NDArray[numpy.int32,:,:]])".format(f, ns))) # Tests for reduction if f in reduced_ufunc: setattr(TestNumpyUFuncBinary, 'test_reduce_' + f, eval("lambda self: self.run_test('def np_{0}_reduce(a): from {1} import {0} ; return {0}.reduce(a)', numpy.ones(10, numpy.int32), np_{0}_reduce=[NDArray[numpy.int32, :]])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_reduce_' + f + '_matrix', eval("lambda self: self.run_test('def np_{0}_matrix_reduce(a): from {1} import {0} ; return {0}.reduce(a)', numpy.ones((2,5), numpy.int32), np_{0}_matrix_reduce=[NDArray[numpy.int32,:,:]])".format(f, ns))) else: if 'scipy' in ns and scipy is None: continue if 'spherical' in f and 'scipy' in ns: setattr(TestNumpyUFuncBinary, 'test_' + f, eval("lambda self: self.run_test('def np_{0}(a): from {1} import {0}; import numpy; return {0}(numpy.array(a, dtype=int) +2 ,a)', numpy.ones(10), np_{0}=[NDArray[float, :]])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_' + f + '_scalar', eval("lambda self: self.run_test('def np_{0}_scalar(a): from {1} import {0} ; return {0}(int(a+3), a+0.5)', 0.5, np_{0}_scalar=[float])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_' + f + '_matrix', eval("lambda self: self.run_test('def np_{0}_matrix(a): from {1} import {0}; import numpy; return {0}(numpy.array(a, dtype=int),a)', numpy.ones((2,5)) - 0.2 , np_{0}_matrix=[NDArray[float,:,:]])".format(f, ns))) ## Tests for integral numbers try: eval('{1}.{0}(1, 1)'.format(f, ns)) setattr(TestNumpyUFuncBinary, 'test_' + 
f + '_integer', eval("lambda self: self.run_test('def np_{0}_integer(a): from {1} import {0} ; return {0}(a, a)', numpy.ones(10,dtype=int), np_{0}_integer=[NDArray[int, :]])".format(f, ns))) except TypeError: pass else: setattr(TestNumpyUFuncBinary, 'test_' + f, eval("lambda self: self.run_test('def np_{0}(a): from {1} import {0} ; return {0}(a,a)', numpy.ones(10), np_{0}=[NDArray[float, :]])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_' + f + '_scalar', eval("lambda self: self.run_test('def np_{0}_scalar(a): from {1} import {0} ; return {0}(a+0.5, a+0.5)', 0.5, np_{0}_scalar=[float])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_' + f + '_matrix', eval("lambda self: self.run_test('def np_{0}_matrix(a): from {1} import {0} ; return {0}(a,a)', numpy.ones((2,5)) - 0.2 , np_{0}_matrix=[NDArray[float,:,:]])".format(f, ns))) ## Tests for complex numbers try: eval('{1}.{0}(1.j, 1.j)'.format(f, ns)) setattr(TestNumpyUFuncBinary, 'test_' + f + '_complex', eval("lambda self: self.run_test('def np_{0}_complex(a): from {1} import {0} ; return {0}(a,a)', numpy.ones(10)*1.j, np_{0}_complex=[NDArray[complex, :]])".format(f, ns))) except TypeError: pass ## Tests for integral numbers try: eval('{1}.{0}(1, 1)'.format(f, ns)) setattr(TestNumpyUFuncBinary, 'test_' + f + '_integer', eval("lambda self: self.run_test('def np_{0}_integer(a): from {1} import {0} ; return {0}(a, a)', numpy.ones(10,dtype=int), np_{0}_integer=[NDArray[int, :]])".format(f, ns))) except TypeError: pass ## Tests for accumulation if 'scipy' not in ns: setattr(TestNumpyUFuncBinary, 'test_accumulate_' + f, eval("lambda self: self.run_test('def np_{0}_accumulate(a): from {1} import {0} ; return {0}.accumulate(a)', numpy.ones(10), np_{0}_accumulate=[NDArray[float,:]])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_accumulate_' + f + '_matrix', eval("lambda self: self.run_test('def np_{0}_matrix_accumulate(a): from {1} import {0} ; return {0}.accumulate(a)', numpy.ones((2,5)) - 0.2 , 
np_{0}_matrix_accumulate=[NDArray[float, :, :]])".format(f, ns))) ## Tests for reduction if f in reduced_ufunc: setattr(TestNumpyUFuncBinary, 'test_reduce_' + f, eval("lambda self: self.run_test('def np_{0}_reduce(a): from {1} import {0} ; return {0}.reduce(a)', numpy.ones(10), np_{0}_reduce=[NDArray[float,:]])".format(f, ns))) setattr(TestNumpyUFuncBinary, 'test_reduce_' + f + '_matrix', eval("lambda self: self.run_test('def np_{0}_matrix_reduce(a): from {1} import {0} ; return {0}.reduce(a)', numpy.ones((2,5)) - 0.2 , np_{0}_matrix_reduce=[NDArray[float, :, :]])".format(f, ns))) pythran-0.10.0+ds2/pythran/tests/test_numpy_ufunc_unary.py000066400000000000000000000067221416264035500240040ustar00rootroot00000000000000import unittest from pythran.tests import TestEnv import numpy from pythran.typing import NDArray try: import scipy except ImportError: scipy = None @TestEnv.module class TestNumpyUFuncUnary(TestEnv): pass # automatic generation of basic test cases for ufunc unary_func_by_module = { 'numpy': ( 'abs', 'absolute', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'bitwise_not', 'cbrt', 'ceil', 'conj', 'conjugate', 'cos', 'cosh', 'deg2rad', 'degrees', 'exp', 'expm1', 'fabs', 'float32', 'float64', 'floor', 'int8', 'int16', 'int32', 'int64', 'isinf', 'isneginf', 'isposinf', 'isnan', 'invert', 'isfinite', 'log', 'log10', 'log1p', 'log2', 'logical_not', 'negative', 'rad2deg', 'radians', 'reciprocal', 'rint', 'round', 'round_', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'tan', 'tanh', 'trunc', 'uint8', 'uint16', 'uint32', 'uint64' ), 'scipy.special': ('gammaln', 'gamma') } if scipy is None: del unary_func_by_module['scipy.special'] test_inputs_by_type = { 'float': { '_scalar': ('0.5', '[float]'), '': ('numpy.ones(10)', '[NDArray[float,:]]'), '_matrix': ('numpy.ones((2,5))', '[NDArray[float,:,:]]') }, 'complex': { '_scalar': ('0.5j', '[complex]'), '': ('numpy.ones(10)*1.j', '[NDArray[complex,:]]'), '_matrix': 
('numpy.ones((2,5))*1.j', '[NDArray[complex,:,:]]') }, 'numpy.int32': { '_scalar': ('numpy.int32(1)', '[numpy.int32]'), '': ('numpy.ones(10, numpy.int32)', '[NDArray[numpy.int32,:]]'), '_matrix': ('numpy.ones((2,5), numpy.int32)', '[NDArray[numpy.int32,:,:]]') } } for module, functions in unary_func_by_module.items(): for f in functions: for input_type in ('float', 'complex'): try: eval('{}.{}({})'.format(module, f, test_inputs_by_type[input_type]['_scalar'][0])) except TypeError: if input_type == 'float': input_type = 'numpy.int32' else: continue # no need to test that, it does not work on numpy if input_type == 'complex' and 'gamma' in f: continue # skip for test_suffix, (input, pythran_input_type) \ in test_inputs_by_type[input_type].items(): func_name = "numpy_ufunc_unary_{}_{}{}_{}".format(module.replace('.', '_'), f, test_suffix, input_type.replace('.','_')) code = """ lambda self: self.run_test( '''{func}''', {input}, {func_name}={pythran_input_type} ) """.replace('\n', '').format( func=""" def {func_name}(a): from {module} import {f} return {f}(a) """.format(func_name=func_name, module=module, f=f), func_name=func_name, input=input, pythran_input_type=pythran_input_type, ) setattr( TestNumpyUFuncUnary, 'test_numpy_ufunc_unary_{}'.format(func_name), eval(code) ) pythran-0.10.0+ds2/pythran/tests/test_openmp.py000066400000000000000000000030751416264035500215120ustar00rootroot00000000000000import unittest from distutils.errors import CompileError from pythran.tests import TestFromDir import os import pythran from pythran.syntax import PythranSyntaxError from pythran.spec import Spec class TestOpenMP(TestFromDir): path = os.path.join(os.path.dirname(__file__), "openmp") class TestOpenMP4(TestFromDir): path = os.path.join(os.path.dirname(__file__), "openmp.4") @staticmethod def interface(name, file=None): return Spec({name: []}) @staticmethod def extract_runas(name, filepath): return ['#runas {}()'.format(name)] class TestOpenMPLegacy(TestFromDir): ''' Test old 
style OpenMP constructs, not using comments but strings and relying on function-scope locals ''' path = os.path.join(os.path.dirname(__file__), "openmp.legacy") @staticmethod def interface(name, file=None): return Spec({name: []}) @staticmethod def extract_runas(name, filepath): return ['#runas {}()'.format(name)] # only activate OpenMP tests if the underlying compiler supports OpenMP try: pythran.compile_cxxcode("omp", '#include ', extra_compile_args=['-fopenmp'], extra_link_args=['-fopenmp']) import omp if '-fopenmp' in pythran.config.cfg.get('compiler', 'ldflags'): TestOpenMP4.populate(TestOpenMP4) TestOpenMP.populate(TestOpenMP) TestOpenMPLegacy.populate(TestOpenMPLegacy) except PythranSyntaxError: raise except (CompileError, ImportError): pass if __name__ == '__main__': unittest.main() pythran-0.10.0+ds2/pythran/tests/test_operator.py000066400000000000000000000365221416264035500220520ustar00rootroot00000000000000from pythran.tests import TestEnv import unittest from pythran.typing import List @TestEnv.module class TestOperator(TestEnv): def test_lt(self): self.run_test("def lt(a,b):\n from operator import lt\n return lt(a,b)", 1, 2, lt=[int,int]) def test_le(self): self.run_test("def le(a,b):\n from operator import le\n return le(a,b)", 1, 2, le=[int,int]) def test_eq(self): self.run_test("def eq(a,b):\n from operator import eq\n return eq(a,b)", 2, 2, eq=[int,int]) def test_ne(self): self.run_test("def ne(a,b):\n from operator import ne\n return ne(a,b)", 2, 2, ne=[int,int]) def test_ge(self): self.run_test("def ge(a,b):\n from operator import ge\n return ge(a,b)", 2, 2, ge=[int,int]) def test_gt(self): self.run_test("def gt(a,b):\n from operator import gt\n return gt(a,b)", 2, 2, gt=[int,int]) def test___lt__(self): self.run_test("def __lt__(a,b):\n from operator import __lt__\n return __lt__(a,b)", 2, 2, __lt__=[int,int]) def test___le__(self): self.run_test("def __le__(a,b):\n from operator import __le__\n return __le__(a,b)", 2, 2, __le__=[int,int]) def 
test___eq__(self): self.run_test("def __eq__(a,b):\n from operator import __eq__\n return __eq__(a,b)", 2, 2, __eq__=[int,int]) def test___ne__(self): self.run_test("def __ne__(a,b):\n from operator import __ne__\n return __ne__(a,b)", 2, 2, __ne__=[int,int]) def test___ge__(self): self.run_test("def __ge__(a,b):\n from operator import __ge__\n return __ge__(a,b)", 2, 2, __ge__=[int,int]) def test___gt__(self): self.run_test("def __gt__(a,b):\n from operator import __gt__\n return __gt__(a,b)", 2, 2, __gt__=[int,int]) def test_not_(self): self.run_test("def not_(a):\n from operator import not_\n return not_(a)", True, not_=[bool]) def test___not__(self): self.run_test("def __not__(a):\n from operator import __not__\n return __not__(a)", True, __not__=[bool]) def test_truth(self): self.run_test("def truth(a):\n from operator import truth\n return truth(a)", True, truth=[bool]) def test_is_(self): self.run_test("def is_(a,b):\n from operator import is_\n return is_(a,b)", 1, 2, is_=[int,int]) def test_is_not(self): self.run_test("def is_not(a,b):\n from operator import is_not\n return is_not(a,b)", 1, 2, is_not=[int,int]) def test_abs(self): self.run_test("def abs(a):\n from operator import abs\n return abs(a)", -2j + 2, abs=[complex]) def test___abs__(self): self.run_test("def __abs__(a):\n from operator import __abs__\n return __abs__(a)", -2, __abs__=[int]) def test__add_(self): self.run_test("def add(a,b):\n from operator import add\n return add(a,b)", -1, 2, add=[int,int]) def test___add__(self): self.run_test("def __add__(a,b):\n from operator import __add__\n return __add__(a,b)", -1, 2, __add__=[int,int]) def test_and_(self): self.run_test("def and_(a,b):\n from operator import and_\n return and_(a,b)", 0x01, 0x02, and_=[int,int]) def test___and__(self): self.run_test("def __and__(a,b):\n from operator import __and__\n return __and__(a,b)", 0x01, 0x02, __and__=[int,int]) def test_floordiv(self): self.run_test("def floordiv(a,b):\n from operator import 
floordiv\n return floordiv(a,b)", 5, 2, floordiv=[int,int]) def test___floordiv__(self): self.run_test("def __floordiv__(a,b):\n from operator import __floordiv__\n return __floordiv__(a,b)", 5, 2, __floordiv__=[int,int]) def test_inv(self): self.run_test("def inv(a):\n from operator import inv\n return inv(a)", 0x02, inv=[int]) def test_invert(self): self.run_test("def invert(a):\n from operator import invert\n return invert(a)", 0x02, invert=[int]) def test___inv__(self): self.run_test("def __inv__(a):\n from operator import __inv__\n return __inv__(a)", 0x02, __inv__=[int]) def test___invert__(self): self.run_test("def __invert__(a):\n from operator import __invert__\n return __invert__(a)", 0x02, __invert__=[int]) def test_lshift(self): self.run_test("def lshift(a,b):\n from operator import lshift\n return lshift(a,b)", 0x02, 1, lshift=[int,int]) def test___lshift__(self): self.run_test("def __lshift__(a,b):\n from operator import __lshift__\n return __lshift__(a,b)",0x02 , 1, __lshift__=[int,int]) def test_mod(self): self.run_test("def mod(a,b):\n from operator import mod\n return mod(a,b)", 5, 2, mod=[int,int]) def test___mod__(self): self.run_test("def __mod__(a,b):\n from operator import __mod__\n return __mod__(a,b)", 5, 2, __mod__=[int,int]) def test_mul(self): self.run_test("def mul(a,b):\n from operator import mul\n return mul(a,b)", 5, 2, mul=[int,int]) def test___mul__(self): self.run_test("def __mul__(a,b):\n from operator import __mul__\n return __mul__(a,b)", 5, 2, __mul__=[int,int]) def test_neg(self): self.run_test("def neg(a):\n from operator import neg\n return neg(a)", 1, neg=[int]) def test___neg__(self): self.run_test("def __neg__(a):\n from operator import __neg__\n return __neg__(a)", 1, __neg__=[int]) def test_or_(self): self.run_test("def or_(a,b):\n from operator import or_\n return or_(a,b)", 0x02, 0x01, or_=[int,int]) def test___or__(self): self.run_test("def __or__(a,b):\n from operator import __or__\n return __or__(a,b)", 0x02, 
0x01, __or__=[int,int]) def test_pos(self): self.run_test("def pos(a):\n from operator import pos\n return pos(a)", 2, pos=[int]) def test___pos__(self): self.run_test("def __pos__(a):\n from operator import __pos__\n return __pos__(a)", 2, __pos__=[int]) def test_rshift(self): self.run_test("def rshift(a,b):\n from operator import rshift\n return rshift(a,b)", 0x02, 1, rshift=[int,int]) def test___rshift__(self): self.run_test("def __rshift__(a,b):\n from operator import __rshift__\n return __rshift__(a,b)", 0x02, 1, __rshift__=[int,int]) def test_sub(self): self.run_test("def sub(a,b):\n from operator import sub\n return sub(a,b)", 5, 2, sub=[int,int]) def test___sub__(self): self.run_test("def __sub__(a,b):\n from operator import __sub__\n return __sub__(a,b)", 5, 2, __sub__=[int,int]) def test_truediv(self): self.run_test("def truediv(a,b):\n from operator import truediv\n return truediv(a,b)", 5, 2, truediv=[int,int]) def test___truediv__(self): self.run_test("def __truediv__(a,b):\n from operator import __truediv__\n return __truediv__(a,b)", 5, 2, __truediv__=[int,int]) def test_xor(self): self.run_test("def xor(a,b):\n from operator import xor\n return xor(a,b)", 0x02, 0x01, xor=[int,int]) def test___xor__(self): self.run_test("def __xor__(a,b):\n from operator import __xor__\n return __xor__(a,b)", 0x02, 0x01, __xor__=[int,int]) def test_iadd(self): self.run_test("def iadd(a,b):\n from operator import iadd\n return iadd(a,b)", -1, 3, iadd=[int,int]) def test_iadd_argument_modification_not_mutable(self): self.run_test("def iadd2(b):\n a = -1\n from operator import iadd\n iadd(a,b)\n return a", 3, iadd2=[int]) def test_iadd_argument_modification_mutable(self): self.run_test("def iadd3(b):\n a = []\n from operator import iadd\n iadd(a,b)\n return a", [3], iadd3=[List[int]]) def test_iadd_argument_modification_mutable2(self): self.run_test("def iadd4(b):\n from operator import iadd\n return iadd([],b)", [3], iadd4=[List[int]]) def test___iadd__(self): 
self.run_test("def __iadd__(a,b):\n from operator import __iadd__\n return __iadd__(a,b)", 1, -4, __iadd__=[int,int]) def test___iadd___argument_modification_not_mutable(self): self.run_test("def __iadd2__(b):\n a = -1\n from operator import __iadd__\n __iadd__(a,b)\n return a", 3, __iadd2__=[int]) def test___iadd___argument_modification_mutable(self): self.run_test("def __iadd3__(b):\n a = []\n from operator import __iadd__\n __iadd__(a,b)\n return a", [3], __iadd3__=[List[int]]) def test___iadd___argument_modification_mutable2(self): self.run_test("def __iadd4__(b):\n from operator import __iadd__\n return __iadd__([],b)", [3], __iadd4__=[List[int]]) def test_iand(self): self.run_test("def iand(a,b):\n from operator import iand\n return iand(a,b)", 0x01, 0x11, iand=[int,int]) def test_iand2(self): self.run_test("def iand2(b):\n from operator import iand\n a=0x01\n return iand(a,b)", 0x11, iand2=[int]) def test_iand3(self): self.run_test("def iand3(b):\n from operator import iand\n a=0x01\n iand(a,b)\n return a", 0x11, iand3=[int]) def test___iand__(self): self.run_test("def __iand__(a,b):\n from operator import __iand__\n return __iand__(a,b)", 0x10, 0xFF, __iand__=[int,int]) def test_iconcat(self): self.run_test("def iconcat(a,b):\n from operator import iconcat\n return iconcat(a,b)", [3], [4], iconcat=[List[int],List[int]]) def test_iconcat2(self): self.run_test("def iconcat2(b):\n from operator import iconcat\n a=[3]\n return iconcat(a,b)", [4], iconcat2=[List[int]]) def test_iconcat3(self): self.run_test("def iconcat3(b):\n from operator import iconcat\n a=[3]\n iconcat(a,b)\n return a", [4], iconcat3=[List[int]]) def test_iconcat4(self): self.run_test("def iconcat4(b):\n from operator import iconcat\n a=[]\n iconcat(a,b)\n return a", [4], iconcat4=[List[int]]) def test_iconcat5(self): self.run_test("def iconcat5(b):\n from operator import iconcat\n return iconcat([],b)", [4], iconcat5=[List[int]]) def test___iconcat__(self): self.run_test("def 
__iconcat__(a,b):\n from operator import __iconcat__\n return __iconcat__(a,b)", [3], [4], __iconcat__=[List[int],List[int]]) def test_ifloordiv(self): self.run_test("def ifloordiv(a,b):\n from operator import ifloordiv\n return ifloordiv(a,b)", 5, 2, ifloordiv=[int,int]) def test___ifloordiv__(self): self.run_test("def __ifloordiv__(a,b):\n from operator import __ifloordiv__\n return __ifloordiv__(a,b)", 5, 2, __ifloordiv__=[int,int]) def test_ilshift(self): self.run_test("def ilshift(a,b):\n from operator import ilshift\n return ilshift(a,b)", 0x02, 3, ilshift=[int,int]) def test___ilshift__(self): self.run_test("def __ilshift__(a,b):\n from operator import __ilshift__\n return __ilshift__(a,b)", 0x02, 3, __ilshift__=[int,int]) def test_imod(self): self.run_test("def imod(a,b):\n from operator import imod\n return imod(a,b)", 4, 2, imod=[int,int]) def test___imod__(self): self.run_test("def __imod__(a,b):\n from operator import __imod__\n return __imod__(a,b)", 5, 3, __imod__=[int,int]) def test_imul(self): self.run_test("def imul(a,b):\n from operator import imul\n return imul(a,b)", 5, -1, imul=[int,int]) def test___imul__(self): self.run_test("def __imul__(a,b):\n from operator import __imul__\n return __imul__(a,b)", -6.1, -2, __imul__=[float,int]) def test_ior(self): self.run_test("def ior(a,b):\n from operator import ior\n return ior(a,b)", 0x02, 0x01, ior=[int,int]) def test___ior__(self): self.run_test("def __ior__(a,b):\n from operator import __ior__\n return __ior__(a,b)", 0x02, 0x02, __ior__=[int,int]) def test_ipow(self): self.run_test("def ipow(a,b):\n from operator import ipow\n return ipow(a,b)", 5, 5, ipow=[int,int]) def test___ipow__(self): self.run_test("def __ipow__(a,b):\n from operator import __ipow__\n return __ipow__(a,b)", 2, 8, __ipow__=[int,int]) def test_irshift(self): self.run_test("def irshift(a,b):\n from operator import irshift\n return irshift(a,b)", 0x02, 3, irshift=[int,int]) def test___irshift__(self): self.run_test("def 
__irshift__(a,b):\n from operator import __irshift__\n return __irshift__(a,b)", 0x02, 1, __irshift__=[int,int]) def test_isub(self): self.run_test("def isub(a,b):\n from operator import isub\n return isub(a,b)", 5, -8, isub=[int,int]) def test___isub__(self): self.run_test("def __isub__(a,b):\n from operator import __isub__\n return __isub__(a,b)", -8, 5, __isub__=[int,int]) def test_itruediv(self): self.run_test("def itruediv(a,b):\n from operator import itruediv\n return itruediv(a,b)", 5, 2, itruediv=[int,int]) def test_itruediv2(self): self.run_test("def itruediv2(b):\n from operator import itruediv\n a=5\n return itruediv(a,b)", 2, itruediv2=[int]) def test_itruediv3(self): self.run_test("def itruediv3(b):\n from operator import itruediv\n a=5\n itruediv(a,b)\n return a", 2, itruediv3=[int]) def test___itruediv__(self): self.run_test("def __itruediv__(a,b):\n from operator import __itruediv__\n return __itruediv__(a,b)", 5, 2, __itruediv__=[int,int]) def test_ixor(self): self.run_test("def ixor(a,b):\n from operator import ixor\n return ixor(a,b)", 0x02, 0x01, ixor=[int,int]) def test___ixor__(self): self.run_test("def __ixor__(a,b):\n from operator import __ixor__\n return __ixor__(a,b)", 0x02, 0x02, __ixor__=[int,int]) def test_concat(self): self.run_test("def concat(a,b):\n from operator import concat\n return concat(a,b)", [3], [4], concat=[List[int],List[int]]) def test___concat__(self): self.run_test("def __concat__(a,b):\n from operator import __concat__\n return __concat__(a,b)", [], [1], __concat__=[List[int],List[int]]) def test_contains(self): self.run_test("def contains(a,b):\n from operator import contains\n return contains(a,b)", [1,2,3,4], 2, contains=[List[int],int]) def test___contains__(self): self.run_test("def __contains__(a,b):\n from operator import __contains__\n return __contains__(a,b)", [1,2,3,4], 5, __contains__=[List[int],int]) def test_countOf(self): self.run_test("def countOf(a,b):\n from operator import countOf\n return 
countOf(a,b)", [1,2,3,4,3,3,3,2,3,1], 3, countOf=[List[int],int]) def test_delitem(self): self.run_test("def delitem(a,b):\n from operator import delitem\n return delitem(a,b)", [1,2,3,4], 3, delitem=[List[int],int]) def test___delitem__(self): self.run_test("def __delitem__(a,b):\n from operator import __delitem__\n return __delitem__(a,b)", [1,2,3,4], 2, __delitem__=[List[int],int]) def test_getitem(self): self.run_test("def getitem(a,b):\n from operator import getitem\n return getitem(a,b)", [4,3,2,1], 1, getitem=[List[int],int]) def test___getitem__(self): self.run_test("def __getitem__(a,b):\n from operator import __getitem__\n return __getitem__(a,b)", [4,3,2,1], 2, __getitem__=[List[int],int]) def test_indexOf(self): self.run_test("def indexOf(a,b):\n from operator import indexOf\n return indexOf(a,b)", [4,3,2,1], 4, indexOf=[List[int],int]) def test_itemgetter(self): self.run_test("def itemgetter(i,a):\n from operator import itemgetter\n g = itemgetter(i)\n return g(a)", 2, [4,3,2,1], itemgetter=[int,List[int]]) def test_itemgetter2(self): self.run_test("def foo():\n from operator import itemgetter\n g = itemgetter(1)", foo=[]) def test_itemgetter3(self): self.run_test("def itemgetter3(i,j,k,a):\n from operator import itemgetter\n g = itemgetter(i,j,k)\n return g(a)", 2, 3, 4, [4,3,2,1,0], itemgetter3=[int,int,int,List[int]]) pythran-0.10.0+ds2/pythran/tests/test_optimizations.py000066400000000000000000000526161416264035500231320ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import List import unittest import pythran class TestOptimization(TestEnv): def test_constant_fold_nan(self): code = "def constant_fold_nan(a): from numpy import nan; a[0] = nan; return a" self.run_test(code, [1., 2.], constant_fold_nan=[List[float]]) def test_constant_fold_empty_array(self): code = "def constant_fold_empty_array(): from numpy import ones; return ones((0,0,0)).shape" self.run_test(code, constant_fold_empty_array=[]) def 
test_constant_fold_divide_by_zero(self): code = "def constant_fold_divide_by_zero(): return 1/0" with self.assertRaises(pythran.syntax.PythranSyntaxError): self.check_ast(code, "syntax error anyway", ["pythran.optimizations.ConstantFolding"]) def test_genexp(self): self.run_test("def test_genexp(n): return sum((x*x for x in range(n)))", 5, test_genexp=[int]) def test_genexp_2d(self): self.run_test("def test_genexp_2d(n1, n2): return sum((x*y for x in range(n1) for y in range(n2)))", 2, 3, test_genexp_2d=[int, int]) def test_genexp_if(self): self.run_test("def test_genexp_if(n): return sum((x*x for x in range(n) if x < 4))", 5, test_genexp_if=[int]) def test_genexp_mixedif(self): self.run_test("def test_genexp_mixedif(m, n): return sum((x*y for x in range(m) for y in range(n) if x < 4))", 2, 3, test_genexp_mixedif=[int, int]) def test_genexp_triangular(self): self.run_test("def test_genexp_triangular(n): return sum((x*y for x in range(n) for y in range(x)))", 2, test_genexp_triangular=[int]) def test_aliased_readonce(self): self.run_test(""" def foo(f,l): return map(f,l[1:]) def alias_readonce(n): map = foo return list(map(lambda t: (t[0]*t[1] < 50), list(zip(range(n), range(n))))) """, 10, alias_readonce=[int]) def test_replace_aliased_map(self): self.run_test(""" def alias_replaced(n): map = filter return list(map(lambda x : x < 5, range(n))) """, 10, alias_replaced=[int]) def test_listcomptomap_alias(self): self.run_test(""" def foo(f,l): return map(f,l[3:]) def listcomptomap_alias(n): map = foo return list([x for x in range(n)]) """, 10, listcomptomap_alias=[int]) def test_readonce_nested_calls(self): self.run_test(""" def readonce_nested_calls(Lq): import numpy as np return np.prod(np.sign(Lq)) """, [-5.], readonce_nested_calls=[List[float]]) def test_readonce_return(self): self.run_test(""" def foo(l): return l def readonce_return(n): l = list(foo(range(n))) return l[:] """, 5, readonce_return=[int]) def test_readonce_assign(self): self.run_test(""" def 
foo(l): l[2] = 5 return list(range(10)) def readonce_assign(n): return foo(list(range(n))) """, 5, readonce_assign=[int]) def test_readonce_assignaug(self): self.run_test(""" def foo(l): l += [2,3] return range(10) def readonce_assignaug(n): return list(foo(list(range(n)))) """, 5, readonce_assignaug=[int]) def test_readonce_for(self): self.run_test(""" def foo(l): s = [] for x in range(10): s.extend(list(l)) return s def readonce_for(n): return foo(range(n)) """, 5, readonce_for=[int]) def test_readonce_2for(self): self.run_test(""" def foo(l): s = 0 for x in l: s += x for x in l: s += x return list(range(s)) def readonce_2for(n): return foo(range(n)) """, 5, readonce_2for=[int]) def test_readonce_while(self): self.run_test(""" def foo(l): r = [] while (len(r) < 50): r.extend(list(l)) return r def readonce_while(n): return foo(range(n)) """, 5, readonce_while=[int]) def test_readonce_if(self): self.run_test(""" def h(l): return sum(l) def g(l): return sum(l) def foo(l): if True: return g(l) else: return h(l) def readonce_if(n): return foo(range(n)) """, 5, readonce_if=[int]) def test_readonce_if2(self): self.run_test(""" def h(l): return sum(l) def g(l): return max(l[1:]) def foo(l): if True: return g(l) else: return h(l) def readonce_if2(n): return foo(list(range(n))) """, 5, readonce_if2=[int]) def test_readonce_slice(self): self.run_test(""" def foo(l): return list(l[:]) def readonce_slice(n): return foo(list(range(n))) """, 5, readonce_slice=[int]) def test_readonce_listcomp(self): self.run_test(""" def foo(l): return [z for x in l for y in l for z in range(x+y)] def readonce_listcomp(n): return foo(range(n)) """, 5, readonce_listcomp=[int]) def test_readonce_genexp(self): self.run_test(""" def foo(l): return (z for x in l for y in l for z in range(x+y)) def readonce_genexp(n): return list(foo(range(n))) """, 5, readonce_genexp=[int]) def test_readonce_recursive(self): self.run_test(""" def foo(l,n): if n < 5: return foo(l,n+1) else: return sum(l) def 
readonce_recursive(n): return foo(range(n),0) """, 5, readonce_recursive=[int]) def test_readonce_recursive2(self): self.run_test(""" def foo(l,n): if n < 5: return foo(l,n+1) else: return sum(l[1:]) def readonce_recursive2(n): return foo(list(range(n)),0) """, 5, readonce_recursive2=[int]) def test_readonce_cycle(self): self.run_test(""" def foo(l,n): if n < 5: return bar(l,n) else: return sum(l) def bar(l,n): return foo(l, n+1) def readonce_cycle(n): return foo(range(n),0) """, 5, readonce_cycle=[int]) def test_readonce_cycle2(self): self.run_test(""" def foo(l,n): if n < 5: return bar(l,n) else: return sum(l) def bar(l,n): return foo(l, n+1) def readonce_cycle2(n): return foo(range(n),0) """, 5, readonce_cycle2=[int]) def test_readonce_list(self): init = "def foo(l): return sum(list(l))" ref = """def foo(l): return builtins.sum(l)""" self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"]) def test_readonce_tuple(self): init = "def foo(l): return sum(tuple(l))" ref = """def foo(l): return builtins.sum(l)""" self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"]) def test_readonce_array(self): init = "def foo(l): import numpy as np; return sum(np.array(l))" ref = """import numpy as __pythran_import_numpy def foo(l): return builtins.sum(l)""" self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"]) def test_readonce_np_sum_copy(self): init = "def foo(l): import numpy as np; return np.sum(np.copy(l))" ref = """import numpy as __pythran_import_numpy def foo(l): return __pythran_import_numpy.sum(l)""" self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"]) def test_omp_forwarding(self): init = """ def foo(): a = 2 #omp parallel if 1: builtins.print(a) """ ref = """\ def foo(): a = 2 'omp parallel' if 1: builtins.print(a) return builtins.None""" self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"]) def test_omp_forwarding2(self): init = """ def foo(): #omp parallel if 1: a = 2 
builtins.print(a) """ ref = """\ def foo(): 'omp parallel' if 1: pass builtins.print(2) return builtins.None""" self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"]) def test_omp_forwarding3(self): init = """ def foo(): #omp parallel if 1: a = 2 builtins.print(a) """ ref = """\ def foo(): 'omp parallel' if 1: a = 2 builtins.print(a) return builtins.None""" self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"]) def test_forwarding0(self): init = ''' def foo(x): for i in x: if i: j = i return j''' ref = init self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"]) def test_forwarding1(self): init = 'def f(i):\n while i:\n if i > 3: x=1; continue\n x=2\n return x' ref = 'def f(i):\n while i:\n if (i > 3):\n x = 1\n continue\n x = 2\n return x' self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"]) def test_full_unroll0(self): init = """ def full_unroll0(): k = [] for i,j in zip([1,2,3],[4,5,6]): k.append((i,j)) return k""" ref = '''def full_unroll0(): k = [] __tuple0 = (1, 4) j = __tuple0[1] i = __tuple0[0] builtins.list.append(k, (i, j)) __tuple0 = (2, 5) j = __tuple0[1] i = __tuple0[0] builtins.list.append(k, (i, j)) __tuple0 = (3, 6) j = __tuple0[1] i = __tuple0[0] builtins.list.append(k, (i, j)) return k''' self.check_ast(init, ref, ["pythran.optimizations.ConstantFolding", "pythran.optimizations.LoopFullUnrolling"]) def test_full_unroll1(self): self.run_test(""" def full_unroll1(): c = 0 for i in range(3): for j in range(3): for k in range(3): for l in range(3): c += 1 return c""", full_unroll1=[]) def test_deadcodeelimination(self): init = """ def bar(a): builtins.print(a) return 10 def foo(a): if 1 < bar(a): b = 2 return b""" ref = """\ def bar(a): builtins.print(a) return 10 def foo(a): (1 < bar(a)) return 2""" self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution", "pythran.optimizations.DeadCodeElimination"]) def test_deadcodeelimination2(self): init = """ def foo(a): if 1 
< max(a, 2): b = 2 return b""" ref = """def foo(a): return 2""" self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution", "pythran.optimizations.DeadCodeElimination"]) def test_deadcodeelimination3(self): init = """ def bar(a): return a def foo(a): "omp flush" bar(a) return 2""" ref = """def bar(a): return a def foo(a): 'omp flush' pass return 2""" self.check_ast(init, ref, ["pythran.optimizations.DeadCodeElimination"]) def test_deadcodeelimination4(self): init = 'def noeffect(i): a=[];b=[a]; builtins.list.append(b[0],i); return 1' ref = 'def noeffect(i):\n return 1' self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution", "pythran.optimizations.ConstantFolding", "pythran.optimizations.DeadCodeElimination"]) def test_patternmatching(self): init = """ def foo(a): return len(set(range(len(set(a)))))""" ref = """def foo(a): return builtins.pythran.len_set(builtins.range(builtins.pythran.len_set(a)))""" self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"]) def test_patternmatching2(self): init = """ def foo(a): return reversed(range(len(set(a))))""" ref = """def foo(a): return builtins.range((builtins.pythran.len_set(a) - 1), (-1), (-1))""" self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"]) def test_patternmatching3(self): init = """ def foo(a): return a * a""" ref = """def foo(a): return (a ** 2)""" self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"]) def test_patternmatching4(self): init = """ def foo(a): return a ** .5""" ref = """import numpy as __pythran_import_numpy def foo(a): return __pythran_import_numpy.sqrt(a)""" self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"]) def test_patternmatching5(self): init = """ def foo(a): return a ** (1./3.)""" ref = """import numpy as __pythran_import_numpy def foo(a): return __pythran_import_numpy.cbrt(a)""" self.check_ast(init, ref, ["pythran.optimizations.ConstantFolding", "pythran.optimizations.PatternTransform"]) def 
test_inline_builtins_broadcasting0(self): init = """ import numpy as np def foo(a): return np.array([a, 1]) == 1""" ref = """import numpy as __pythran_import_numpy def foo(a): return __pythran_import_numpy.array(((a == 1), (1 == 1)))""" self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"]) def test_inline_builtins_broadcasting1(self): init = """ import numpy as np def foo(a): return np.asarray([a, 1]) + 1""" ref = """import numpy as __pythran_import_numpy def foo(a): return __pythran_import_numpy.array(((a + 1), (1 + 1)))""" self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"]) def test_inline_builtins_broadcasting2(self): init = """ import numpy as np def foo(a): return - np.asarray([a, 1])""" ref = """import numpy as __pythran_import_numpy def foo(a): return __pythran_import_numpy.array(((- a), (- (1))))""" self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"]) def test_inline_builtins_broadcasting3(self): init = """ import numpy as np def foo(a): return np.asarray([a, 1]) + (3, 3)""" ref = """import numpy as __pythran_import_numpy def foo(a): return __pythran_import_numpy.array(((a + 3), (1 + 3)))""" self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"]) def test_patternmatching3(self): init = """ def foo(a): return a * a""" ref = """def foo(a): return (a ** 2)""" self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"]) class TestConstantUnfolding(TestEnv): def test_constant_folding_int_literals(self): self.run_test("def constant_folding_int_literals(): return 1+2*3.5", constant_folding_int_literals=[]) def test_constant_folding_str_literals(self): self.run_test("def constant_folding_str_literals(): return \"1\"+'2'*3", constant_folding_str_literals=[]) def test_constant_folding_list_literals(self): self.run_test("def constant_folding_list_literals(): return [1]+[2]*3", constant_folding_list_literals=[]) def test_constant_folding_set_literals(self): self.run_test("def 
constant_folding_set_literals(): return {1,2,3,3}", constant_folding_set_literals=[]) def test_constant_folding_builtins(self): self.run_test("def constant_folding_builtins(): return list(map(len,zip(range(2), range(2))))", constant_folding_builtins=[]) def test_constant_folding_imported_functions(self): self.run_test("def constant_folding_imported_functions(): from math import cos ; return float(int(10*cos(1)))", constant_folding_imported_functions=[]) def test_constant_folding_list_method_calls(self): self.run_test("def foo(n): l=[] ; l.append(n) ; return l\ndef constant_folding_list_method_calls(n): return foo(n)", 1, constant_folding_list_method_calls=[int]) def test_constant_folding_complex_calls(self): self.run_test("def constant_folding_complex_calls(): return complex(1,1)", constant_folding_complex_calls=[]) def test_constant_folding_expansive_calls(self): self.run_test("def constant_folding_expansive_calls(): return list(range(2**6))", constant_folding_expansive_calls=[]) def test_constant_folding_too_expansive_calls(self): self.run_test("def constant_folding_too_expansive_calls(): return list(range(2**16))", constant_folding_too_expansive_calls=[]) def test_constant_folding_bool_array(self): self.run_test("def constant_folding_bool_array(): import numpy as np; return np.concatenate([np.array([True]),np.array([True])])", constant_folding_bool_array=[]) class TestAnalyses(TestEnv): def test_imported_ids_shadow_intrinsic(self): self.run_test("def imported_ids_shadow_intrinsic(range): return [ i*range for i in [1,2,3] ]", 2, imported_ids_shadow_intrinsic=[int]) def test_shadowed_variables(self): self.run_test("def shadowed_variables(a): b=1 ; b+=a ; a= 2 ; b+=a ; return a,b", 18, shadowed_variables=[int]) def test_decl_shadow_intrinsic(self): self.run_test("def decl_shadow_intrinsic(l): len=lambda l:1 ; return len(l)", [1,2,3], decl_shadow_intrinsic=[List[int]]) def test_used_def_chains(self): self.run_test("def use_def_chain(a):\n i=a\n for i in range(4):\n 
print(i)\n i=5.4\n print(i)\n break\n i = 4\n return i", 3, use_def_chain=[int]) def test_used_def_chains2(self): self.run_test("def use_def_chain2(a):\n i=a\n for i in range(4):\n print(i)\n i='lala'\n print(i)\n i = 4\n return i", 3, use_def_chain2=[int]) @unittest.skip("Variable defined in a branch in loops are not accepted.") def test_importedids(self): self.run_test("def importedids(a):\n i=a\n for i in range(4):\n if i==0:\n b = []\n else:\n b.append(i)\n return b", 3, importedids=[int]) def test_falsepoly(self): self.run_test("def falsepoly():\n i = 2\n if i:\n i='ok'\n else:\n i='lolo'\n return i", falsepoly=[]) def test_global_effects_unknown(self): code = ''' def bb(x): return x[0]() def ooo(a): def aa(): return a return aa, def global_effects_unknown(a): return bb(ooo(a))''' self.run_test(code, 1, global_effects_unknown=[int]) def test_argument_effects_unknown(self): code = ''' def int_datatype(n): return list, str, n def list_datatype(parent): def parser(value): return parent[0](value) def formatter(value): return parent[1](value) return parser, formatter def argument_effects_unknown(n): list_datatype(int_datatype(n))''' self.run_test(code, 1, argument_effects_unknown=[int]) def test_inlining_globals_side_effect(self): code = ''' import random r = random.random() def inlining_globals_side_effect(): return r == r == r ''' self.run_test(code, inlining_globals_side_effect=[]) def test_subscript_function_aliasing(self): code = ''' SP = 0x20 STX = 0x02 ETX = 0x03 def _div_tuple(base, div): a = base // div b = base % div return a, b def number_datatype(base, dc, fs=6): def parser(value): if not value.isdigit(): raise ValueError("Invalid number") value = int(value) ret = [] while value > 0: a, b = _div_tuple(value, len(base)) ret.insert(0, ord(base[b])) value = a ret = [ord('0')] * (dc - len(ret)) + ret ret = [SP] * (fs - len(ret)) + ret return ret def formatter(v): ret = 0 for a in [chr(c) for c in v][-dc:]: ret = ret * len(base) + base.index(a) return 
str(int(ret)) return parser, formatter def int_datatype(dc, fs=6): return number_datatype(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], dc, fs) def hex_datatype(dc, fs=6): return number_datatype(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'], dc, fs) simple_commands = [('aa', 107, int_datatype(4)), ('bb', 112, int_datatype(1)), ] str_commands = {c: (c, v, f) for c, v, f in simple_commands} def subscript_function_aliasing(id, ai, pfc, value): data = [0x0] * 16 _, pfc, fcts = str_commands[pfc] data[5:9] = int_datatype(4, 4)[0](str(pfc)) data[9:15] = fcts[0](value) return data''' self.run_test(code, 'aa', 2, 'bb', '3', subscript_function_aliasing=[str, int, str, str]) def test_range_simplify_jl(self): code = ''' import numpy as np silent = 0 def B(n): TS = 10 outSig = [] while n: outSamps = np.zeros((10, 2)) outSig.append(outSamps.copy()) outSamps = np.zeros((10, 2)) outSig.append(outSamps.copy()) return outSig, TS def range_simplify_jl(n): outSignal, TS = B(n) return (outSignal)''' self.run_test(code, 0, range_simplify_jl=[int]) def test_range_simplify_subscript(self): code = ''' def LooperMaster___init__(): self_userUseTempo = 1 self = [self_userUseTempo] return self def range_simplify_subscript(n): ML = LooperMaster___init__() ML[0] = n return ML''' self.run_test(code, 1, range_simplify_subscript=[int]) def test_insert_none0(self): code = ''' def insert_none0(x): for ii in range(len(x)): if x[ii]: return x[ii] else: return 0''' self.run_test(code, [], insert_none0=[List[int]]) pythran-0.10.0+ds2/pythran/tests/test_os.py000066400000000000000000000015501416264035500206310ustar00rootroot00000000000000from pythran.tests import TestEnv @TestEnv.module class TestOs(TestEnv): def test_os_path(self): self.run_test('def os_path(l): import os; return os.path.join(l)', "mypath", os_path=[str]) def test_os_path2(self): self.run_test('def os_path2(l): import os; return os.path.join("lili", l)', "mypath", os_path2=[str]) def 
test_os_path3(self): self.run_test('def os_path3(l): import os; return os.path.join("lili", l)', "/mypath", os_path3=[str]) def test_os_path4(self): self.run_test('def os_path4(l): import os; return os.path.join("lili/", l)', "mypath", os_path4=[str]) def test_os_path5(self): self.run_test('def os_path5(l): import os; return os.path.join("", l)', "mypath", os_path5=[str]) pythran-0.10.0+ds2/pythran/tests/test_random.py000066400000000000000000000120311416264035500214640ustar00rootroot00000000000000""" Check the random module behavior. """ from pythran.tests import TestEnv @TestEnv.module class TestRandom(TestEnv): """ Check implementation for usual random functions. Functions are: - random.random - random.gauss - random.uniform - random.expovariate - random.randrange - random.randint - random.sample - random.choice - random.seed """ def test_random(self): """ Check random.random is a uniform distributed generator. """ self.run_test(""" def random(n): from random import random s = sum(random() for x in range(n)) return abs(s / n - 0.5) < .05""", 10 ** 5, random=[int]) def test_gauss(self): """ Check random.gauss distribution. """ self.run_test(""" def gauss(n, mu, sigma): from random import gauss s = sum(gauss(mu,sigma) for x in range(n)) return abs(s / n - mu) / sigma < .05""", 10 ** 6, 5, 2, gauss=[int, int, int]) def test_uniform(self): """ Check uniform distribution between b and e. """ self.run_test(""" def uniform(n, b, e): from random import uniform s = sum(uniform(b, e) for x in range(n)) return abs(s / n - (b + e) * .5) < .05""", 10 ** 6, 5, 25, uniform=[int, int, int]) def test_expovariate(self): """ Check expovariate distribution. """ self.run_test(""" def expovariate(n, l): from random import expovariate s = sum(expovariate(l) for x in range(n)) return abs(s / n - 1 / l) < .05""", 10 ** 6, 5., expovariate=[int, float]) def test_randrange0(self): """ Check randrange with only end value. 
""" self.run_test(""" def randrange0(n): from random import randrange s = sum(randrange(n) for x in range(n)) return abs(s / float(n * n) - .5) < .05""", 10 ** 5, randrange0=[int]) def test_randrange1(self): """ Check randrange with begin and end. """ self.run_test(""" def randrange1(n): from random import randrange s = sum(randrange(-n, n + 1) for x in range(n)) return abs(s / float(n * n)) < .05""", 10 ** 5, randrange1=[int]) def test_randrange2(self): """ Check randrange step generated values. """ self.run_test(""" def randrange2(n): from random import randrange return all(randrange(3, n, 3) % 3 == 0 for x in range(n))""", 10 ** 4, randrange2=[int]) def test_randint(self): """ Check randint distribution. """ self.run_test(""" def randint(n): from random import randint s = sum(randint(0,100) for x in range(n)) return abs(s / float(n * 100) - 1 / 2.) < .05""", 10 ** 6, randint=[int]) def test_sample_(self): """ Check sample picked values distribution. """ self.run_test(""" def sample(n, k): from random import sample s = sum(sum(sample(range(100), k)) for x in range(n)) return abs(s / float(k * n) - 99 / 2.) < .05""", 10 ** 6, 4, sample=[int, int]) def test_shuffle1(self): """ Check shuffling with default random function. """ self.run_test(""" def shuffle1(n): from random import shuffle r = list(range(n)) shuffle(r) return r != list(range(n)) and sorted(r) == list(range(n))""", 10, shuffle1=[int]) def test_shuffle2(self): """ Check shuffling with custom function. """ self.run_test(""" def shuffle2(n): from random import shuffle r = list(range(n)) shuffle(r, lambda: 0) return r != list(range(n)) and sorted(r) == list(range(n))""", 10 ** 4, shuffle2=[int]) def test_shuffle3(self): """ Check shuffling with random function. 
""" self.run_test(""" def shuffle3(n): from random import shuffle, random r = list(range(n)) shuffle(r, random) return r != list(range(n)) and sorted(r) == list(range(n))""", 10 ** 4, shuffle3=[int]) def test_choice(self): """ Check choice picked values distribution. """ self.run_test(""" def choice(n): from random import choice s = sum(choice(range(100)) for x in range(n)) return abs(s / float(n) - 99 / 2.) < .05""", 10 ** 7, choice=[int]) def test_random_seed(self): """ Check seeded random generate always the same value. """ self.run_test(""" def random_seed(): from random import random, seed seed(1) a = random() seed(1) b = random() return a == b""", random_seed=[]) pythran-0.10.0+ds2/pythran/tests/test_rec.py000066400000000000000000000005721416264035500207640ustar00rootroot00000000000000from pythran.tests import TestEnv class TestBase(TestEnv): def test_rec0(self): self.run_test(""" def test_rec0(n): z = 1 if n > 1: z = n * test_rec0(n-1) return z""", 5, test_rec0=[int]) def test_rec1(self): self.run_test(""" def test_rec1(n): z = 1 while n > 1: z = n * test_rec1(n-1) n -= 1 return z""", 5, test_rec1=[int]) pythran-0.10.0+ds2/pythran/tests/test_rosetta.py000066400000000000000000000003631416264035500216720ustar00rootroot00000000000000import unittest from pythran.tests import TestFromDir import os class TestRosetta(TestFromDir): path = os.path.join(os.path.dirname(__file__),"rosetta") TestRosetta.populate(TestRosetta) if __name__ == '__main__': unittest.main() pythran-0.10.0+ds2/pythran/tests/test_scipy.py000066400000000000000000000114401416264035500213360ustar00rootroot00000000000000from pythran.typing import List, NDArray from pythran.tests import TestFromDir, TestEnv import os import numpy as np import platform # from http://www.scipy.org/Download , weave/example directory class TestScipy(TestFromDir): def test_laplace(self): code=""" def laplace(u,dx, dy): nx, ny=len(u), len(u[0]) for i in range(1, nx-1): for j in range(1, ny-1): u[i][j] = ((u[i-1][j] + 
u[i+1][j])*dy**2 + (u[i][j-1] + u[i][j+1])*dx**2)/(2.0*(dx**2 + dy**2)) """ self.run_test(code, [[0.1,0.2,0.3],[0.1,0.2,0.3],[0.1,0.2,0.3]], 0.01, 0.02, laplace=[List[List[float]], float, float]) def test_recursive_fibonnaci(self): code=""" def recursive_fibonnaci(a): if a <= 2: return 1 else: return recursive_fibonnaci(a-2) + recursive_fibonnaci(a-1) """ self.run_test(code, 5, recursive_fibonnaci=[int]) def test_iterative_fibonnaci(self): code=""" def iterative_fibonnaci(a): if a <= 2: return 1 last = next_to_last = 1 i = 2 while i < a: result = last + next_to_last next_to_last = last last = result i+=1 return result; """ self.run_test(code, 5, iterative_fibonnaci=[int]) def test_binary_search(self): code=""" def binary_search(seq, t): min = 0; max = len(seq) - 1 while 1: if max < min: return -1 m = (min + max) // 2 if seq[m] < t: min = m + 1 elif seq[m] > t: max = m - 1 else: return m """ self.run_test(code,[1,2,3,4,5,6,7,8,9], 4, binary_search=[List[int], int]) def test_ramp(self): code=""" def ramp(result, start, end): size=len(result) assert size > 1 step = (end-start)//(size-1) for i in range(size): result[i] = start + step*i """ self.run_test(code,[0. 
for x in range(10)], 1.5, 9.5, ramp=[List[float], float, float]) path = os.path.join(os.path.dirname(__file__), "scipy") TestScipy.populate(TestScipy) class TestPyData(TestFromDir): path = os.path.join(os.path.dirname(__file__), "pydata") TestPyData.populate(TestPyData) class TestScikitImage(TestFromDir): path = os.path.join(os.path.dirname(__file__), "scikit-image") TestPyData.populate(TestScikitImage) try: import scipy if not platform.machine().startswith('ppc64'): @TestEnv.module class TestScipySpecial(TestEnv): def test_jv_scalar(self): self.run_test(""" from scipy.special import jv def jv_scalar(v, x): return jv(v, x)""", 5, 1.414, jv_scalar=[int, float]) def test_spherical_jn_scalar(self): self.run_test(""" from scipy.special import spherical_jn def spherical_bessel_j_scalar(v, x): return spherical_jn(v, x)""", 5, 1.414, spherical_bessel_j_scalar=[int, float]) def test_spherical_jn_arg1d(self): self.run_test(""" from scipy.special import spherical_jn def spherical_bessel_j_1d(v, x): return spherical_jn(v, x)""", 5, np.array([1.0, 2.0, 3.0]), spherical_bessel_j_1d=[int, NDArray[float,:]]) def test_spherical_jn_order1d(self): self.run_test(""" from scipy.special import spherical_jn def spherical_bessel_j_order1d(v, x): return spherical_jn(v, x)""", np.array([1, 2, 3]), 5.0, spherical_bessel_j_order1d=[NDArray[int,:], float]) def test_spherical_jn_arg2d(self): self.run_test(""" from scipy.special import spherical_jn def spherical_bessel_j_2d(v, x): return spherical_jn(v, x)""", 5, np.array([[1.0, 2.0], [3.0, 4.0]]), spherical_bessel_j_2d=[int, NDArray[float,:,:]]) def test_binom_scalar(self): self.run_test(""" from scipy.special import binom def binom_scalar(v, x): return binom(v, x)""", 5, 4, binom_scalar=[int, int]) def test_binom_arg1d(self): self.run_test(""" from scipy.special import binom def binom_1d(v, x): return binom(v, x)""", 5, np.array([1, 2, 3]), binom_1d=[int, NDArray[int,:]]) except ImportError: pass 
pythran-0.10.0+ds2/pythran/tests/test_set.py000066400000000000000000000320361416264035500210060ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import Set, List, Tuple class TestSet(TestEnv): def test_cpy_constructor(self): code=""" def are_equal(s1): s2 = set(s1) return s2 == s1 """ self.run_test(code, {'jack', 'sjoerd'}, are_equal=[Set[str]]) def test_in(self): self.run_test("def _in(a,b):\n return b in a", {'aze', 'qsd'},'qsd', _in=[Set[str],str]) def test_empty_in(self): self.run_test("def empty_in(b):\n return b in set()",'qsd', empty_in=[str]) def test_len(self): self.run_test("def _len(a):\n return len(a)", {'aze', 'qsd', 'azeqsd'}, _len=[Set[str]]) def test_disjoint(self): self.run_test("def _isdisjoint(a,b):\n return a.isdisjoint(b)", {1,3,2}, {7.,2.,5.}, _isdisjoint=[Set[int],Set[float]]) def test_operator_le(self): self.run_test("def _le(a,b):\n return a <= b", {1.,5.}, {1,2,5}, _le=[Set[float],Set[int]]) def test_issubset(self): self.run_test("def _issubset(a,b):\n return a.issubset(b)", {1.,5.}, {1,2,5}, _issubset=[Set[float],Set[int]]) def test_operator_lt(self): self.run_test("def _lt(a,b):\n return a < b", {1.,5.}, {1,2,5}, _lt=[Set[float],Set[int]]) def test_operator_ge(self): self.run_test("def _ge(a,b):\n return a >= b", {1.,5.}, {1,2,5}, _ge=[Set[float],Set[int]]) def test_issuperset(self): self.run_test("def _issuperset(a,b):\n return a.issuperset(b)", {1.,5.}, {1,2,5}, _issuperset=[Set[float],Set[int]]) def test_operator_gt(self): self.run_test("def _gt(a,b):\n return a > b", {1.,5.}, {1,2,5}, _gt=[Set[float],Set[int]]) def test_operator_is(self): self.run_test("def _is(a,b):\n return a is b", {1.,5.}, {1,2,5}, _is=[Set[float],Set[int]]) def test_clear(self): self.run_test("def _clear(a):\n a.clear()\n return a", {1.,5.}, _clear=[Set[float]]) def test_pop(self): self.run_test("def _pop(a):\n a.pop()\n return a", {1.,5.}, _pop=[Set[float]]) def test_remove(self): self.run_test("def _remove(a,b):\n a.remove(b)\n 
return a", {1,3}, 1., _remove=[Set[int], float]) def test_remove_strict(self): self.run_test("def _remove_strict(a,b):\n a.remove(b)\n return a <= {3} and a >= {3}", {1,3}, 1., _remove_strict=[Set[int], float]) def test_discard(self): self.run_test("def _discard(a ,b):\n a.discard(b)\n return a", {1,3}, 1., _discard=[Set[int],float]) def test_copy(self): self.run_test("def _copy(a):\n b=a.copy()\n return (a <= {3}) and (a >= {3}) and (not (a is b))", {1,3}, _copy=[Set[int]]) def test_fct_union(self): self.run_test("def _fct_union(b, c):\n a={1.}\n return a.union(b, c)", {1,3}, {1.,3.,4.,5.,6.} , _fct_union=[Set[int],Set[float]]) def test_fct_union_empty_set(self): self.run_test("def _fct_union_empty_set(b, c):\n a=set()\n return a.union(b, c)", {1,3}, {1.,3.,4.,5.,6.} , _fct_union_empty_set=[Set[int],Set[float]]) def test_fct_union_empty_set_list(self): self.run_test("def _fct_union_empty_set_list(b, c):\n a=set()\n return a.union(b, c)", {1,3}, [1.,3.,4.,5.,6.] , _fct_union_empty_set_list=[Set[int],List[float]]) def test_fct_union_list(self): self.run_test("def _fct_union_list(b, c):\n a={1.}\n return a.union(b, c)", [1,3], {1.,3.,4.,5.,6.} , _fct_union_list=[List[int],Set[float]]) def test_fct_union_1arg(self): self.run_test("def _fct_union_1arg(b):\n a={1.}\n return a.union(b)", {1,3,4,5,6}, _fct_union_1arg=[Set[int]]) def test_operator_union(self): self.run_test("def _operator_union(b, c):\n a={1.}\n return (a | b | c)", {1,3,4,5,6}, {1.,2.,4.}, _operator_union=[Set[int],Set[float]]) def test_update(self): self.run_test("def _update(b, c):\n a={1.}\n a.update(b, c)\n return a", {1,3}, {1.,3.,4.,5.,6.} , _update=[Set[int],Set[float]]) def test_update_list(self): self.run_test("def _update_list(b, c):\n a={1.}; a.update(b, c); return a", {1,3}, [1.,3.,4.,5.,6.] 
, _update_list=[Set[int],List[float]]) def test_update_range(self): self.run_test("def _update_range(b):\n a=set(); a.update(range(b)); return a", 3, _update_range=[int]) def test_update_map(self): self.run_test("def _update_map(b):\n a=set(); a.update((x**2 for x in range(b))); return a", 3, _update_map=[int]) def test_update_prod(self): self.run_test("def _update_prod(b):\n from itertools import product; a=set(); a.update(product(range(b), range(b))); return a", 3, _update_prod=[int]) def test_update_combinations(self): self.run_test("def _update_comb(b):\n from itertools import combinations; a=set(); a.update(combinations(range(b), 2)); return a", 3, _update_comb=[int]) def test_update_islice(self): self.run_test("def _update_islice(b):\n from itertools import islice; a=set(); a.update(islice(range(b), 1, 4)); return a", 8, _update_islice=[int]) def test_update_permutations(self): self.run_test("def _update_perm(b):\n from itertools import permutations; a=set(); a.update(permutations(range(b))); return a", 3, _update_perm=[int]) def test_update_repeat(self): self.run_test("def _update_rep(b):\n from itertools import repeat; a=set(); a.update(repeat(b, 2)); return a", 3, _update_rep=[int]) def test_update_empty_set_list(self): self.run_test("def _update_empty_set_list(b, c):\n a=set()\n a.update(b, c)\n return a", {1,3}, [1.,3.,4.,5.,6.] 
, _update_empty_set_list=[Set[int],List[float]]) def test_operator_update(self): self.run_test("def _operator_update(b, c):\n a={1.,10.}\n a |= b | c\n return a", {1,3,4,5,6}, {1.,2.,4.}, _operator_update=[Set[int],Set[float]]) def test_fct_intersection(self): self.run_test("def _fct_intersection(b, c):\n a={1.}\n return a.intersection(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection=[Set[int],Set[float]]) def test_fct_intersection_empty_set(self): self.run_test("def _fct_intersection_empty_set(b, c):\n a=set()\n return a.intersection(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection_empty_set=[Set[int],Set[float]]) def test_fct_intersection_list(self): self.run_test("def _fct_intersection_list(b, c):\n a={1.}\n return a.intersection(b,c)", {1,3,4,5,6}, [1.,2.,4.], _fct_intersection_list=[Set[int],List[float]]) def test_operator_intersection(self): self.run_test("def _operator_intersection(b, c):\n a={1.}\n return (a & b & c)", {1,3,4,5,6}, {1.,2.,4.}, _operator_intersection=[Set[int],Set[float]]) def test_fct_intersection_update(self): self.run_test("def _fct_intersection_update(b, c):\n a={1.,10.}\n return a.intersection_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection_update=[Set[int],Set[float]]) def test_fct_intersection_update_empty_set(self): self.run_test("def _fct_intersection_update_empty_set(b, c):\n a=set()\n return a.intersection_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_intersection_update_empty_set=[Set[int],Set[float]]) def test_fct_intersection_empty_set_update(self): self.run_test("def _fct_intersection_empty_set_update(c):\n a={1}\n b=set()\n return a.intersection_update(b,c)", {1.,2.,4.}, _fct_intersection_empty_set_update=[Set[float]]) def test_fct_intersection_update_list(self): self.run_test("def _fct_intersection_update_list(b, c):\n a={1.,10.}\n return a.intersection_update(b,c)", [1,3,4,5,6], {1.,2.,4.}, _fct_intersection_update_list=[List[int],Set[float]]) def test_operator_intersection_update(self): self.run_test("def 
_operator_intersection_update(b, c):\n a={1.}\n a &= b & c\n return a", {1,3,4,5,6}, {1.,2.,4.}, _operator_intersection_update=[Set[int],Set[float]]) def test_operator_intersection_update_empty_set(self): """ Check intersection update on an empty set. """ self.run_test(""" def _operator_intersection_update_empty_set(b, c): a = set() a &= b & c return a""", {1, 3, 4, 5, 6}, {1., 2., 4.}, _operator_intersection_update_empty_set=[Set[int], Set[float]]) def test_fct_difference(self): self.run_test("def _fct_difference(b, c):\n a={1.,5.,10.}\n return a.difference(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference=[Set[int],Set[float]]) def test_fct_difference_empty_set(self): self.run_test("def _fct_difference_empty_set(b, c):\n a=set()\n return a.difference(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference_empty_set=[Set[int],Set[float]]) def test_fct_difference_list(self): self.run_test("def _fct_difference_list(b, c):\n a={1.,5.,10.}\n return a.difference(b,c)", [1,3,4,5,6], {1.,2.,4.}, _fct_difference_list=[List[int],Set[float]]) def test_operator_difference(self): self.run_test("def _operator_difference(b, c):\n a={1.}\n return (a - b - c)", {1,3,4,5,6}, {1.,2.,4.}, _operator_difference=[Set[int],Set[float]]) def test_operator_difference_1arg(self): self.run_test("def _operator_difference_1arg(b):\n a={1.,2.,5.}\n return (b - a)", {1,3,4,5,6}, _operator_difference_1arg=[Set[int]]) def test_fct_difference_update(self): self.run_test("def _fct_difference_update(b, c):\n a={1.,5.,10.}\n return a.difference_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference_update=[Set[int],Set[float]]) def test_fct_difference_update_empty_set(self): self.run_test("def _fct_difference_update_empty_set(b, c):\n a=set()\n return a.difference_update(b,c)", {1,3,4,5,6}, {1.,2.,4.}, _fct_difference_update_empty_set=[Set[int],Set[float]]) def test_fct_difference_update_list(self): self.run_test("def _fct_difference_update_list(b, c):\n a={1.,5.,10.}\n return a.difference_update(b,c)", 
{1,3,4,5,6}, [1.,2.,4.], _fct_difference_update_list=[Set[int],List[float]]) def test_operator_difference_update(self): self.run_test("def _operator_difference_update(b, c):\n a={1.}\n a -= b - c\n return a", {1,3,4,5,6}, {1.,2.,4.}, _operator_difference_update=[Set[int],Set[float]]) def test_fct_symmetric_difference(self): self.run_test("def _fct_symmetric_difference(b, c):\n return (b.symmetric_difference(c))", {1,3,6}, {1.,2.,5.}, _fct_symmetric_difference=[Set[int],Set[float]]) def test_fct_symmetric_difference_empty_set(self): self.run_test("def _fct_symmetric_difference_empty_set(c):\n b=set()\n return (b.symmetric_difference(c))", {1.,2.,5.}, _fct_symmetric_difference_empty_set=[Set[float]]) def test_fct_symmetric_difference_list(self): self.run_test("def _fct_symmetric_difference_list(b, c):\n return (b.symmetric_difference(c))", {1,3,6}, [1.,2.,5.], _fct_symmetric_difference_list=[Set[int],List[float]]) def test_operator_symmetric_difference(self): self.run_test("def _operator_symmetric_difference(b, c):\n return (b ^ c)", {1,3,6}, {1.,2.,5.}, _operator_symmetric_difference=[Set[int],Set[float]]) def test_fct_symmetric_difference_update(self): self.run_test("def _fct_symmetric_difference_update(b, c):\n return (c.symmetric_difference_update(b))", {1,3,6}, {1.,2.,5.}, _fct_symmetric_difference_update=[Set[int],Set[float]]) def test_fct_symmetric_difference_update_empty_set(self): self.run_test("def _fct_symmetric_difference_update_empty_set(b):\n c=set()\n return (c.symmetric_difference_update(b))", {1.,2.,5.}, _fct_symmetric_difference_update_empty_set=[Set[float]]) def test_fct_symmetric_difference_update2(self): self.run_test("def _fct_symmetric_difference_update2(b, c):\n return (b.symmetric_difference_update(c))", {1,3,6}, {1.,2.,5.}, _fct_symmetric_difference_update2=[Set[int],Set[float]]) def test_fct_symmetric_difference_update_list(self): self.run_test("def _fct_symmetric_difference_update_list(b, c):\n return (b.symmetric_difference_update(c))", 
{1,3,6}, [1.,2.,5.], _fct_symmetric_difference_update_list=[Set[int],List[float]]) def test_operator_symmetric_difference_update(self): self.run_test("def _operator_symmetric_difference_update(b, c):\n b ^= c\n return b", {1,3,6}, {1.,2.,5.}, _operator_symmetric_difference_update=[Set[int],Set[float]]) def test_operator_symmetric_difference_update2(self): self.run_test("def _operator_symmetric_difference_update2(b, c):\n c ^= b\n return c", {1,3,6}, {1.,2.,5.}, _operator_symmetric_difference_update2=[Set[int],Set[float]]) # Check if conflict between set.pop() & list.pop() def test_conflict_pop(self): self.run_test("def _conflict_pop(a,b):\n a.pop()\n b.pop()\n return len(a)+len(b)", {1.,5.}, [1,2], _conflict_pop=[Set[float],List[int]]) def test_set_to_bool_conversion(self): self.run_test("def set_to_bool_conversion(s, t): return (1 if s else 0), (t if t else set())", set(), {1, 2},set_to_bool_conversion=[Set[int], Set[int]]) def test_print_set(self): self.run_test("def print_set(s): return str(s)", {1, 2}, print_set=[Set[int]]) def test_print_empty_set(self): self.run_test("def print_empty_set(s): return str(s)", set(), print_empty_set=[Set[int]]) def test_set_of_tuple(self): self.run_test("def set_of_tuple(s): return set(s)", (1,2,2,3), set_of_tuple=[Tuple[int,int, int, int]]) pythran-0.10.0+ds2/pythran/tests/test_slice.py000066400000000000000000000722631416264035500213200ustar00rootroot00000000000000""" Module to test slice implementation. """ import numpy from pythran.typing import List, NDArray from pythran.tests import TestEnv class TestSlice(TestEnv): """ Unittest class for code using slices. We skip tests for None step as it is equivalent to 1. TODO : add tests for 1 == step (None as step) """ def test_empty_slices(self): code = 'def empty_slices(x): return x[100:], x[100::2]' self.run_test(code, numpy.arange(90), empty_slices=[NDArray[int,:]]) def test_slice_combination1(self): """ Check for "all none" combination. 
""" code = """ def slice_combination1(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::-4][begin:end:step], a[::4][begin:end:step], a[87::-4][begin:end:step], a[1::4][begin:end:step], a[-3::-4][begin:end:step], a[-89::4][begin:end:step], a[88:1:-4][begin:end:step], a[1:88:4][begin:end:step], a[-2:1:-4][begin:end:step], a[-89:88:4][begin:end:step], a[88:-88:-4][begin:end:step], a[2:-1:4][begin:end:step], a[-1:-88:-4][begin:end:step], a[-88:-1:4][begin:end:step], a[:1:-4][begin:end:step], a[:87:4][begin:end:step], a[:-87:-4][begin:end:step], a[:-3:4][begin:end:step]) """.format(begin=None, end=None, step=None) self.run_test(code, numpy.arange(90), slice_combination1=[NDArray[int,:]]) def test_slice_combination2(self): """ Check for positive step combination. """ code = """ def slice_combination2(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::-4][begin:end:step], a[::4][begin:end:step], a[87::-4][begin:end:step], a[1::4][begin:end:step], a[-3::-4][begin:end:step], a[-89::4][begin:end:step], a[88:1:-4][begin:end:step], a[1:88:4][begin:end:step], a[-2:1:-4][begin:end:step], a[-89:88:4][begin:end:step], a[88:-88:-4][begin:end:step], a[2:-1:4][begin:end:step], a[-1:-88:-4][begin:end:step], a[-88:-1:4][begin:end:step], a[:1:-4][begin:end:step], a[:87:4][begin:end:step], a[:-87:-4][begin:end:step], a[:-3:4][begin:end:step]) """.format(begin=None, end=None, step=2) self.run_test(code, numpy.arange(90), slice_combination2=[NDArray[int, :]]) def test_slice_combination3(self): """ Check for negative step combination. 
""" code = """ def slice_combination3(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step]) # Reversing values with not continuous step is not implemented # a[::-4][begin:end:step], # a[::4][begin:end:step], # a[87::-4][begin:end:step], # a[1::4][begin:end:step], # a[-3::-4][begin:end:step], # a[-89::4][begin:end:step], # a[88:1:-4][begin:end:step], # a[1:88:4][begin:end:step], # a[-2:1:-4][begin:end:step], # a[-89:88:4][begin:end:step], # a[88:-88:-4][begin:end:step], # a[2:-1:4][begin:end:step], # a[-1:-88:-4][begin:end:step], # a[-88:-1:4][begin:end:step], # a[:1:-4][begin:end:step], # a[:87:4][begin:end:step], # a[:-87:-4][begin:end:step], # a[:-3:4][begin:end:step]) """.format(begin=None, end=None, step=-2) self.run_test(code, numpy.arange(90), slice_combination3=[NDArray[int, :]]) def test_slice_combination4(self): """ Check for pos step/no begin/pos end combination. """ code = """ def slice_combination4(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::4][begin:end:step], a[87::-4][begin:end:step], a[1::4][begin:end:step], a[-3::-4][begin:end:step], a[-89::4][begin:end:step], a[88:1:-4][begin:end:step], a[1:88:4][begin:end:step], a[-2:1:-4][begin:end:step], a[-89:88:4][begin:end:step], a[88:-88:-4][begin:end:step], a[2:-1:4][begin:end:step], a[-1:-88:-4][begin:end:step], a[-88:-1:4][begin:end:step], a[:1:-4][begin:end:step], a[:87:4][begin:end:step], a[:-87:-4][begin:end:step], a[:-3:4][begin:end:step]) """.format(begin=None, end=7, step=2) self.run_test(code, numpy.arange(90), slice_combination4=[NDArray[int, :]]) def test_slice_combination5(self): """ Check for pos step/no begin/neg end combination. 
""" code = """ def slice_combination5(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step]) # Not implementer for negative end # a[::4][begin:end:step], # a[87::-4][begin:end:step], # a[1::4][begin:end:step], # a[-3::-4][begin:end:step], # a[-89::4][begin:end:step], # a[88:1:-4][begin:end:step], # a[1:88:4][begin:end:step], # a[-2:1:-4][begin:end:step], # a[-89:88:4][begin:end:step], # a[88:-88:-4][begin:end:step], # a[2:-1:4][begin:end:step], # a[-1:-88:-4][begin:end:step], # a[-88:-1:4][begin:end:step], # a[:1:-4][begin:end:step], # a[:87:4][begin:end:step], # a[:-87:-4][begin:end:step], # a[:-3:4][begin:end:step]) """.format(begin=None, end=-3, step=2) self.run_test(code, numpy.arange(90), slice_combination5=[NDArray[int, :]]) def test_slice_combination6(self): """ Check for pos step/pos begin/no end combination. """ code = """ def slice_combination6(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::4][begin:end:step], a[87::-4][begin:end:step], a[1::4][begin:end:step], a[-3::-4][begin:end:step], a[-89::4][begin:end:step], a[88:1:-4][begin:end:step], a[1:88:4][begin:end:step], a[-2:1:-4][begin:end:step], a[-89:88:4][begin:end:step], a[88:-88:-4][begin:end:step], a[2:-1:4][begin:end:step], a[-1:-88:-4][begin:end:step], a[-88:-1:4][begin:end:step], a[:1:-4][begin:end:step], a[:87:4][begin:end:step], a[:-87:-4][begin:end:step], a[:-3:4][begin:end:step]) """.format(begin=2, end=None, step=2) self.run_test(code, numpy.arange(90), slice_combination6=[NDArray[int, :]]) def test_slice_combination7(self): """ Check for pos step/pos begin/pos end combination. 
""" code = """ def slice_combination7(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::4][begin:end:step], a[87::-4][begin:end:step], a[1::4][begin:end:step], a[-3::-4][begin:end:step], a[-89::4][begin:end:step], a[88:1:-4][begin:end:step], a[1:88:4][begin:end:step], a[-2:1:-4][begin:end:step], a[-89:88:4][begin:end:step], a[88:-88:-4][begin:end:step], a[2:-1:4][begin:end:step], a[-1:-88:-4][begin:end:step], a[-88:-1:4][begin:end:step], a[:1:-4][begin:end:step], a[:87:4][begin:end:step], a[:-87:-4][begin:end:step], a[:-3:4][begin:end:step]) """.format(begin=2, end=9, step=2) self.run_test(code, numpy.arange(90), slice_combination7=[NDArray[int, :]]) def test_slice_combination8(self): """ Check for pos step/neg begin/no end combination. """ code = """ def slice_combination8(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step]) # Not implementer for negative begin # a[::4][begin:end:step], # a[87::-4][begin:end:step], # a[1::4][begin:end:step], # a[-3::-4][begin:end:step], # a[-89::4][begin:end:step], # a[88:1:-4][begin:end:step], # a[1:88:4][begin:end:step], # a[-2:1:-4][begin:end:step], # a[-89:88:4][begin:end:step], # a[88:-88:-4][begin:end:step], # a[2:-1:4][begin:end:step], # a[-1:-88:-4][begin:end:step], # a[-88:-1:4][begin:end:step], # a[:1:-4][begin:end:step], # a[:87:4][begin:end:step], # a[:-87:-4][begin:end:step], # a[:-3:4][begin:end:step]) """.format(begin=-10, end=None, step=2) self.run_test(code, numpy.arange(90), slice_combination8=[NDArray[int, :]]) def test_step1slice_combination1(self): """ Check for "all none" combination. 
""" code = """ def step1slice_combination1(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::-1][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=None, end=None, step=None) self.run_test(code, numpy.arange(90), step1slice_combination1=[NDArray[int, :]]) def test_step1slice_combination2(self): """ Check for positive step combination. """ code = """ def step1slice_combination2(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::-1][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=None, end=None, step=2) self.run_test(code, numpy.arange(90), step1slice_combination2=[NDArray[int, :]]) def test_step1slice_combination3(self): """ Check for negative step combination. 
""" code = """ def step1slice_combination3(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::-1][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-2:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=None, end=None, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination3=[NDArray[int, :]]) def test_step1slice_combination4(self): """ Check for pos step/no begin/pos end combination. """ code = """ def step1slice_combination4(a): begin = {begin} end = {end} step = {step} return(a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=None, end=7, step=2) self.run_test(code, numpy.arange(90), step1slice_combination4=[NDArray[int, :]]) def test_step1slice_combination5(self): """ Check for pos step/no begin/neg end combination. 
""" code = """ def step1slice_combination5(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=None, end=-3, step=2) self.run_test(code, numpy.arange(90), step1slice_combination5=[NDArray[int, :]]) def test_step1slice_combination6(self): """ Check for pos step/pos begin/no end combination. """ code = """ def step1slice_combination6(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=2, end=None, step=2) self.run_test(code, numpy.arange(90), step1slice_combination6=[NDArray[int, :]]) def test_step1slice_combination7(self): """ Check for pos step/pos begin/pos end combination. 
""" code = """ def step1slice_combination7(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=2, end=9, step=2) self.run_test(code, numpy.arange(90), step1slice_combination7=[NDArray[int, :]]) def test_step1slice_combination8(self): """ Check for pos step/neg begin/no end combination. """ code = """ def step1slice_combination8(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=-10, end=None, step=2) self.run_test(code, numpy.arange(90), step1slice_combination8=[NDArray[int, :]]) def test_step1slice_combination9(self): """ Check for neg step/no begin/pos end combination. 
""" code = """ def step1slice_combination9(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=None, end=2, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination9=[NDArray[int, :]]) def test_step1slice_combination10(self): """ Check for neg step/no begin/neg end combination. """ code = """ def step1slice_combination10(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=None, end=-10, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination10=[NDArray[int, :]]) def test_step1slice_combination11(self): """ Check for neg step/pos begin/neg end combination. 
""" code = """ def step1slice_combination11(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=85, end=-10, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination11=[NDArray[int, :]]) def test_step1slice_combination12(self): """ Check for neg step/pos begin/no end combination. """ code = """ def step1slice_combination12(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=85, end=None, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination12=[NDArray[int, :]]) def test_step1slice_combination13(self): """ Check for neg step/pos begin/pos end combination. 
""" code = """ def step1slice_combination13(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=85, end=3, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination13=[NDArray[int, :]]) def test_step1slice_combination14(self): """ Check for pos step/neg begin/no end combination. """ code = """ def step1slice_combination14(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=-3, end=None, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination14=[NDArray[int, :]]) def test_step1slice_combination15(self): """ Check for neg step/neg begin/pos end combination. 
""" code = """ def step1slice_combination15(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=-3, end=4, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination15=[NDArray[int, :]]) def test_step1slice_combination16(self): """ Check for neg step/neg begin/neg end combination. """ code = """ def step1slice_combination16(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=-3, end=-10, step=-2) self.run_test(code, numpy.arange(90), step1slice_combination16=[NDArray[int, :]]) def test_step1slice_combination17(self): """ Check for pos step/pos begin/neg end combination. 
""" code = """ def step1slice_combination17(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=3, end=-10, step=2) self.run_test(code, numpy.arange(90), step1slice_combination17=[NDArray[int, :]]) def test_step1slice_combination18(self): """ Check for pos step/pos begin/neg end combination. """ code = """ def step1slice_combination18(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=-80, end=80, step=2) self.run_test(code, numpy.arange(90), step1slice_combination18=[NDArray[int, :]]) def test_step1slice_combination19(self): """ Check for pos step/neg begin/neg end combination. 
""" code = """ def step1slice_combination19(a): begin = {begin} end = {end} step = {step} return (a[::][begin:end:step], a[::1][begin:end:step], a[87::-1][begin:end:step], a[1::1][begin:end:step], a[-3::-1][begin:end:step], a[-89::1][begin:end:step], a[88:1:-1][begin:end:step], a[1:88:1][begin:end:step], a[-2:1:-1][begin:end:step], a[-89:88:1][begin:end:step], a[88:-88:-1][begin:end:step], a[2:-1:1][begin:end:step], a[-1:-88:-1][begin:end:step], a[-88:-1:1][begin:end:step], a[:1:-1][begin:end:step], a[:87:1][begin:end:step], a[:-87:-1][begin:end:step], a[:-3:1][begin:end:step]) """.format(begin=-80, end=-2, step=2) self.run_test(code, numpy.arange(90), step1slice_combination19=[NDArray[int, :]]) def test_slice_transpose0(self): code = ''' import numpy as np def slice_transpose0(n): base = np.zeros((16, n)).T slice1 = base[:10, 10:] # should have shape (10, 6) return slice1''' self.run_test(code, 16, slice_transpose0=[int]) pythran-0.10.0+ds2/pythran/tests/test_spec_parser.py000066400000000000000000000076321416264035500225250ustar00rootroot00000000000000import unittest import pythran import os.path #pythran export a((float,(int,uintp),str list) list list) #pythran export a(str) #pythran export a( (str,str), int, intp list list) #pythran export a( float set ) #pythran export a( bool:str dict ) #pythran export a( float ) #pythran export a( int8[] ) #pythran export a( int8[][] order (F)) #pythran export a( byte ) #pythran export a0( uint8 ) #pythran export a1( int16 ) #pythran export a2( uint16 ) #pythran export a3( int32 ) #pythran export a4( uint32 ) #pythran export a5( int64 ) #pythran export a6( uint64 ) #pythran export a7( float32 ) #pythran export a8( float64 ) #pythran export a9( complex64 ) #pythran export a10( complex128 ) #pythran export a( int8 set ) #pythran export b( int8 set? 
) #pythran export a( uint8 list) #pythran export a( int16 [], slice) #pythran export a( uint16 [][] order(C)) #pythran export a( uint16 [::][]) #pythran export a( uint16 [:,:,:]) #pythran export a( uint16 [:,::,:]) #pythran export a( uint16 [,,,,]) #pythran export a( (int32, ( uint32 , int64 ) ) ) #pythran export a( uint64:float32 dict ) #pythran export a( float64, complex64, complex128 ) class TestSpecParser(unittest.TestCase): def test_parser(self): real_path = os.path.splitext(os.path.realpath(__file__))[0]+".py" with open(real_path) as fd: print(pythran.spec_parser(fd.read())) def test_invalid_specs0(self): code = '#pythran export foo()\ndef foo(n): return n' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumber", code) def test_invalid_specs1(self): code = '#pythran export boo(int)\ndef boo(): pass' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumber", code) def test_invalid_specs2(self): code = '#pythran export bar(int)\ndef foo(): pass' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumber", code) def test_invalid_specs3(self): code = '#pythran export bar(int, int?, int)\ndef bar(x, y=1, z=1): pass' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumber", code) def test_multiline_spec0(self): code = ''' #pythran export foo( # ) def foo(): return ''' self.assertTrue(pythran.spec_parser(code)) def test_multiline_spec1(self): code = ''' #pythran export foo(int #, int # ) def foo(i,j): return ''' self.assertTrue(pythran.spec_parser(code)) def test_multiline_spec2(self): code = ''' # pythran export foo(int, # float #, int # ) def foo(i,j,k): return ''' self.assertTrue(pythran.spec_parser(code)) def test_crappy_spec0(self): code = ''' # pythran export soo(int) this is an int test def soo(i): return ''' self.assertTrue(pythran.spec_parser(code)) def test_crappy_spec1(self): code = ''' # pythran export 
poo(int) #this is a pythran export test def poo(i): return ''' self.assertTrue(pythran.spec_parser(code)) def test_middle_spec0(self): code = ''' def too(i): return # pythran export too(int) #this is a pythran export test def bar(i): return ''' self.assertTrue(pythran.spec_parser(code)) def test_middle_spec1(self): code = ''' def zoo(i): return #this is a pythran export test # pythran export zoo(int) #this is an export test # pythran export zoo(str) def bar(i): return ''' self.assertEquals(len(pythran.spec_parser(code).functions), 1) self.assertEquals(len(pythran.spec_parser(code).functions['zoo']), 2) def test_var_export0(self): code = ''' # pythran export coo coo = 1 ''' self.assertTrue(pythran.spec_parser(code)) pythran-0.10.0+ds2/pythran/tests/test_str.py000066400000000000000000000313021416264035500210160ustar00rootroot00000000000000from pythran.tests import TestEnv from pythran.typing import List import unittest class TestStr(TestEnv): def test_str_startswith0(self): self.run_test("def str_startswith0(s0, s1): return s0.startswith(s1)", "barbapapa", "barba", str_startswith0=[str, str]) self.run_test("def str_startswith0a(s0, s1): return s0[0].startswith(s1)", "barbapapa", "barba", str_startswith0a=[str, str]) def test_str_startswith1(self): self.run_test("def str_startswith1(s0, s1): return s0.startswith(s1)", "barbapapa", "barbi", str_startswith1=[str, str]) self.run_test("def str_startswith1a(s0, s1): return s0[0].startswith(s1)", "barbapapa", "barbi", str_startswith1a=[str, str]) def test_str_endswith0(self): self.run_test("def str_endswith0(s0, s1): return s0.endswith(s1)", "barbapapa", "papa", str_endswith0=[str, str]) self.run_test("def str_endswith0a(s0, s1): return s0[0].endswith(s1)", "barbapapa", "papa", str_endswith0a=[str, str]) def test_str_endswith1(self): self.run_test("def str_endswith1(s0, s1): return s0.endswith(s1)", "barbapapa", "papy", str_endswith1=[str, str]) self.run_test("def str_endswith1a(s0, s1): return s0[0].endswith(s1)", 
"barbapapa", "papy", str_endswith1a=[str, str]) def test_str_empty(self): self.run_test("def str_empty(s0): return '>o_/' if s0 else '0x0'", "", str_empty=[str]) def test_str_failed_conversion(self): self.run_test("def str_failed_conversion(s):\n try: return int(s)\n except: return 42", "prout", str_failed_conversion=[str]) self.run_test("def str_failed_conversion_a(s):\n try: return int(s[0])\n except: return 42", "prout", str_failed_conversion_a=[str]) def test_str_replace0(self): self.run_test("def str_replace0(s): return s.replace('er', 'rer')", "parler", str_replace0=[str]) self.run_test("def str_replace0a(s): return s.replace('er', 'r')", "parler", str_replace0a=[str]) self.run_test("def str_replace0b(s): return s[0].replace('er', 'r')", "parler", str_replace0b=[str]) def test_str_replace1(self): self.run_test("def str_replace1(s): return s.replace('er', 'rer', 1)", "erlang manger dessert", str_replace1=[str]) self.run_test("def str_replace1a(s): return s.replace('er', 'r', 1)", "erlang manger dessert", str_replace1a=[str]) self.run_test("def str_replace1b(s): return s[0].replace('er', 'r', 1)", "erlang manger dessert", str_replace1b=[str]) def test_str_replace2(self): self.run_test("def str_replace2(s): return s.replace('', 'du vide surgit rien', 1)", "j aime les moulinettes a fromage", str_replace2=[str]) self.run_test("def str_replace2a(s): return s[0].replace('', 'du vide surgit rien', 1)", "j aime les moulinettes a fromage", str_replace2a=[str]) def test_str_ascii_letters(self): self.run_test("def str_ascii_letters(): import string; return string.ascii_letters", str_ascii_letters=[]) def test_str_ascii_lowercase(self): self.run_test("def str_ascii_lowercase(): import string; return string.ascii_lowercase", str_ascii_lowercase=[]) def test_str_ascii_uppercase(self): self.run_test("def str_ascii_uppercase(): import string; return string.ascii_uppercase", str_ascii_uppercase=[]) def test_str_digits(self): self.run_test("def str_digits(): import string; 
return string.digits", str_digits=[]) def test_str_hexdigits(self): self.run_test("def str_hexdigits(): import string; return string.hexdigits", str_hexdigits=[]) def test_str_octdigits(self): self.run_test("def str_octdigits(): import string; return string.octdigits", str_octdigits=[]) def test_str_lower(self): self.run_test("def str_lower(s): return s.lower()", "ThiS iS a TeST", str_lower=[str]) self.run_test("def str_lower_a(s): return s[0].lower()", "ThiS iS a TeST", str_lower_a=[str]) def test_str_upper(self): self.run_test("def str_upper(s): return s.upper()", "ThiS iS a TeST", str_upper=[str]) self.run_test("def str_upper_a(s): return s[0].upper()", "ThiS iS a TeST", str_upper_a=[str]) def test_str_capitalize(self): self.run_test("def str_capitalize(s): return s.capitalize()", "thiS iS a TeST", str_capitalize=[str]) self.run_test("def str_capitalize_a(s): return s[0].capitalize()", "thiS iS a TeST", str_capitalize_a=[str]) def test_str_strip(self): self.run_test("def str_strip(s): return s.strip()", " ThiS iS a TeST ", str_strip=[str]) self.run_test("def str_strip_a(s): return s[0].strip()", " ThiS iS a TeST ", str_strip_a=[str]) def test_str_strip2(self): self.run_test("def str_strip2(s): return s.strip(\"TSih\")", "ThiS iS a TeST", str_strip2=[str]) def test_str_lstrip(self): self.run_test("def str_lstrip(s): return s.lstrip()", " ThiS iS a TeST ", str_lstrip=[str]) self.run_test("def str_lstrip_a(s): return s[0].lstrip()", " ThiS iS a TeST ", str_lstrip_a=[str]) def test_str_lstrip2(self): self.run_test("def str_lstrip2(s): return s.lstrip(\"TSih\")", "ThiS iS a TeST", str_lstrip2=[str]) def test_str_rstrip(self): self.run_test("def str_rstrip(s): return s.rstrip()", " ThiS iS a TeST ", str_rstrip=[str]) self.run_test("def str_rstrip_a(s): return s[0].rstrip()", " ThiS iS a TeST ", str_rstrip_a=[str]) def test_str_rstrip2(self): self.run_test("def str_rstrip2(s): return s.rstrip(\"TSih\")", "ThiS iS a TeST", str_rstrip2=[str]) def test_str_split1(self): 
self.run_test("def str_split1(s): return s.split()", "ThiS iS a TeST", str_split1=[str]) self.run_test("def str_split1_a(s): return s[0].split()", "ThiS iS a TeST", str_split1_a=[str]) def test_str_split2(self): self.run_test("def str_split2(s): return s.split('S')", "ThiS iS a TeST", str_split2=[str]) def test_str_split3(self): self.run_test("def str_split3(s): return s.split()", "ThiS iS\t a TeST", str_split3=[str]) def test_str_split4(self): self.run_test("def str_split4(s): return s.split()", "", str_split4=[str]) def test_str_format(self): self.run_test("def str_format(a): return '%.2f %.2f' % (a, a)", 43.23, str_format=[float]) def test_str_join0(self): self.run_test("def str_join0(): a = ['1'] ; a.pop() ; return '+e+'.join(a)", str_join0=[]) self.run_test("def str_join0a(): a = ['1'] ; a.pop() ; return 'e'.join(a)", str_join0a=[]) def test_str_join1(self): self.run_test("def str_join1(): a = ['l', 'l'] ; return 'o'.join(a)", str_join1=[]) def test_str_join2(self): self.run_test("def str_join2(a): return 'o'.join(filter(len, a))", ['l', 'l'], str_join2=[List[str]]) def test_str_find0(self): self.run_test("def str_find0(s): return s.find('pop')", "popop", str_find0=[str]) self.run_test("def str_find0a(s): return s.find('p')", "popop", str_find0a=[str]) self.run_test("def str_find0b(s): return s[0].find('p')", "popop", str_find0b=[str]) def test_str_find1(self): self.run_test("def str_find1(s): return s.find('pap')", "popop", str_find1=[str]) def test_str_reversal(self): self.run_test("def str_reversal(s): return list(map(ord,reversed(s)))", "dear", str_reversal=[str]) self.run_test("def str_reversal_a(s): return list(map(ord,reversed(s[0])))", "dear", str_reversal_a=[str]) def test_str_substring_iteration(self): self.run_test("def str_substring_iteration(s): return list(map(ord, s[1:-1]))", "pythran", str_substring_iteration=[str]) def test_str_isalpha(self): self.run_test("def str_isalpha(s, t, u): return s.isalpha(), t.isalpha(), u.isalpha()", "e", "1", "", 
str_isalpha=[str,str, str]) self.run_test("def str_isalpha_a(s, t): return s[0].isalpha(), t[0].isalpha()", "e", "1", str_isalpha_a=[str,str]) def test_str_isdigit(self): self.run_test("def str_isdigit(s, t, u): return s.isdigit(), t.isdigit(), u.isdigit()", "e", "1", "", str_isdigit=[str,str, str]) self.run_test("def str_isdigit_a(s, t): return s[0].isdigit(), t[0].isdigit()", "e", "1", str_isdigit_a=[str,str]) def test_str_count(self): self.run_test("def str_count(s, t, u, v): return s.count(t), s.count(u), s.count(v)", "pythran is good for health", "py", "niet", "t", str_count=[str, str, str, str]) self.run_test("def str_count_a(s, t, u, v): return s[0].count(t), s[1].count(u), s.count(v[0])", "pythran is good for health", "py", "niet", "t", str_count_a=[str, str, str, str]) def test_str_literal_cmp(self): code = ''' def eee(a, i): if a == "ABCD": return 2 * i elif a != "ZDSD": return 1 * i return 3 * i def str_literal_cmp(a, i): if a == "EEE": return eee("ZZZ", i), eee("ABCD", i) else: return eee("YYY", i), 3''' self.run_test(code, "EEE", 2, str_literal_cmp=[str, int]) def test_str_literal_cmp1(self): code = ''' def eee(a, i): if a > "ABCD": return 2 * i elif a <= "ZDSD": return 1 * i return 3 * i def str_literal_cmp1(a, i): if a == "EEE": return eee("ZZZ", i), eee("ABCD", i) else: return eee("YYY", i), 3''' self.run_test(code, "EEE", 2, str_literal_cmp1=[str, int]) def test_str_literal_cmp2(self): code = ''' def eee(a, i): if a < "ABCD": return 2 * i elif a >= "ZDSD": return 1 * i return 3 * i def str_literal_cmp2(a, i): if a == "EEE": return eee("ZZZ", i), eee("ABCD", i) else: return eee("YYY", i), 3''' self.run_test(code, "EEE", 2, str_literal_cmp2=[str, int]) def test_str_literal_add(self): code = ''' def eee(a, i): if i > 0: return a + "ABCD" else: return a + "BCD" def str_literal_add(a, i): if a == "EEE": return eee("ZZZ", i), eee("ABCD", i) else: return eee("YYY", i), "3"''' self.run_test(code, "EEE", 2, str_literal_add=[str, int]) def 
test_str_literal_mult(self): code = ''' def eee(a, i): if i > 0: return a * i else: return a * 3 def str_literal_mult(a, i): if a == "EEE": return eee("ZZZ", i), eee("ABCD", i) else: return eee("YYY", i), "3"''' self.run_test(code, "EEE", 2, str_literal_mult=[str, int]) def test_str_float(self): self.run_test("def str_float(s): return float(s)", "0.000012", str_float=[str]) def test_str_numpy_float32(self): self.run_test("def str_numpy_float32(s): import numpy; return numpy.float32(s)", "0.000012", str_numpy_float32=[str]) def test_str_numpy_float64(self): self.run_test("def str_numpy_float64(s): import numpy; return numpy.float64(s)", "0.000012", str_numpy_float64=[str]) def test_str_int(self): self.run_test("def str_int(s): return int(s)", "12", str_int=[str]) def test_str_id(self): self.run_test("def str_id(x): return id(x) != 0", "hello", str_id=[str]) def test_str_slice_assign(self): self.run_test(''' def str_slice_assign(s, c): if s.startswith(c): s = s[len(c):]; return s''', "LEFT-B6", "LEFT-", str_slice_assign=[str, str]) def test_str_slice_assign2(self): self.run_test(''' def sample_datatype(value): definitions = [ ('LEFT-', 1), ('RIGHT-', 2), ('', 3) ] plate_number = None for definition in definitions: s, n = definition if value.startswith(s): plate_number = n value = value[len(s):] break if plate_number is None: # Comment for make it works raise ValueError("Invalid value") return None def str_slice_assign2(s1): sample_datatype(s1) return s1''', "LEFT-B6", str_slice_assign2=[str]) pythran-0.10.0+ds2/pythran/tests/test_submodules.py000066400000000000000000000017641416264035500224010ustar00rootroot00000000000000from pythran.tests import TestEnv class TestSubmodules(TestEnv): def test_module_import(self): self.run_test('def module_import(l): import os; return os.path.join("lili", l)', "mypath", module_import=[str]) def test_submodule_import(self): self.run_test('def submodule_import(l): import os.path; return os.path.join("lili", l)', "mypath", 
submodule_import=[str]) def test_import_alias(self): self.run_test('def import_alias(l): import os.path as p; return p.join("lili", l)', "mypath", import_alias=[str]) def test_function_import(self): self.run_test('def function_import(l): from os.path import join; return join("lili", l)', "mypath", function_import=[str]) def test_function_import_alias(self): self.run_test('def function_import_alias(l): from os.path import join as j; return j("lili", l)', "mypath", function_import_alias=[str]) pythran-0.10.0+ds2/pythran/tests/test_time.py000066400000000000000000000013041416264035500211430ustar00rootroot00000000000000""" Tests for the time module. """ import pytest import sys from pythran.tests import TestEnv @TestEnv.module class TestTime(TestEnv): """ Tests for the time module. """ @pytest.mark.skipif(sys.platform == "win32", reason="not supported by winpython for now") def test_time_and_sleep(self): """ Check time and sleep have the "same" behavior ~0.05 sec. """ self.run_test(""" def time_and_sleep(): import time begin = time.time() time.sleep(2) end = time.time() return (end - begin) < 2.05 and (end - begin) > 1.95""", time_and_sleep=[]) pythran-0.10.0+ds2/pythran/tests/test_typing.py000066400000000000000000000527271416264035500215360ustar00rootroot00000000000000from pythran.tests import TestEnv import unittest import numpy as np import pythran from textwrap import dedent from pythran.typing import List, Dict, NDArray, Tuple class TestTyping(TestEnv): def test_index_dict_with_constant(self): code = 'def index_dict_with_constant(d): return d[0]' return self.run_test(code, {0:2}, index_dict_with_constant=[Dict[int,int]]) def test_module_bad_attribute(self): code = 'def module_bad_attribute(): import random as m; return m.real' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_module_invalid_sequence_mult(self): code = 'def foo(x): return [x] * 3.' 
with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default0(self): code = 'def immutable_default0(x=[1]): pass' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_undefinied_variable_in_test(self): code = 'def undefinied_variable_in_test(x):\n if x: print(A)' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default1(self): code = 'def immutable_default1(x={1}): pass' with self.assertRaises(pythran.syntax.PythranSyntaxError): pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default2(self): code = 'def immutable_default2(x=1): pass' pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default3(self): code = 'def immutable_default3(x=(1, 2)): pass' pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default4(self): code = 'def immutable_default4(x=None): pass' pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default5(self): code = 'def immutable_default5(x=True): pass' pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default6(self): code = 'g=1\ndef immutable_default6(x=g): pass' pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default7(self): code = 'def g(): pass\ndef immutable_default7(x=g): pass' pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_immutable_default8(self): code = 'def g(): pass\ndef immutable_default8(x=int): pass' pythran.compile_pythrancode("dumbo", code, pyonly=True) def test_list_of_set(self): code = ''' def list_of_set(): l=[set()] l[0].add("12") return l''' self.run_test(code, list_of_set=[]) def test_dict_of_set(self): code = ''' def dict_of_set(): l={0:set()} l[0].add("12") return l''' self.run_test(code, dict_of_set=[]) def 
test_typing_aliasing_and_indices(self): self.run_test('def typing_aliasing_and_indices(i): d={};e={}; f = e or d; f[1]=i; return d,e,f', 118, typing_aliasing_and_indices=[int]) def test_typing_aliasing_and_combiner(self): self.run_test('def typing_aliasing_and_combiner(i): d=set();e=set(); f = e or d; f.add(i); return d,e,f', 117, typing_aliasing_and_combiner=[int]) def test_typing_aliasing_and_combiner_back(self): self.run_test('def typing_aliasing_and_combiner_back(i): d=set();e=set(); f = e or d; e.add(i); return d,e,f', 116, typing_aliasing_and_combiner_back=[int]) def test_typing_aliasing_and_fwd(self): self.run_test('def typing_aliasing_and_fwd(i): fwd = lambda x:x; l = []; fwd(l).append(i); return l', 115, typing_aliasing_and_fwd=[int]) def test_typing_aliasing_and_constant_subscript(self): code = ''' def typing_aliasing_and_constant_subscript(i): a=[] b=(a,) b[0].append(i) return a, b ''' self.run_test(code, 118, typing_aliasing_and_constant_subscript=[int]) def test_typing_aliasing_and_variable_subscript(self): code = ''' def typing_aliasing_and_variable_subscript(i): a=[] b=[a] b[i].append(i) return a, b ''' self.run_test(code, 0, typing_aliasing_and_variable_subscript=[int]) def test_typing_aliasing_and_variable_subscript_combiner(self): code = ''' def typing_aliasing_and_variable_subscript_combiner(i): a=[list.append, lambda x,y: x.extend([y])] b = [] a[i](b, i) return b ''' self.run_test(code, 1, typing_aliasing_and_variable_subscript_combiner=[int]) def test_typing_and_function_dict(self): code = ''' def typing_and_function_dict(a): funcs = { 'zero' : lambda x: x.add(0), 'one' : lambda x: x.add(1), } s = set() funcs[a](s) return s ''' self.run_test(code, 'one', typing_and_function_dict=[str]) def test_typing_and_iterate_over_function_list(self): code = ''' def typing_and_iterate_over_function_list(): funcs = [ lambda x: x.add(0), lambda x: x.add(2), ] s = set() for f in funcs: f(s) return s ''' self.run_test(code, 
typing_and_iterate_over_function_list=[]) def test_typing_aliasing_and_update(self): code = ''' def foo(d): f=d f+=[1] def typing_aliasing_and_update(): a= [] foo(a) return a''' self.run_test(code, typing_aliasing_and_update=[]) def test_typing_aliasing_and_update_and_globals(self): code = ''' def f(l): return len(l) def g(l): l.append(1) ; return 0 foo=[f,g] def typing_aliasing_and_update_and_globals(i): h = [] return foo[i](h), h''' self.run_test(code, 1, typing_aliasing_and_update_and_globals=[int]) def test_typing_aliasing_and_update_and_multiple_aliasing0(self): code = ''' def f(l): return len(l) def g(l): l.append(1) ; return 0 def foo(i): if i < 1: return f else: return g def typing_aliasing_and_update_and_multiple_aliasing0(i): h = [] return foo(i)(h), h''' self.run_test(code, 1, typing_aliasing_and_update_and_multiple_aliasing0=[int]) def test_typing_aliasing_and_update_and_multiple_aliasing1(self): code = ''' def foo(i): if i < 1: return list.remove else: return list.append def typing_aliasing_and_update_and_multiple_aliasing1(i): h = [] foo(i)(h, 1) return h''' self.run_test(code, 1, typing_aliasing_and_update_and_multiple_aliasing1=[int]) def test_functional_variant_assign0(self): code=''' def functional_variant_assign0(n): if n > 3: x = "e" * n f = lambda y: x + y else: x = "g" * n f = lambda y: x + "er" return f("re")''' self.run_test(code, 12, functional_variant_assign0=[int]) def test_functional_variant_assign1(self): code=''' def functional_variant_assign1(n): if n > 3: x = "e" * n f = lambda y: x + y else: x = "g" * n f = lambda y: x + "er" return f("re")''' self.run_test(code, 1, functional_variant_assign1=[int]) def test_functional_variant_assign2(self): code=''' def functional_variant_assign2(n): if n > 3: f = lambda x: (1, x * 1.j) else: f = lambda x: (x * 1.j, 1) return sum(f(3))''' self.run_test(code, 1, functional_variant_assign2=[int]) def test_functional_variant_container0(self): code=''' import math def functional_variant_container0(i): 
l=[] l.append(math.cos) l.append(math.sin) return l[i](12)''' self.run_test(code, 0, functional_variant_container0=[int]) def test_functional_variant_container1(self): code=''' import math def functional_variant_container1(i): l=[math.cos, math.sin] return l[i](12)''' self.run_test(code, 1, functional_variant_container1=[int]) def test_functional_variant_container2(self): code=''' import math l = [math.cos, math.sin, math.asin, math.acos, math.sqrt] def functional_variant_container2(i): return l[i](1.)''' self.run_test(code, 4, functional_variant_container2=[int]) @unittest.skip("bad typing: need backward propagation") def test_type_set_in_loop(self): code = ''' def type_set_in_loop(): a = [[]] for i in range(2): b = [] for j in a: b += [j] + [[1]] a = b return a,b''' self.run_test(code, type_set_in_loop=[]) @unittest.skip("bad typing: need backward propagation") def test_type_set_in_while(self): code = ''' def type_set_in_while(): a = [[]] n = 3 while n: b = [] for j in a: b += [j] + [[1]] a = b n -= 1 return a,b''' self.run_test(code, type_set_in_while=[]) def test_recursive_interprocedural_typing0(self): code = ''' from cmath import exp, pi def fft(x): N = len(x) if N <= 1: return x even = fft(x[0::2]) odd = fft(x[1::2]) return [even[k] + exp(-2j*pi*k/N)*odd[k] for k in range(N//2)] + \ [even[k] - exp(-2j*pi*k/N)*odd[k] for k in range(N//2)] def recursive_interprocedural_typing0(c): import numpy as np l = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, c] z = np.abs(fft(l)) return z''' self.run_test(code, 0.0, recursive_interprocedural_typing0=[float]) def test_recursive_interprocedural_typing1(self): code = ''' def s_perm(seq): if not seq: return [[]] else: new_items = [] for item in s_perm(seq[:-1]): new_items += [item + seq for i in range(1)] return new_items def recursive_interprocedural_typing1(c): l = [1,2] * c return s_perm(l)''' self.run_test(code, 3, recursive_interprocedural_typing1=[int]) @unittest.skip("bad typing: recursion and specialized list type") def 
test_recursive_interprocedural_typing2(self): code = ''' def s_perm(seq): if not seq: return [[]] else: new_items = [] for item in s_perm(seq[:-1]): new_items += [item + seq for i in range(1)] return new_items def recursive_interprocedural_typing2(c): l = [1,2,c] return s_perm(l)''' self.run_test(code, 3, recursive_interprocedural_typing2=[int]) def test_print_numpy_types(self): self.run_test(''' import numpy def print_type(t): print(t) def print_numpy_types(n): print_type(numpy.ones(n, dtype=bool).dtype) print_type(numpy.ones(n, dtype=int).dtype) print_type(numpy.ones(n, dtype=complex).dtype) print_type(numpy.ones(n, dtype=float).dtype) print_type(numpy.ones(n, dtype=numpy.uint8).dtype) print_type(numpy.ones(n, dtype=numpy.uint16).dtype) print_type(numpy.ones(n, dtype=numpy.uint64).dtype) print_type(numpy.ones(n, dtype=numpy.double).dtype) print_type(numpy.ones(n, dtype=complex).dtype) ''', 3, print_numpy_types=[int]) def test_constant_argument_variant_functor0(self): self.run_test(''' def foo(x): x[0] = 0 def bar(x): x[1] = 1 l = [foo, bar] def constant_argument_variant_functor0(i): x = [-1, -1] l[i](x) return x''', 0, constant_argument_variant_functor0=[int]) def test_constant_argument_variant_functor1(self): self.run_test(''' def foo(x): x[0] = 0 def bar(x): x[1] = 1 l = [foo, bar] def constant_argument_variant_functor1(i): x = [i, i] [f(x) for f in l] return x''', -1, constant_argument_variant_functor1=[int]) def test_slice_assign(self): self.run_test(''' import numpy as np def slice_assign (M): errs = np.zeros (M) errs[0] = 1 errs[:] = 0 return errs''', 4, slice_assign=[int]) def verify_type_error(self, code): with self.assertRaises(pythran.types.tog.PythranTypeError): _, eh = pythran.generate_cxx("dumbo", dedent(code)) eh() def test_type_inference0(self): code = ''' def wc(content): d = {} for word in content.split(): d[word] = d.get(word, 0) + 1 # Use list comprehension l = [(freq, word) for word, freq in d.items()] return sorted(l) ''' self.run_test(code, 
"cha-la head cha-la", wc=[str]) code_bis = code.replace("1", "'1'") self.verify_type_error(code_bis) code_ter = code.replace("0", "None") self.verify_type_error(code_ter) def test_type_inference1(self): code = ''' def invalid_augassign(n): s = n + "1" s += 2 return s''' self.verify_type_error(code) def test_type_inference2(self): code = ''' def invalid_ifexp(n): return 1 if n else "1"''' self.verify_type_error(code) def test_type_inference3(self): code = ''' def invalid_unary_op(n): return -(n + 'n')''' self.verify_type_error(code) def test_type_inference4(self): code = ''' def invalid_list(n): return [n, len(n)]''' self.verify_type_error(code) def test_type_inference5(self): code = ''' def invalid_set(n): return {n, len(n)}''' self.verify_type_error(code) def test_type_inference6(self): code = ''' def invalid_dict_key(n): return {n:1, len(n):2}''' self.verify_type_error(code) def test_type_inference7(self): code = ''' def invalid_dict_value(n): return {1:n, 2:len(n)}''' self.verify_type_error(code) def test_type_inference8(self): code = ''' def invalid_multi_return(n): for i in n: return [n] return {n}''' self.verify_type_error(code) def test_type_inference9(self): code = ''' def invalid_multi_yield(n): for i in n: yield [n] yield n''' self.verify_type_error(code) def test_type_inference10(self): code = ''' def valid_augassign(l): l *= 0 return l[1:2]''' return self.run_test(code, np.array([0,1,2,3,4]), valid_augassign=[NDArray[int, :]]) def test_type_inference11(self): code = ''' def valid_tuple_index(l): return (1, 2, 3, 4)[l]''' return self.run_test(code, 0, valid_tuple_index=[int]) def test_exact_float_check(self): code = 'def exact_float_check(i): return i' return self.run_test(code, np.float64(1.1), exact_float_check=[float]) def test_exact_complex_check(self): code = 'def exact_complex_check(i): return i' return self.run_test(code, np.complex128(2), exact_complex_check=[complex]) def test_alias_update_in_loop_and_test(self): code = ''' def 
alias_update_in_loop_and_test(X,f): for i in range(2): if i==0: A = f*X[:,i] else: A+=f*X[:,i] return A''' return self.run_test(code, np.arange(9).reshape(3,3), 3, alias_update_in_loop_and_test=[NDArray[int,:,:], int]) def test_alias_update_in_multiple_opaque_tests(self): code = ''' def alias_update_in_multiple_opaque_tests(X,f): if f > 3: A = (f * X[:, 0]) else: A += (f * X[:, 0]) pass if f <= 3: A = (f * X[:, 1]) else: A += (f * X[:, 1])''' return self.run_test(code, np.arange(9).reshape(3,3), 9, alias_update_in_multiple_opaque_tests=[NDArray[int,:,:], int]) def test_alias_update_in_multiple_different_opaque_tests(self): code = ''' def alias_update_in_multiple_different_opaque_tests(f): if f > 3: A = 1 else: A = 2 if f <= 3: A = "e" * A else: A = "f" * A return A''' return self.run_test(code, 9, alias_update_in_multiple_different_opaque_tests=[int]) def test_lambda_partial_merge(self): code = ''' def lambda_partial_merge(a, c, s): if s == "a": x = lambda y:y else: x = lambda y:y+c return x(a)''' return self.run_test(code, 1, 2, "A", lambda_partial_merge=[int, int, str]) def test_unpacking_aliasing(self): code = ''' def effect(c, v): a, _ = c a[0] += v def unpacking_aliasing(v, c): effect(c, v) a, _ = c return a''' return self.run_test(code, 2, ([10, 20], [30]), unpacking_aliasing=[int, Tuple[List[int], List[int]]]) @unittest.skip("bad typing: need higher order function handling") def test_higher_order0(self): code = ''' def b(x, y): x(y, 1) def higher_order0(n): t = [] foo = list.append b(foo, t) return t''' return self.run_test(code, 3, higher_order0=[int]) @unittest.skip("bad typing: need higher order function handling") def test_higher_order1(self): code = ''' def b(x, y): x(y, 1) def higher_order0(n): t = [] def foo(g): t.append(g) b(foo, t) return t''' return self.run_test(code, 3, higher_order0=[int]) def test_rvalue_type_update_list(self): code = ''' def rvalue_type_update_list(x): def foo(x): x.append(1.5); return x return foo([x])''' return 
self.run_test(code, 3, rvalue_type_update_list=[int]) def test_rvalue_type_update_set(self): code = ''' def rvalue_type_update_set(x): def foo(x): x.add(1.5); return x return foo({x})''' return self.run_test(code, 3, rvalue_type_update_set=[int]) def test_rvalue_type_update_dict(self): code = ''' def rvalue_type_update_dict(x): def foo(x): x[1.5] = 1.5; return x return foo({x:x})''' return self.run_test(code, 3, rvalue_type_update_dict=[int]) def test_numpy_array_combiner0(self): code = ''' import numpy as np def numpy_array_combiner0(n): if n > 10: return np.ones(10) else: return np.ones(n)''' return self.run_test(code, 3, numpy_array_combiner0=[int]) def test_numpy_array_combiner1(self): code = ''' import numpy as np def numpy_array_combiner1(n): if n <= 10: return np.ones(10) else: return np.ones(n)''' return self.run_test(code, 3, numpy_array_combiner1=[int]) def test_numpy_array_combiner2(self): code = ''' import numpy as np def numpy_array_combiner2(n): if n > 10: return np.ones(10) else: return np.arange(n)''' return self.run_test(code, 3, numpy_array_combiner2=[int]) def test_numpy_array_combiner3(self): code = ''' import numpy as np def numpy_array_combiner3(n): if n <= 10: return np.ones(10) else: return np.arange(n)''' return self.run_test(code, 3, numpy_array_combiner3=[int]) def test_numpy_array_combiner4(self): code = ''' import numpy as np def numpy_array_combiner4(n): if n > 10: return np.ones(10) else: return np.arange(n)[1:n//2]''' return self.run_test(code, 6, numpy_array_combiner4=[int]) def test_numpy_array_combiner5(self): code = ''' import numpy as np def numpy_array_combiner5(n): if n <= 10: return np.ones(10) else: return np.arange(n)[1:-1]''' return self.run_test(code, 6, numpy_array_combiner5=[int]) def test_numpy_array_in_tuple0(self): code = ''' import numpy as np def Test_reset(self, P, Q): print('In SHAPE', P, Q) self[0][0] = np.zeros((P, Q)) print('Out SHAPE', self[0][0].shape) def numpy_array_in_tuple0(n): Test_reset(([np.zeros((1, 
1))],), 64, n)''' return self.run_test(code, 6, numpy_array_in_tuple0=[int]) pythran-0.10.0+ds2/pythran/tests/test_user_defined_import.py000066400000000000000000000006421416264035500242370ustar00rootroot00000000000000import unittest from pythran.tests import TestFromDir import os import glob import pythran class TestUserImport(TestFromDir): path = os.path.join(os.path.dirname(__file__),"user_defined_import") files = glob.glob(os.path.join(path,"*_main.py")) files.append(os.path.join(path, "tiny_project", "level", "dummy.py")) TestUserImport.populate(TestUserImport) if __name__ == '__main__': unittest.main() pythran-0.10.0+ds2/pythran/tests/test_version.py000066400000000000000000000017601416264035500217000ustar00rootroot00000000000000import re import pythran from pythran.tests import TestEnv class TestVersion(TestEnv): def test_version_check_cython(self): # Cython actually does this check (variable is named # `pythran_is_pre_0_9_6`). Make sure it doesn't break. v = pythran.__version__ pre_096 = tuple(map(int, v.split('.')[0:3])) < (0, 9, 6) self.assertFalse(pre_096) def test_valid_version_string(self): # Verify that the pythran version is a valid one (note: excludes # .post suffix, and complies to PEP 440. Test taken from NumPy version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" dev_suffix = r"\.dev0\+[0-9]*\.g[0-9a-f]+" # For released versions: res1 = re.match(version_pattern, pythran.__version__) # For dev versions: res2 = re.match(version_pattern + dev_suffix, pythran.__version__) self.assertTrue(res1 is not None or res2 is not None, pythran.__version__) pythran-0.10.0+ds2/pythran/tests/test_xdoc.py000066400000000000000000000070051416264035500211460ustar00rootroot00000000000000import doctest import inspect import os import pytest import sys import unittest import pythran from pythran import transformations, analyses, optimizations, types, log class TestDoctest(unittest.TestCase): """ Enable automatic doctest integration to unittest. 
Every module in the pythran package is scanned for doctests and one test per module is created """ @pytest.mark.skipif(sys.platform == "win32", reason="We should create a file for windows.") def test_tutorial(self): failed, _ = doctest.testfile('../../docs/TUTORIAL.rst') self.assertEqual(failed, 0) @pytest.mark.skipif(sys.platform == "win32", reason="We should create a file for windows.") def test_internal(self): tmpfile = self.adapt_rst('../../docs/INTERNAL.rst') failed, _ = doctest.testfile(tmpfile, False) self.assertEqual(failed, 0) os.remove(tmpfile) @pytest.mark.skipif(sys.platform == "win32", reason="We should create a file for windows.") def test_cli(self): tmpfile = self.adapt_rst('../../docs/CLI.rst') failed, _ = doctest.testfile(tmpfile, False) self.assertEqual(failed, 0) os.remove(tmpfile) def adapt_rst(self, relative_path): """ replace '$>' with '>>>' and execute theses command lines by creating a shell return the path of the new adapted tmp file """ import re from tempfile import NamedTemporaryFile filepath = os.path.join(os.path.dirname(__file__), relative_path) with open(filepath) as rst_doc: # hidden doctest sp = re.sub(r'\.\.(\s+>>>)', r'\1', rst_doc.read()) sp = re.sub(r'^([ ]+)$', r'\1', sp, flags=re.MULTILINE) # harmonize argparse output after python 3.10 if sys.version_info >= (3, 10): sp = sp.replace('optional arguments:', 'options:') # hack to support setuptools-generated pythran / pythran-config scripts for tool, sub in (('pythran-config', 'python -m pythran.config'), ('pythran', 'python -m pythran.run'), ('python', sys.executable)): sp = re.sub(r'(\$>.*?[^#])\b' + tool + r'\b([^-.].*)$', r'\1' + sub + r'\2', sp, flags=re.MULTILINE) # convert shell doctest into python ones sp = re.sub(r'\$>(.*?)$', r'>>> import subprocess ; res = subprocess.check_output("\1", shell=True).decode("ascii").strip() ; print(res, end="")', sp, flags=re.MULTILINE) f = NamedTemporaryFile("w", delete=False) f.write(sp) f.close() return f.name def 
generic_test_package(self, mod): # disable logging during doctest log.logging.getLogger('pythran').setLevel(log.logging.ERROR) failed, _ = doctest.testmod(mod) self.assertEqual(failed, 0) def add_module_doctest(base, module_name): module = getattr(base, module_name) if inspect.ismodule(module): setattr(TestDoctest, 'test_' + module_name, lambda self: generic_test_package(self, module)) # doctest does not goes through imported variables, # so manage the tests manually here for module in (pythran, transformations, analyses, optimizations, types): for submodule in dir(module): add_module_doctest(module, submodule) if __name__ == '__main__': unittest.main() pythran-0.10.0+ds2/pythran/tests/user_defined_import/000077500000000000000000000000001416264035500226245ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/user_defined_import/alias1.py000066400000000000000000000001051416264035500243440ustar00rootroot00000000000000 def add(a, b): return a + b; def sub(a, b): return a - b; pythran-0.10.0+ds2/pythran/tests/user_defined_import/alias2.py000066400000000000000000000001051416264035500243450ustar00rootroot00000000000000 def mul(a, b): return a * b; def div(a, b): return a / b; pythran-0.10.0+ds2/pythran/tests/user_defined_import/alias_main.py000066400000000000000000000002541416264035500252740ustar00rootroot00000000000000import alias1 as a1 import alias2 as alias1 import alias2 #pythran export entry() #runas entry() def entry(): return a1.add(1,2) + alias1.mul(10,2) + alias2.div(5, 2) pythran-0.10.0+ds2/pythran/tests/user_defined_import/builtins_in_imported.py000066400000000000000000000000351416264035500274160ustar00rootroot00000000000000def dint(): return int() pythran-0.10.0+ds2/pythran/tests/user_defined_import/builtins_in_imported_main.py000066400000000000000000000002471416264035500304270ustar00rootroot00000000000000import builtins_in_imported from builtins_in_imported import dint #pythran export entry() #runas entry() def entry(): return dint(), 
builtins_in_imported.dint() pythran-0.10.0+ds2/pythran/tests/user_defined_import/diamond_1.py000066400000000000000000000001041416264035500250240ustar00rootroot00000000000000import diamond_3 def foo(arg): return diamond_3.foo(arg) + 42 pythran-0.10.0+ds2/pythran/tests/user_defined_import/diamond_2.py000066400000000000000000000001101416264035500250220ustar00rootroot00000000000000import diamond_3 def foo(arg): return diamond_3.foo(arg) + 2 * 42 pythran-0.10.0+ds2/pythran/tests/user_defined_import/diamond_3.py000066400000000000000000000000501416264035500250260ustar00rootroot00000000000000 def foo(arg): return 3 * arg + 42 pythran-0.10.0+ds2/pythran/tests/user_defined_import/diamond_main.py000066400000000000000000000002031416264035500256100ustar00rootroot00000000000000import diamond_1, diamond_2 #pythran export entry() #runas entry() def entry(): return diamond_1.foo(42) + diamond_2.foo(42) pythran-0.10.0+ds2/pythran/tests/user_defined_import/global_init.py000066400000000000000000000000321416264035500254540ustar00rootroot00000000000000def aa(): return 3.14 pythran-0.10.0+ds2/pythran/tests/user_defined_import/global_init_alias_main.py000066400000000000000000000001331416264035500276330ustar00rootroot00000000000000import global_init as gi XX = [gi.aa(), 3] #pythran export bb() def bb(): return XX pythran-0.10.0+ds2/pythran/tests/user_defined_import/global_init_main.py000066400000000000000000000001661416264035500264700ustar00rootroot00000000000000from .global_init import aa def cc(): return aa() XX = [aa(), 3] #pythran export bb() def bb(): return XX pythran-0.10.0+ds2/pythran/tests/user_defined_import/import_from1.py000066400000000000000000000000461416264035500256140ustar00rootroot00000000000000 def foo(a, b): return a + b + 42 pythran-0.10.0+ds2/pythran/tests/user_defined_import/import_from2.py000066400000000000000000000000521416264035500256120ustar00rootroot00000000000000 def foo(a, b): return a - b + 2 * 42 
pythran-0.10.0+ds2/pythran/tests/user_defined_import/import_from_main.py000066400000000000000000000002331416264035500265350ustar00rootroot00000000000000from import_from1 import foo from import_from2 import foo as foo2 #pythran export entry() #runas entry() def entry(): return foo(1, 2) + foo2(1, 2) pythran-0.10.0+ds2/pythran/tests/user_defined_import/import_inside_function.py000066400000000000000000000000341416264035500277450ustar00rootroot00000000000000 def foo(a): return a+1 pythran-0.10.0+ds2/pythran/tests/user_defined_import/import_inside_function_main.py000066400000000000000000000001771416264035500307610ustar00rootroot00000000000000#pythran export entry() #runas entry() def entry(): import import_inside_function return import_inside_function.foo(1) pythran-0.10.0+ds2/pythran/tests/user_defined_import/method_in_imported_module.py000066400000000000000000000005011416264035500304100ustar00rootroot00000000000000def aa(a): ret = [] while a > 2: ret.append(0) a -=1 return ret def hh(value): ret = [] while value > 0: a, b = _div_tuple(value, 10) ret.insert(0, 0) value = a return ret def _div_tuple(base, div): a = base // div b = base % div return a, b pythran-0.10.0+ds2/pythran/tests/user_defined_import/method_in_imported_module_main.py000066400000000000000000000004111416264035500314140ustar00rootroot00000000000000from .method_in_imported_module import aa, hh from .other_method_in_imported_module import CC, DD def cc(): return aa(3) #pythran export dd(int) def dd(o): return hh(o) XX = cc() + [3] YY = CC() #pythran export bb() def bb(): return XX, YY, DD() pythran-0.10.0+ds2/pythran/tests/user_defined_import/mix_builtin.py000066400000000000000000000003051416264035500255170ustar00rootroot00000000000000from mix_builtin_main import bar import mix_builtin_main, numpy, math as m from numpy import sin def foo(a): return float(bar(a+42) + mix_builtin_main.foo(a) + m.floor(numpy.cos(a) + sin(a))) 
pythran-0.10.0+ds2/pythran/tests/user_defined_import/mix_builtin_main.py000066400000000000000000000002571416264035500265310ustar00rootroot00000000000000import mix_builtin, numpy def bar(a): return a+42 def foo(a): return a+2*42 #pythran export entry(int) #runas entry(10) def entry(a): return mix_builtin.foo(a) pythran-0.10.0+ds2/pythran/tests/user_defined_import/other_method_in_imported_module.py000066400000000000000000000003011416264035500316070ustar00rootroot00000000000000from .method_in_imported_module import aa # importing another module can produce funny things too def CC(): return [aa(h) for h in range(10)] def DD(): return [CC for CC in range(10)] pythran-0.10.0+ds2/pythran/tests/user_defined_import/simple_case_import.py000066400000000000000000000000431416264035500270510ustar00rootroot00000000000000 def imported(a): return a+42 pythran-0.10.0+ds2/pythran/tests/user_defined_import/simple_case_main.py000066400000000000000000000002401416264035500264620ustar00rootroot00000000000000#pythran export entry() #runas entry() import simple_case_import def forward(a): return simple_case_import.imported(a) def entry(): return forward(1) pythran-0.10.0+ds2/pythran/tests/user_defined_import/tiny_project/000077500000000000000000000000001416264035500253355ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/user_defined_import/tiny_project/__init__.py000066400000000000000000000000001416264035500274340ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/user_defined_import/tiny_project/csts.py000066400000000000000000000000371416264035500266630ustar00rootroot00000000000000def return_cst(): return 1 pythran-0.10.0+ds2/pythran/tests/user_defined_import/tiny_project/level/000077500000000000000000000000001416264035500264445ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/tests/user_defined_import/tiny_project/level/dummer.py000066400000000000000000000000371416264035500303070ustar00rootroot00000000000000def twice(n): return 2 * n 
pythran-0.10.0+ds2/pythran/tests/user_defined_import/tiny_project/level/dummy.py000066400000000000000000000002041416264035500301450ustar00rootroot00000000000000from .. csts import return_cst from ..level.dummer import twice #pythran export yummy() def yummy(): return twice(return_cst()) pythran-0.10.0+ds2/pythran/tests/user_defined_import/user_import_from_pythran.py000066400000000000000000000002341416264035500303350ustar00rootroot00000000000000from user_import_from_pythran_main import bar import user_import_from_pythran_main def foo(a): return bar(a+42) + user_import_from_pythran_main.foo(a) pythran-0.10.0+ds2/pythran/tests/user_defined_import/user_import_from_pythran_main.py000066400000000000000000000003021416264035500313350ustar00rootroot00000000000000import user_import_from_pythran def bar(a): return a+42 def foo(a): return a+2*42 #pythran export entry(int) #runas entry(10) def entry(a): return user_import_from_pythran.foo(a) pythran-0.10.0+ds2/pythran/toolchain.py000066400000000000000000000427511416264035500177770ustar00rootroot00000000000000''' This module contains all the stuff to make your way from python code to a dynamic library, see __init__.py for exported interfaces. 
''' from pythran.backend import Cxx, Python from pythran.config import cfg from pythran.cxxgen import PythonModule, Include, Line, Statement from pythran.cxxgen import FunctionBody, FunctionDeclaration, Value, Block from pythran.cxxgen import ReturnStatement from pythran.dist import PythranExtension, PythranBuildExt from pythran.middlend import refine, mark_unexported_functions from pythran.passmanager import PassManager from pythran.tables import pythran_ward from pythran.types import tog from pythran.types.type_dependencies import pytype_to_deps from pythran.types.conversion import pytype_to_ctype from pythran.spec import load_specfile, Spec from pythran.spec import spec_to_string from pythran.syntax import check_specs, check_exports, PythranSyntaxError from pythran.version import __version__ from pythran.utils import cxxid import pythran.frontend as frontend from datetime import datetime from distutils.errors import CompileError from distutils import sysconfig from numpy.distutils.core import setup from tempfile import mkdtemp, NamedTemporaryFile import gast as ast import logging import os.path import shutil import glob import hashlib from functools import reduce import sys logger = logging.getLogger('pythran') def _extract_specs_dependencies(specs): """ Extract types dependencies from specs for each exported signature. 
""" deps = set() # for each function for signatures in specs.functions.values(): # for each signature for signature in signatures: # for each argument for t in signature: deps.update(pytype_to_deps(t)) # and each capsule for signature in specs.capsules.values(): # for each argument for t in signature: deps.update(pytype_to_deps(t)) # Keep "include" first return sorted(deps, key=lambda x: "include" not in x) def _parse_optimization(optimization): '''Turns an optimization of the form my_optim my_package.my_optim into the associated symbol''' splitted = optimization.split('.') if len(splitted) == 1: splitted = ['pythran', 'optimizations'] + splitted return reduce(getattr, splitted[1:], __import__(splitted[0])) def _write_temp(content, suffix): '''write `content` to a temporary XXX`suffix` file and return the filename. It is user's responsibility to delete when done.''' with NamedTemporaryFile(mode='w', suffix=suffix, delete=False) as out: out.write(content) return out.name def has_argument(module, fname): '''Checks if a given function has arguments''' for n in module.body: if isinstance(n, ast.FunctionDef) and n.name == fname: return [cxxid(arg.id) for arg in n.args.args] return [] def front_middle_end(module_name, code, optimizations=None, module_dir=None, entry_points=None): """Front-end and middle-end compilation steps""" pm = PassManager(module_name, module_dir) # front end ir, docstrings = frontend.parse(pm, code) if entry_points is not None: ir = mark_unexported_functions(ir, entry_points) # middle-end if optimizations is None: optimizations = cfg.get('pythran', 'optimizations').split() optimizations = [_parse_optimization(opt) for opt in optimizations] refine(pm, ir, optimizations) return pm, ir, docstrings # PUBLIC INTERFACE STARTS HERE def generate_py(module_name, code, optimizations=None, module_dir=None): '''python + pythran spec -> py code Prints and returns the optimized python code. 
''' pm, ir, _ = front_middle_end(module_name, code, optimizations, module_dir) return pm.dump(Python, ir) def generate_cxx(module_name, code, specs=None, optimizations=None, module_dir=None): '''python + pythran spec -> c++ code returns a PythonModule object and an error checker the error checker can be used to print more detailed info on the origin of a compile error (e.g. due to bad typing) ''' if specs: entry_points = set(specs.keys()) else: entry_points = None pm, ir, docstrings = front_middle_end(module_name, code, optimizations, module_dir, entry_points=entry_points) # back-end content = pm.dump(Cxx, ir) # instantiate the meta program if specs is None: class Generable(object): def __init__(self, content): self.content = content def __str__(self): return str(self.content) generate = __str__ mod = Generable(content) def error_checker(): tog.typecheck(ir) else: # uniform typing if isinstance(specs, dict): specs = Spec(specs, {}) def error_checker(): types = tog.typecheck(ir) check_specs(specs, types) specs.to_docstrings(docstrings) check_exports(pm, ir, specs) if isinstance(code, bytes): code_bytes = code else: code_bytes = code.encode('ascii', 'ignore') metainfo = {'hash': hashlib.sha256(code_bytes).hexdigest(), 'version': __version__, 'date': datetime.now()} mod = PythonModule(module_name, docstrings, metainfo) mod.add_to_includes( Include("pythonic/core.hpp"), Include("pythonic/python/core.hpp"), # FIXME: only include these when needed Include("pythonic/types/bool.hpp"), Include("pythonic/types/int.hpp"), Line("#ifdef _OPENMP\n#include \n#endif") ) mod.add_to_includes(*[Include(inc) for inc in _extract_specs_dependencies(specs)]) mod.add_to_includes(*content.body) mod.add_to_includes( Include("pythonic/python/exception_handler.hpp"), ) def warded(module_name, internal_name): return pythran_ward + '{0}::{1}'.format(module_name, internal_name) for function_name, signatures in specs.functions.items(): internal_func_name = cxxid(function_name) # global variables 
are functions with no signatures :-) if not signatures: mod.add_global_var(function_name, "{}()()".format(warded(module_name, internal_func_name))) for sigid, signature in enumerate(signatures): numbered_function_name = "{0}{1}".format(internal_func_name, sigid) arguments_types = [pytype_to_ctype(t) for t in signature] arguments_names = has_argument(ir, function_name) arguments = [n for n, _ in zip(arguments_names, arguments_types)] name_fmt = pythran_ward + "{0}::{1}::type{2}" args_list = ", ".join(arguments_types) specialized_fname = name_fmt.format(module_name, internal_func_name, "<{0}>".format(args_list) if arguments_names else "") result_type = "typename %s::result_type" % specialized_fname mod.add_pyfunction( FunctionBody( FunctionDeclaration( Value( result_type, numbered_function_name), [Value(t + '&&', a) for t, a in zip(arguments_types, arguments)]), Block([Statement(""" PyThreadState *_save = PyEval_SaveThread(); try {{ auto res = {0}()({1}); PyEval_RestoreThread(_save); return res; }} catch(...) 
{{ PyEval_RestoreThread(_save); throw; }} """.format(warded(module_name, internal_func_name), ', '.join(arguments)))]) ), function_name, arguments_types, signature ) for function_name, signature in specs.capsules.items(): internal_func_name = cxxid(function_name) arguments_types = [pytype_to_ctype(t) for t in signature] arguments_names = has_argument(ir, function_name) arguments = [n for n, _ in zip(arguments_names, arguments_types)] name_fmt = pythran_ward + "{0}::{1}::type{2}" args_list = ", ".join(arguments_types) specialized_fname = name_fmt.format(module_name, internal_func_name, "<{0}>".format(args_list) if arguments_names else "") result_type = "typename %s::result_type" % specialized_fname docstring = spec_to_string(function_name, signature) mod.add_capsule( FunctionBody( FunctionDeclaration( Value(result_type, function_name), [Value(t, a) for t, a in zip(arguments_types, arguments)]), Block([ReturnStatement("{0}()({1})".format( warded(module_name, internal_func_name), ', '.join(arguments)))]) ), function_name, docstring ) return mod, error_checker def compile_cxxfile(module_name, cxxfile, output_binary=None, **kwargs): '''c++ file -> native module Return the filename of the produced shared library Raises CompileError on failure ''' builddir = mkdtemp() buildtmp = mkdtemp() extension = PythranExtension(module_name, [cxxfile], **kwargs) try: setup(name=module_name, ext_modules=[extension], cmdclass={"build_ext": PythranBuildExt}, # fake CLI call script_name='setup.py', script_args=['--verbose' if logger.isEnabledFor(logging.INFO) else '--quiet', 'build_ext', '--build-lib', builddir, '--build-temp', buildtmp] ) except SystemExit as e: raise CompileError(str(e)) def copy(src_file, dest_file): # not using shutil.copy because it fails to copy stat across devices with open(src_file, 'rb') as src: with open(dest_file, 'wb') as dest: dest.write(src.read()) ext = sysconfig.get_config_var('EXT_SUFFIX') # Copy all generated files including the module name prefix 
(.pdb, ...) for f in glob.glob(os.path.join(builddir, module_name + "*")): if f.endswith(ext): if output_binary: output_binary = output_binary.replace('%{ext}', ext) else: output_binary = os.path.join(os.getcwd(), module_name + ext) copy(f, output_binary) else: if output_binary: output_binary = output_binary.replace('%{ext}', '') output_directory = os.path.dirname(output_binary) else: output_directory = os.getcwd() copy(f, os.path.join(output_directory, os.path.basename(f))) shutil.rmtree(builddir) shutil.rmtree(buildtmp) logger.info("Generated module: " + module_name) logger.info("Output: " + output_binary) return output_binary def compile_cxxcode(module_name, cxxcode, output_binary=None, keep_temp=False, **kwargs): '''c++ code (string) -> temporary file -> native module. Returns the generated .so. ''' # Get a temporary C++ file to compile fdpath = _write_temp(cxxcode, '.cpp') output_binary = compile_cxxfile(module_name, fdpath, output_binary, **kwargs) if not keep_temp: # remove tempfile os.remove(fdpath) else: logger.warning("Keeping temporary generated file:" + fdpath) return output_binary def compile_pythrancode(module_name, pythrancode, specs=None, opts=None, cpponly=False, pyonly=False, output_file=None, module_dir=None, **kwargs): '''Pythran code (string) -> c++ code -> native module if `cpponly` is set to true, return the generated C++ filename if `pyonly` is set to true, prints the generated Python filename, unless `output_file` is set otherwise, return the generated native library filename ''' if pyonly: # Only generate the optimized python code content = generate_py(module_name, pythrancode, opts, module_dir) if output_file is None: print(content) return None else: tmp_file = _write_temp(content, '.py') output_file = output_file.format('.py') shutil.move(tmp_file, output_file) logger.info("Generated Python source file: " + output_file) # Autodetect the Pythran spec if not given as parameter from pythran.spec import spec_parser if specs is None: specs = 
spec_parser(pythrancode) # Generate C++, get a PythonModule object module, error_checker = generate_cxx(module_name, pythrancode, specs, opts, module_dir) if 'ENABLE_PYTHON_MODULE' in kwargs.get('undef_macros', []): module.preamble.insert(0, Line('#undef ENABLE_PYTHON_MODULE')) module.preamble.insert(0, Line('#define PY_MAJOR_VERSION {}'. format(sys.version_info.major))) if cpponly: # User wants only the C++ code tmp_file = _write_temp(str(module), '.cpp') if output_file: output_file = output_file.replace('%{ext}', '.cpp') else: output_file = module_name + ".cpp" shutil.move(tmp_file, output_file) logger.info("Generated C++ source file: " + output_file) else: # Compile to binary try: output_file = compile_cxxcode(module_name, str(module), output_binary=output_file, **kwargs) except CompileError: logger.warning("Compilation error, " "trying hard to find its origin...") error_checker() logger.warning("Nop, I'm going to flood you with C++ errors!") raise return output_file def compile_pythranfile(file_path, output_file=None, module_name=None, cpponly=False, pyonly=False, **kwargs): """ Pythran file -> c++ file -> native module. Returns the generated .so (or .cpp if `cpponly` is set to true). Usage without an existing spec file >>> with open('pythran_test.py', 'w') as fd: ... _ = fd.write('def foo(i): return i ** 2') >>> cpp_path = compile_pythranfile('pythran_test.py', cpponly=True) Usage with an existing spec file: >>> with open('pythran_test.pythran', 'w') as fd: ... 
_ = fd.write('export foo(int)') >>> so_path = compile_pythranfile('pythran_test.py') Specify the output file: >>> import sysconfig >>> ext = sysconfig.get_config_vars()["SO"] >>> so_path = compile_pythranfile('pythran_test.py', output_file='foo'+ext) """ if not output_file: # derive module name from input file name _, basename = os.path.split(file_path) module_name = module_name or os.path.splitext(basename)[0] else: # derive module name from destination output_file name _, basename = os.path.split(output_file.replace('%{ext}', '')) module_name = module_name or basename.split(".", 1)[0] module_dir = os.path.dirname(file_path) # Look for an extra spec file spec_file = os.path.splitext(file_path)[0] + '.pythran' if os.path.isfile(spec_file): specs = load_specfile(spec_file) kwargs.setdefault('specs', specs) try: with open(file_path) as fd: output_file = compile_pythrancode(module_name, fd.read(), output_file=output_file, cpponly=cpponly, pyonly=pyonly, module_dir=module_dir, **kwargs) except PythranSyntaxError as e: if e.filename is None: e.filename = file_path raise return output_file def test_compile(): '''Simple passthrough compile test. May raises CompileError Exception. ''' code = ''' #include ''' output_file = compile_cxxcode('test', code) output_file and os.remove(output_file) pythran-0.10.0+ds2/pythran/transformations/000077500000000000000000000000001416264035500206655ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/transformations/__init__.py000066400000000000000000000023521416264035500230000ustar00rootroot00000000000000""" This submodule contains all the transformations passes offered in Pythran. 
This file is just for convenience and turns the import from import transformations.xxxxx.xxxxx into import transformations.xxxxx """ from .expand_builtins import ExpandBuiltins from .expand_globals import ExpandGlobals from .expand_import_all import ExpandImportAll from .expand_imports import ExpandImports from .extract_doc_strings import ExtractDocStrings from .false_polymorphism import FalsePolymorphism from .handle_import import HandleImport from .normalize_compare import NormalizeCompare from .normalize_exception import NormalizeException from .normalize_ifelse import NormalizeIfElse from .normalize_is_none import NormalizeIsNone from .normalize_method_calls import NormalizeMethodCalls from .normalize_return import NormalizeReturn from .normalize_static_if import NormalizeStaticIf, SplitStaticExpression from .normalize_tuples import NormalizeTuples from .remove_comprehension import RemoveComprehension from .remove_lambdas import RemoveLambdas from .remove_nested_functions import RemoveNestedFunctions from .unshadow_parameters import UnshadowParameters from .remove_named_arguments import RemoveNamedArguments from .remove_fstrings import RemoveFStrings pythran-0.10.0+ds2/pythran/transformations/expand_builtins.py000066400000000000000000000027261416264035500244360ustar00rootroot00000000000000""" ExpandBuiltins replaces builtins by their full paths. """ from pythran.analyses import Globals, Locals from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError from pythran.tables import MODULES import gast as ast class ExpandBuiltins(Transformation): """ Expands all builtins into full paths. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): return list()") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ExpandBuiltins, node) >>> print(pm.dump(backend.Python, node)) def foo(): return builtins.list() """ def __init__(self): Transformation.__init__(self, Locals, Globals) def visit_NameConstant(self, node): self.update = True return ast.Attribute( ast.Name('builtins', ast.Load(), None, None), str(node.value), ast.Load()) def visit_Name(self, node): s = node.id if(isinstance(node.ctx, ast.Load) and s not in self.locals[node] and s not in self.globals and s in MODULES['builtins']): if s == 'getattr': raise PythranSyntaxError("You fool! Trying a getattr?", node) self.update = True return ast.Attribute( ast.Name('builtins', ast.Load(), None, None), s, node.ctx) else: return node pythran-0.10.0+ds2/pythran/transformations/expand_globals.py000066400000000000000000000115031416264035500242210ustar00rootroot00000000000000""" ExpandGlobals replaces globals variables by function call. It also turn globals assignment in function definition. """ from pythran.analyses import LocalNameDeclarations from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError from pythran.utils import path_to_attr from pythran import metadata import gast as ast class GlobalTransformer(ast.NodeTransformer): ''' Use assumptions on globals to improve code generation ''' def visit_Call(self, node): # because a list can be a call parameter during global init return node def visit_List(self, node): # because global lists in pythran are static lists return ast.Call(path_to_attr(('builtins', 'pythran', 'static_list')), [ast.Tuple([self.visit(elt) for elt in node.elts], ast.Load())], []) class ExpandGlobals(Transformation): """ Expands all builtins into full paths. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... a = 1 ... def foo(): ... 
return a''') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ExpandGlobals, node) >>> print(pm.dump(backend.Python, node)) def a(): return 1 def foo(): return a() """ def __init__(self): """ Initialize local declaration and constant name to expand. """ self.local_decl = set() self.to_expand = set() super(ExpandGlobals, self).__init__() def visit_Module(self, node): """Turn globals assignment to functionDef and visit function defs. """ module_body = list() symbols = set() # Gather top level assigned variables. for stmt in node.body: if isinstance(stmt, (ast.Import, ast.ImportFrom)): for alias in stmt.names: name = alias.asname or alias.name symbols.add(name) # no warning here elif isinstance(stmt, ast.FunctionDef): if stmt.name in symbols: raise PythranSyntaxError( "Multiple top-level definition of %s." % stmt.name, stmt) else: symbols.add(stmt.name) if not isinstance(stmt, ast.Assign): continue for target in stmt.targets: if not isinstance(target, ast.Name): raise PythranSyntaxError( "Top-level assignment to an expression.", target) if target.id in self.to_expand: raise PythranSyntaxError( "Multiple top-level definition of %s." 
% target.id, target) if isinstance(stmt.value, ast.Name): if stmt.value.id in symbols: continue # create aliasing between top level symbols self.to_expand.add(target.id) for stmt in node.body: if isinstance(stmt, ast.Assign): # that's not a global var, but a module/function aliasing if all(isinstance(t, ast.Name) and t.id not in self.to_expand for t in stmt.targets): module_body.append(stmt) continue self.local_decl = set() cst_value = GlobalTransformer().visit(self.visit(stmt.value)) for target in stmt.targets: assert isinstance(target, ast.Name) module_body.append( ast.FunctionDef(target.id, ast.arguments([], [], None, [], [], None, []), [ast.Return(value=cst_value)], [], None, None)) metadata.add(module_body[-1].body[0], metadata.StaticReturn()) else: self.local_decl = self.gather( LocalNameDeclarations, stmt) module_body.append(self.visit(stmt)) self.update |= bool(self.to_expand) node.body = module_body return node def visit_Name(self, node): """ Turn global variable used not shadows to function call. We check it is a name from an assignment as import or functions use should not be turn into call. """ if (isinstance(node.ctx, ast.Load) and node.id not in self.local_decl and node.id in self.to_expand): self.update = True return ast.Call(func=node, args=[], keywords=[]) return node pythran-0.10.0+ds2/pythran/transformations/expand_import_all.py000066400000000000000000000021751416264035500247450ustar00rootroot00000000000000""" ExpandImportAll replaces import * by all their modules. 
""" from pythran.passmanager import Transformation from pythran.tables import MODULES import gast as ast class ExpandImportAll(Transformation): ''' Expands all import when '*' detected >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("from math import *") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ExpandImportAll, node) >>> print(pm.dump(backend.Python, node)) from math import acos, acosh, asin, asinh, atan, atan2, atanh, ceil, \ copysign, cos, cosh, degrees, e, erf, erfc, exp, expm1, fabs, factorial, \ floor, fmod, frexp, gamma, hypot, isinf, isnan, ldexp, lgamma, log, log10, \ log1p, modf, pi, pow, radians, sin, sinh, sqrt, tan, tanh, trunc ''' def visit_ImportFrom(self, node): for alias in node.names: if alias.name == '*': self.update = True node.names.pop() node.names.extend(ast.alias(fname, None) for fname in sorted(MODULES[node.module])) return node pythran-0.10.0+ds2/pythran/transformations/expand_imports.py000066400000000000000000000124321416264035500242750ustar00rootroot00000000000000""" ExpandImports replaces imports by their full paths. """ from pythran.passmanager import Transformation from pythran.utils import path_to_attr, path_to_node from pythran.conversion import mangle from pythran.syntax import PythranSyntaxError from pythran.analyses import Ancestors import gast as ast class ExpandImports(Transformation): """ Expands all imports into full paths. Attributes ---------- imports : {str} Imported module (python base module name) symbols : {str : (str,)} Matching between used name and real cxx name. 
Examples -------- >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("from math import cos ; cos(2)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ExpandImports, node) >>> print(pm.dump(backend.Python, node)) import math as __pythran_import_math __pythran_import_math.cos(2) >>> node = ast.parse("from os.path import join ; join('a', 'b')") >>> _, node = pm.apply(ExpandImports, node) >>> print(pm.dump(backend.Python, node)) import os as __pythran_import_os __pythran_import_os.path.join('a', 'b') """ def __init__(self): super(ExpandImports, self).__init__(Ancestors) self.imports = set() self.symbols = dict() def visit_Module(self, node): """ Visit the whole module and add all import at the top level. >> import numpy.linalg Becomes >> import numpy """ node.body = [k for k in (self.visit(n) for n in node.body) if k] imports = [ast.Import([ast.alias(i, mangle(i))]) for i in self.imports] node.body = imports + node.body ast.fix_missing_locations(node) return node def visit_Import(self, node): """ Register imported modules and usage symbols. """ for alias in node.names: alias_name = tuple(alias.name.split('.')) self.imports.add(alias_name[0]) if alias.asname: self.symbols[alias.asname] = alias_name else: self.symbols[alias_name[0]] = alias_name[:1] self.update = True return None def visit_ImportFrom(self, node): """ Register imported modules and usage symbols. """ module_path = tuple(node.module.split('.')) self.imports.add(module_path[0]) for alias in node.names: path = module_path + (alias.name,) self.symbols[alias.asname or alias.name] = path self.update = True return None def visit_FunctionDef(self, node): """ Update import context using overwriting name information. Examples -------- >> import foo >> import bar >> def foo(bar): >> print(bar) In this case, neither bar nor foo can be used in the foo function and in future function, foo will not be usable. 
""" self.symbols.pop(node.name, None) gsymbols = self.symbols.copy() [self.symbols.pop(arg.id, None) for arg in node.args.args] self.generic_visit(node) self.symbols = gsymbols return node def visit_Assign(self, node): """ Update import context using overwriting name information. Examples -------- >> import foo >> def bar(): >> foo = 2 >> print(foo) In this case, foo can't be used after assign. """ if isinstance(node.value, ast.Name) and node.value.id in self.symbols: symbol = path_to_node(self.symbols[node.value.id]) if not getattr(symbol, 'isliteral', lambda: False)(): for target in node.targets: if not isinstance(target, ast.Name): err = "Unsupported module aliasing" raise PythranSyntaxError(err, target) self.symbols[target.id] = self.symbols[node.value.id] return None # this assignment is no longer needed new_node = self.generic_visit(node) # no problem if targets contains a subscript, it is not a new assign. [self.symbols.pop(t.id, None) for t in new_node.targets if isinstance(t, ast.Name)] return new_node def visit_Name(self, node): """ Replace name with full expanded name. Examples -------- >> from numpy.linalg import det >> det(a) Becomes >> numpy.linalg.det(a) """ self.generic_visit(node) if node.id in self.symbols: symbol = path_to_node(self.symbols[node.id]) if not getattr(symbol, 'isliteral', lambda: False)(): parent = self.ancestors[node][-1] blacklist = (ast.Tuple, ast.List, ast.Set, ast.Return) if isinstance(parent, blacklist): raise PythranSyntaxError( "Unsupported module identifier manipulation", node) new_node = path_to_attr(self.symbols[node.id]) new_node.ctx = node.ctx ast.copy_location(new_node, node) return new_node return node pythran-0.10.0+ds2/pythran/transformations/extract_doc_strings.py000066400000000000000000000031371416264035500253130ustar00rootroot00000000000000""" ExtractDocStrings fills a dictionary with doc strings for each function. 
""" from pythran.passmanager import Transformation from pythran.utils import isstr import gast as ast class ExtractDocStrings(Transformation): ''' Extract Python Doc Strings, removing them from the AST and putting them in a dictionary for later use. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): 'my doc is cool' ; pass") >>> pm = passmanager.PassManager("test") >>> _ = pm.apply(ExtractDocStrings, node) >>> print(pm.dump(backend.Python, node)) def foo(): pass ''' def __init__(self): super(ExtractDocStrings, self).__init__() self.docstrings = dict() def run(self, node): super(ExtractDocStrings, self).run(node) return self.docstrings def visit_Expr(self, node): 'Remove other top-level strings' if isstr(node.value): return None return node def visit_documented_node(self, key, node): if node.body: first_stmt = node.body[0] if isinstance(first_stmt, ast.Expr): if isstr(first_stmt.value): self.update = True docstring = first_stmt.value.value self.docstrings[key] = docstring node.body.pop(0) return self.generic_visit(node) def visit_Module(self, node): return self.visit_documented_node(None, node) def visit_FunctionDef(self, node): return self.visit_documented_node(node.name, node) pythran-0.10.0+ds2/pythran/transformations/false_polymorphism.py000066400000000000000000000053311416264035500251550ustar00rootroot00000000000000""" FalsePolymorphism try to rename variable to avoid false polymorphism.""" from pythran.passmanager import Transformation from pythran.analyses import DefUseChains, UseDefChains, Identifiers import gast as ast class FalsePolymorphism(Transformation): """ Rename variable when possible to avoid false polymorphism. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a = 12; a = 'babar'") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(FalsePolymorphism, node) >>> print(pm.dump(backend.Python, node)) def foo(): a = 12 a_ = 'babar' """ def __init__(self): super(FalsePolymorphism, self).__init__(DefUseChains, UseDefChains) def visit_FunctionDef(self, node): # reset available identifier names # removing local identifiers from the list so that first occurrence can # actually use the slot identifiers = self.gather(Identifiers, node) for def_ in self.def_use_chains.locals[node]: try: identifiers.remove(def_.name()) except KeyError: pass # compute all reachable nodes from each def. This builds a bag of def # that should have the same name visited_defs = set() for def_ in self.def_use_chains.locals[node]: if def_ in visited_defs: continue associated_defs = set() # fill the bag of associated defs, going through users and defs to_process = [def_] while to_process: curr = to_process.pop() if curr in associated_defs: continue if curr.name() != def_.name(): continue associated_defs.add(curr) for u in curr.users(): to_process.append(u) curr_udc = (d for d in self.use_def_chains.get(curr.node, []) if isinstance(d.node, ast.Name)) to_process.extend(curr_udc) visited_defs.update(associated_defs) # find a new identifier local_identifier = def_.name() name = local_identifier while name in identifiers: name += "_" identifiers.add(name) # don't rename first candidate if name == local_identifier: continue # actual renaming of each node in the bag self.update = True for d in associated_defs: dn = d.node if isinstance(dn, ast.Name) and dn.id == local_identifier: dn.id = name return node pythran-0.10.0+ds2/pythran/transformations/handle_import.py000066400000000000000000000174241416264035500240740ustar00rootroot00000000000000"""HandleImport transformation takes care of importing user-defined modules.""" from pythran.passmanager import 
Transformation from pythran.tables import MODULES, pythran_ward from pythran.syntax import PythranSyntaxError import gast as ast import logging import os logger = logging.getLogger('pythran') def add_filename_field(node, filename): for descendant in ast.walk(node): descendant.filename = filename def mangle_imported_module(module_name): return pythran_ward + "imported__" + module_name.replace('.', '$') + '$' def mangle_imported_function(module_name, func_name): return mangle_imported_module(module_name) + func_name def demangle(name): return name[len(pythran_ward + "imported__"):-1].replace('$', '.') def is_builtin_function(func_name): """Test if a function is a builtin (like len(), map(), ...).""" return func_name in MODULES["builtins"] def is_builtin_module(module_name): """Test if a module is a builtin module (numpy, math, ...).""" module_name = module_name.split(".")[0] return module_name in MODULES def is_mangled_module(name): return name.endswith('$') def getsource(name, module_dir, level): # Try to load py file module_base = name.replace('.', os.path.sep) + '.py' if module_dir is None: assert level <= 0, "Cannot use relative path without module_dir" module_file = module_base else: module_file = os.path.sep.join(([module_dir] + ['..'] * (level - 1) + [module_base])) try: with open(module_file, 'r') as fp: from pythran.frontend import raw_parse node = raw_parse(fp.read()) add_filename_field(node, name + ".py") return node except IOError: raise PythranSyntaxError("Module '{}' not found." .format(name)) class HandleImport(Transformation): """This pass handle user-defined import, mangling name for function from other modules and include them in the current module, patching all call site accordingly. 
""" def __init__(self): super(HandleImport, self).__init__() self.identifiers = [{}] self.imported = set() self.prefixes = [""] def lookup(self, name): for renaming in reversed(self.identifiers): if name in renaming: return renaming[name] return None def is_imported(self, name): return name in self.imported def visit_Module(self, node): self.imported_stmts = list() self.generic_visit(node) node.body = self.imported_stmts + node.body return node def rename(self, node, attr): prev_name = getattr(node, attr) new_name = self.prefixes[-1] + prev_name setattr(node, attr, new_name) self.identifiers[-1][prev_name] = new_name def rename_top_level_functions(self, node): for stmt in node.body: if isinstance(stmt, ast.FunctionDef): self.rename(stmt, 'name') elif isinstance(stmt, ast.Assign): for target in stmt.targets: if isinstance(target, ast.Name): self.rename(target, 'id') def visit_FunctionDef(self, node): self.identifiers.append({}) self.generic_visit(node) self.identifiers.pop() return node def visit_ListComp(self, node): # change transversal order so that store happens before load for generator in node.generators: self.visit(generator) self.visit(node.elt) return node visit_SetComp = visit_ListComp visit_GeneratorExp = visit_ListComp def visit_DictComp(self, node): for generator in node.generators: self.visit(generator) self.visit(node.key) self.visit(node.value) return node def visit_comprehension(self, node): self.visit(node.iter) for if_ in node.ifs: self.visit(if_) self.visit(node.target) return node def visit_assign(self, node): self.visit(node.value) for target in node.targets: self.visit(target) return node def visit_Assign(self, node): if not isinstance(node.value, ast.Name): return self.visit_assign(node) renaming = self.lookup(node.value.id) if not renaming: return self.visit_assign(node) if not is_mangled_module(renaming): return self.visit_assign(node) if any(not isinstance(target, ast.Name) for target in node.targets): raise PythranSyntaxError("Invalid 
module assignment", node) return node def visit_Name(self, node): if isinstance(node.ctx, ast.Load): renaming = self.lookup(node.id) if renaming: node.id = renaming elif isinstance(node.ctx, (ast.Store, ast.Param)): self.identifiers[-1][node.id] = node.id elif isinstance(node.ctx, ast.Del): pass else: raise NotImplementedError(node) return node def visit_Attribute(self, node): if not isinstance(node.ctx, ast.Load): return node # is that a module attribute load? root = node.value while isinstance(root, ast.Attribute): root = root.value if not isinstance(root, ast.Name): return node renaming = self.lookup(root.id) if not renaming: return node if not is_mangled_module(renaming): return node base_module = demangle(renaming) if is_builtin_module(base_module): return node renaming = self.lookup(root.id) root = node suffix = "" while isinstance(root, ast.Attribute): root = root.value suffix = '$' + node.attr + suffix return ast.Name(renaming + suffix[1:], node.ctx, None, None) def import_module(self, module_name, module_level): self.imported.add(module_name) module_node = getsource(module_name, self.passmanager.module_dir, module_level) self.prefixes.append(mangle_imported_module(module_name)) self.identifiers.append({}) self.rename_top_level_functions(module_node) self.generic_visit(module_node) self.prefixes.pop() self.identifiers.pop() return module_node.body def visit_ImportFrom(self, node): if node.module == '__future__': return None if is_builtin_module(node.module): for alias in node.names: name = alias.asname or alias.name self.identifiers[-1][name] = name return node else: for alias in node.names: name = alias.asname or alias.name self.identifiers[-1][name] = mangle_imported_function( node.module, alias.name) if self.is_imported(node.module): return None new_stmts = self.import_module(node.module, node.level) self.imported_stmts.extend(new_stmts) return None def visit_Import(self, node): new_aliases = [] for alias in node.names: name = alias.asname or alias.name 
self.identifiers[-1][name] = mangle_imported_module(alias.name) if alias.name in self.imported: continue if is_builtin_module(alias.name): new_aliases.append(alias) continue new_stmts = self.import_module(alias.name, 0) self.imported_stmts.extend(new_stmts) if new_aliases: node.names = new_aliases return node else: return None pythran-0.10.0+ds2/pythran/transformations/normalize_compare.py000066400000000000000000000104241416264035500247460ustar00rootroot00000000000000""" NormalizeCompare turns complex compare into function calls. """ from pythran.analyses import ImportedIds from pythran.passmanager import Transformation from pythran.utils import path_to_attr import pythran.metadata as metadata import gast as ast def is_trivially_copied(node): try: ast.literal_eval(node) return True except ValueError: pass if isinstance(node, (ast.Name, ast.Attribute)): return True return False class NormalizeCompare(Transformation): ''' Turns multiple compare into a function with proper temporaries. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(a): return 0 < a + 1 < 3") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(NormalizeCompare, node) >>> print(pm.dump(backend.Python, node)) def foo(a): return foo_compare0(a) def foo_compare0(a): $1 = (a + 1) if (0 < $1): pass else: return builtins.False if ($1 < 3): pass else: return builtins.False return builtins.True ''' def visit_Module(self, node): self.compare_functions = list() self.generic_visit(node) node.body.extend(self.compare_functions) self.update |= bool(self.compare_functions) return node def visit_FunctionDef(self, node): self.prefix = node.name self.generic_visit(node) return node def visit_Assert(self, node): # Assume no side effect in asserts function. # This is checked in extended_syntax check. 
return node def visit_Compare(self, node): node = self.generic_visit(node) if len(node.ops) > 1: # in case we have more than one compare operator # we generate an auxiliary function # that lazily evaluates the needed parameters imported_ids = self.gather(ImportedIds, node) imported_ids = sorted(imported_ids) binded_args = [ast.Name(i, ast.Load(), None, None) for i in imported_ids] # name of the new function forged_name = "{0}_compare{1}".format(self.prefix, len(self.compare_functions)) # call site call = ast.Call( ast.Name(forged_name, ast.Load(), None, None), binded_args, []) # new function arg_names = [ast.Name(i, ast.Param(), None, None) for i in imported_ids] args = ast.arguments(arg_names, [], None, [], [], None, []) body = [] # iteratively fill the body (yeah, feel your body!) if is_trivially_copied(node.left): prev_holder = node.left else: body.append( ast.Assign( [ast.Name('$0', ast.Store(), None, None)], node.left, None)) prev_holder = ast.Name('$0', ast.Load(), None, None) for i, exp in enumerate(node.comparators): if is_trivially_copied(exp): holder = exp else: body.append(ast.Assign([ast.Name('${}'.format(i+1), ast.Store(), None, None)], exp, None)) holder = ast.Name('${}'.format(i+1), ast.Load(), None, None) cond = ast.Compare(prev_holder, [node.ops[i]], [holder]) body.append( ast.If(cond, [ast.Pass()], [ast.Return(path_to_attr(('builtins', 'False')))])) prev_holder = holder body.append(ast.Return(path_to_attr(('builtins', 'True')))) forged_fdef = ast.FunctionDef(forged_name, args, body, [], None, None) metadata.add(forged_fdef, metadata.Local()) self.compare_functions.append(forged_fdef) return call else: return node pythran-0.10.0+ds2/pythran/transformations/normalize_exception.py000066400000000000000000000026761416264035500253300ustar00rootroot00000000000000""" NormalizeException simplifies try blocks. 
""" from pythran.passmanager import Transformation import gast as ast class NormalizeException(Transformation): ''' Transform else statement in try except block in nested try except. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("try:print('t')\\nexcept: print('x')\\n\ else: print('e')") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(NormalizeException, node) >>> print(pm.dump(backend.Python, node)) try: print('t') try: print('e') except: pass except: print('x') ''' # FIXME : The transformation is incorrect. Else statement should propagate # exception def visit_Try(self, node): if node.orelse: node.body.append( ast.Try( node.orelse, [ast.ExceptHandler(None, None, [ast.Pass()])], [], [] ) ) node.orelse = [] self.update = True if node.finalbody: node.body.extend(node.finalbody) node.finalbody.append(ast.Raise(None, None)) self.update = True node = ast.Try( node.body, [ast.ExceptHandler(None, None, node.finalbody)], [], []) node.finalbody = [] return node pythran-0.10.0+ds2/pythran/transformations/normalize_ifelse.py000066400000000000000000000043651416264035500245760ustar00rootroot00000000000000""" NormalizeIfElse transform early exit in if into if-else. """ from pythran.analyses import Ancestors from pythran.passmanager import Transformation import gast as ast class NormalizeIfElse(Transformation): ''' >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(""" ... def foo(y): ... if y: return 1 ... return 2""") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(NormalizeIfElse, node) >>> print(pm.dump(backend.Python, node)) def foo(y): if y: return 1 else: return 2 >>> node = ast.parse(""" ... def foo(y): ... if y: ... z = y + 1 ... if z: ... return 1 ... else: ... return 3 ... 
return 2""") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(NormalizeIfElse, node) >>> print(pm.dump(backend.Python, node)) def foo(y): if y: z = (y + 1) if z: return 1 else: return 3 else: return 2 ''' def __init__(self): super(NormalizeIfElse, self).__init__(Ancestors) def check_lasts(self, node): if isinstance(node, (ast.Return, ast.Break, ast.Return)): return True if isinstance(node, ast.If): if not self.check_lasts(node.body[-1]): return False return node.orelse and self.check_lasts(node.orelse[-1]) def visit_If(self, node): self.generic_visit(node) if not self.check_lasts(node.body[-1]): return node parent = self.ancestors[node][-1] for attr in ('body', 'orelse', 'finalbody'): try: body = getattr(parent, attr) index = body.index(node) if index == len(body) - 1: return node if not node.orelse: node.orelse = [] node.orelse.extend(body[index + 1:]) body[index + 1:] = [] self.update = True return node except ValueError: continue except AttributeError: continue return node pythran-0.10.0+ds2/pythran/transformations/normalize_is_none.py000066400000000000000000000057471416264035500247660ustar00rootroot00000000000000""" NormalizeIsNone detects is None patterns. 
""" from pythran.passmanager import Transformation from pythran.analyses import Ancestors from pythran.syntax import PythranSyntaxError from functools import reduce import gast as ast def is_none(expr): # py3 if isinstance(expr, ast.Constant) and expr.value is None: return True # py2 if not isinstance(expr, ast.Attribute): return False return expr.attr == "None" def is_is_none(expr): if not isinstance(expr, ast.Compare): return None if len(expr.ops) != 1: exprs = [expr.left] + expr.comparators if any(is_none(expr) for expr in exprs): raise PythranSyntaxError("is None in complex condition", expr) return None if not isinstance(expr.ops[0], (ast.Eq, ast.Is)): return None if is_none(expr.left): return expr.comparators[0] if is_none(expr.comparators[0]): return expr.left return None def is_is_not_none(expr): if not isinstance(expr, ast.Compare): return None if len(expr.ops) != 1: exprs = [expr.left] + expr.comparators if any(is_none(expr) for expr in exprs): raise PythranSyntaxError("is None in complex condition", expr) return None if not isinstance(expr.ops[0], (ast.NotEq, ast.IsNot)): return None if is_none(expr.left): return expr.comparators[0] if is_none(expr.comparators[0]): return expr.left return None class NormalizeIsNone(Transformation): table = {ast.And: ast.BitAnd, ast.Or: ast.BitOr} def __init__(self): super(NormalizeIsNone, self).__init__(Ancestors) @staticmethod def match_is_none(node): noned_var = is_is_none(node) if noned_var is None: noned_var = is_is_not_none(node) negated = noned_var is not None else: negated = False return noned_var, negated def visit_BoolOp(self, node): values = list(node.values) self.generic_visit(node) if any(x != y for x, y in zip(values, node.values)): self.update = True expr = reduce(lambda x, y: ast.BinOp(x, NormalizeIsNone.table[type(node.op)](), y), node.values) return expr else: return node def visit_Compare(self, node): self.generic_visit(node) noned_var, negated = self.match_is_none(node) if noned_var is None: return node 
call = ast.Call( ast.Attribute( ast.Attribute( ast.Name('builtins', ast.Load(), None, None), 'pythran', ast.Load() ), 'is_none', ast.Load()), [noned_var], []) self.update = True if negated: return ast.UnaryOp(ast.Not(), call) else: return call pythran-0.10.0+ds2/pythran/transformations/normalize_method_calls.py000066400000000000000000000260451416264035500257640ustar00rootroot00000000000000""" NormalizeMethodCalls turns built in method calls into function calls. """ from pythran.analyses import Globals, Ancestors from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError from pythran.tables import attributes, functions, methods, MODULES from pythran.tables import duplicated_methods from pythran.conversion import mangle, demangle from pythran.utils import isstr import gast as ast from functools import reduce class NormalizeMethodCalls(Transformation): ''' Turns built in method calls into function calls. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("[].append(12)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(NormalizeMethodCalls, node) >>> print(pm.dump(backend.Python, node)) builtins.list.append([], 12) ''' def __init__(self): Transformation.__init__(self, Globals, Ancestors) self.imports = {'builtins': 'builtins'} self.to_import = set() def visit_Module(self, node): """ When we normalize call, we need to add correct import for method to function transformation. a.max() for numpy array will become: numpy.max(a) so we have to import numpy. 
""" self.skip_functions = True self.generic_visit(node) self.skip_functions = False self.generic_visit(node) new_imports = self.to_import - self.globals imports = [ast.Import(names=[ast.alias(name=mod[17:], asname=mod)]) for mod in new_imports] node.body = imports + node.body self.update |= bool(imports) return node def visit_FunctionDef(self, node): if self.skip_functions: return node old_imports = self.imports self.imports = old_imports.copy() for arg in node.args.args: self.imports.pop(arg.id, None) self.generic_visit(node) self.imports = old_imports return node def visit_Import(self, node): for alias in node.names: name = alias.asname or alias.name self.imports[name] = name return node def visit_Assign(self, node): # aliasing between modules if isinstance(node.value, ast.Name) and node.value.id in self.imports: for t in node.targets: if isinstance(t, ast.Name): self.imports[t.id] = self.imports[node.value.id] return None else: n = self.generic_visit(node) for t in node.targets: if isinstance(t, ast.Name): self.imports.pop(t.id, None) return n def visit_For(self, node): node.iter = self.visit(node.iter) if isinstance(node.target, ast.Name): self.imports.pop(node.target.id, None) if node.body: node.body = [self.visit(n) for n in node.body] if node.orelse: node.orelse = [self.visit(n) for n in node.orelse] return node def baseobj(self, obj): # Get the most left identifier while isinstance(obj, ast.Attribute): obj = obj.value # Check if it's a module if isinstance(obj, ast.Name) and obj.id in self.imports: return None else: return obj def keyword_based_disambiguification(self, node): assert isinstance(node.func, ast.Attribute) if getattr(node.func.value, 'id', None) != mangle('__dispatch__'): return if not node.keywords: return if node.func.attr not in duplicated_methods: return node_keywords = {kw.arg for kw in node.keywords} for disamb_path, disamb_node in duplicated_methods[node.func.attr]: disamb_args = {arg.id for arg in disamb_node.args.args} if all(kw in 
disamb_args for kw in node_keywords): node.func = self.attr_to_func(node.func, disamb_path) return def attr_to_func(self, node, mod=None): if mod is None: mod = methods[node.attr][0] # Submodules import full module self.to_import.add(mangle(mod[0])) func = reduce( lambda v, o: ast.Attribute(v, o, ast.Load()), mod[1:] + (node.attr,), ast.Name(mangle(mod[0]), ast.Load(), None, None) ) return func def visit_Attribute(self, node): node = self.generic_visit(node) # method name -> not a getattr if node.attr in methods: # Make sure parent is'nt a call, it's already handled in visit_Call for parent in reversed(self.ancestors.get(node, ())): if isinstance(parent, ast.Attribute): continue if isinstance(parent, ast.Call): return node break # we have a bound method which is not a call obj = self.baseobj(node) if obj is not None: self.update = True mod = methods[node.attr][0] self.to_import.add(mangle(mod[0])) func = self.attr_to_func(node) z = ast.Call( ast.Attribute( ast.Name(mangle('functools'), ast.Load(), None, None), "partial", ast.Load() ), [func, obj], []) return z else: return node # imported module -> not a getattr elif (isinstance(node.value, ast.Name) and node.value.id in self.imports): module_id = self.imports[node.value.id] if node.attr not in MODULES[self.renamer(module_id, MODULES)[1]]: msg = ("`" + node.attr + "' is not a member of " + demangle(module_id) + " or Pythran does not support it") raise PythranSyntaxError(msg, node) node.value.id = module_id # patch module aliasing self.update = True return node # not listed as attributed -> not a getattr elif node.attr not in attributes: return node # A getattr ! else: self.update = True call = ast.Call( ast.Attribute( ast.Name('builtins', ast.Load(), None, None), 'getattr', ast.Load()), [node.value, ast.Constant(node.attr, None)], []) if isinstance(node.ctx, ast.Store): # the only situation where this arises is for real/imag of # a ndarray. 
As a call is not valid for a store, add a slice # to ends up with a valid lhs assert node.attr in ('real', 'imag'), "only store to imag/real" return ast.Subscript(call, ast.Slice(None, None, None), node.ctx) else: return call @staticmethod def renamer(v, cur_module): """ Rename function path to fit Pythonic naming. """ mname = demangle(v) return v, mname def visit_Call(self, node): """ Transform call site to have normal function call. Examples -------- For methods: >> a = [1, 2, 3] >> a.append(1) Becomes >> __list__.append(a, 1) For functions: >> builtins.dict.fromkeys([1, 2, 3]) Becomes >> builtins.__dict__.fromkeys([1, 2, 3]) """ node = self.generic_visit(node) # Only attributes function can be Pythonic and should be normalized if isinstance(node.func, ast.Attribute): if node.func.attr in methods: # Check object targeted by methods if self.baseobj(node.func) is not None: self.update = True # As it was a methods call, push targeted object as first # arguments and add correct module prefix node.args.insert(0, node.func.value) mod = methods[node.func.attr][0] # Submodules import full module self.to_import.add(mangle(mod[0])) node.func = self.attr_to_func(node.func) # else methods have been called using function syntax if node.func.attr in methods or node.func.attr in functions: # Now, methods and function have both function syntax def rec(path, cur_module): """ Recursively rename path content looking in matching module. Prefers __module__ to module if it exists. This recursion is done as modules are visited top->bottom while attributes have to be visited bottom->top. 
""" err = "Function path is chained attributes and name" assert isinstance(path, (ast.Name, ast.Attribute)), err if isinstance(path, ast.Attribute): new_node, cur_module = rec(path.value, cur_module) new_id, mname = self.renamer(path.attr, cur_module) return (ast.Attribute(new_node, new_id, ast.Load()), cur_module[mname]) else: new_id, mname = self.renamer(path.id, cur_module) if mname not in cur_module: raise PythranSyntaxError( "Unbound identifier '{}'".format(mname), node) return (ast.Name(new_id, ast.Load(), None, None), cur_module[mname]) # Rename module path to avoid naming issue. node.func.value, _ = rec(node.func.value, MODULES) self.update = True self.keyword_based_disambiguification(node) return node def visit_BinOp(self, node): # replace "str" % (...) by builtins.str.__mod__(...) # the reason why we do this is that % formatting is handled by # a third party library that's relatively costly to load, so using a # function name instead of an operator overload makes it possible to # load it only when needed. The drawback is that % formatting is no # longer supported when lhs is not a literal self.generic_visit(node) if isinstance(node.op, ast.Mod) and isstr(node.left): self.update = True return ast.Call( ast.Attribute( ast.Attribute( ast.Name('builtins', ast.Load(), None, None), 'str', ast.Load()), '__mod__', ast.Load()), [node.left, node.right], []) return node pythran-0.10.0+ds2/pythran/transformations/normalize_return.py000066400000000000000000000035701416264035500246430ustar00rootroot00000000000000""" NormalizeReturn adds return statement where relevant. 
""" from pythran.analyses import CFG, YieldPoints from pythran.passmanager import Transformation import gast as ast class NormalizeReturn(Transformation): ''' Adds Return statement when they are implicit, and adds the None return value when not set >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(y): print(y)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(NormalizeReturn, node) >>> print(pm.dump(backend.Python, node)) def foo(y): print(y) return builtins.None ''' def __init__(self): super(NormalizeReturn, self).__init__(CFG) def visit_FunctionDef(self, node): self.yield_points = self.gather(YieldPoints, node) for stmt in node.body: self.visit(stmt) # Look for nodes that have no successors; the predecessors of # the special NIL node are those AST nodes that end control flow # without a return statement. for n in self.cfg.predecessors(CFG.NIL): if not isinstance(n, (ast.Return, ast.Raise)): self.update = True if self.yield_points: node.body.append(ast.Return(None)) else: none = ast.Attribute( ast.Name("builtins", ast.Load(), None, None), 'None', ast.Load()) node.body.append(ast.Return(none)) break return node def visit_Return(self, node): if not node.value and not self.yield_points: none = ast.Attribute(ast.Name("builtins", ast.Load(), None, None), 'None', ast.Load()) node.value = none self.update = True return node pythran-0.10.0+ds2/pythran/transformations/normalize_static_if.py000066400000000000000000000403571416264035500252750ustar00rootroot00000000000000""" NormalizeStaticIf adds support for static guards. 
""" from pythran.analyses import (ImportedIds, HasReturn, IsAssigned, CFG, HasBreak, HasContinue, DefUseChains, Ancestors, StaticExpressions, HasStaticExpression) from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError import gast as ast from copy import deepcopy LOOP_NONE, EARLY_RET, LOOP_BREAK, LOOP_CONT = range(4) def outline(name, formal_parameters, out_parameters, stmts, has_return, has_break, has_cont): args = ast.arguments( [ast.Name(fp, ast.Param(), None, None) for fp in formal_parameters], [], None, [], [], None, []) if isinstance(stmts, ast.expr): assert not out_parameters, "no out parameters with expr" fdef = ast.FunctionDef(name, args, [ast.Return(stmts)], [], None, None) else: fdef = ast.FunctionDef(name, args, stmts, [], None, None) # this is part of a huge trick that plays with delayed type inference # it basically computes the return type based on out parameters, and # the return statement is unconditionally added so if we have other # returns, there will be a computation of the output type based on the # __combined of the regular return types and this one The original # returns have been patched above to have a different type that # cunningly combines with this output tuple # # This is the only trick I found to let pythran compute both the output # variable type and the early return type. 
But hey, a dirty one :-/ stmts.append( ast.Return( ast.Tuple( [ast.Name(fp, ast.Load(), None, None) for fp in out_parameters], ast.Load() ) ) ) if has_return: pr = PatchReturn(stmts[-1], has_break or has_cont) pr.visit(fdef) if has_break or has_cont: if not has_return: stmts[-1].value = ast.Tuple([ast.Constant(LOOP_NONE, None), stmts[-1].value], ast.Load()) pbc = PatchBreakContinue(stmts[-1]) pbc.visit(fdef) return fdef class PatchReturn(ast.NodeTransformer): def __init__(self, guard, has_break_or_cont): self.guard = guard self.has_break_or_cont = has_break_or_cont def visit_Return(self, node): if node is self.guard: holder = "StaticIfNoReturn" else: holder = "StaticIfReturn" value = node.value return ast.Return( ast.Call( ast.Attribute( ast.Attribute( ast.Name("builtins", ast.Load(), None, None), "pythran", ast.Load()), holder, ast.Load()), [value] if value else [ast.Constant(None, None)], [])) class PatchBreakContinue(ast.NodeTransformer): def __init__(self, guard): self.guard = guard def visit_For(self, _): pass def visit_While(self, _): pass def patch_Control(self, node, flag): new_node = deepcopy(self.guard) ret_val = new_node.value if isinstance(ret_val, ast.Call): if flag == LOOP_BREAK: ret_val.func.attr = "StaticIfBreak" else: ret_val.func.attr = "StaticIfCont" else: new_node.value.elts[0].value = flag return new_node def visit_Break(self, node): return self.patch_Control(node, LOOP_BREAK) def visit_Continue(self, node): return self.patch_Control(node, LOOP_CONT) class NormalizeStaticIf(Transformation): def __init__(self): super(NormalizeStaticIf, self).__init__(StaticExpressions, Ancestors, DefUseChains) def visit_Module(self, node): self.new_functions = [] self.funcs = [] self.cfgs = [] self.generic_visit(node) node.body.extend(self.new_functions) return node def escaping_ids(self, scope_stmt, stmts): 'gather sets of identifiers defined in stmts and used out of it' assigned_nodes = self.gather(IsAssigned, self.make_fake(stmts)) escaping = set() for 
assigned_node in assigned_nodes: head = self.def_use_chains.chains[assigned_node] for user in head.users(): if scope_stmt not in self.ancestors[user.node]: escaping.add(head.name()) return escaping @staticmethod def make_fake(stmts): return ast.If(ast.Constant(0, None), stmts, []) @staticmethod def make_dispatcher(static_expr, func_true, func_false, imported_ids): dispatcher_args = [static_expr, ast.Name(func_true.name, ast.Load(), None, None), ast.Name(func_false.name, ast.Load(), None, None)] dispatcher = ast.Call( ast.Attribute( ast.Attribute( ast.Name("builtins", ast.Load(), None, None), "pythran", ast.Load()), "static_if", ast.Load()), dispatcher_args, []) actual_call = ast.Call( dispatcher, [ast.Name(ii, ast.Load(), None, None) for ii in imported_ids], []) return actual_call def true_name(self): return "$isstatic{}".format(len(self.new_functions) + 0) def false_name(self): return "$isstatic{}".format(len(self.new_functions) + 1) def visit_FunctionDef(self, node): self.cfgs.append(self.gather(CFG, node)) self.funcs.append(node) onode = self.generic_visit(node) self.funcs.pop() self.cfgs.pop() return onode def visit_IfExp(self, node): self.generic_visit(node) if node.test not in self.static_expressions: return node imported_ids = sorted(self.gather(ImportedIds, node)) func_true = outline(self.true_name(), imported_ids, [], node.body, False, False, False) func_false = outline(self.false_name(), imported_ids, [], node.orelse, False, False, False) self.new_functions.extend((func_true, func_false)) actual_call = self.make_dispatcher(node.test, func_true, func_false, imported_ids) return actual_call def make_control_flow_handlers(self, cont_n, status_n, expected_return, has_cont, has_break): ''' Create the statements in charge of gathering control flow information for the static_if result, and executes the expected control flow instruction ''' if expected_return: assign = cont_ass = [ast.Assign( [ast.Tuple(expected_return, ast.Store())], ast.Name(cont_n, ast.Load(), 
None, None), None)] else: assign = cont_ass = [] if has_cont: cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None, None), [ast.Eq()], [ast.Constant(LOOP_CONT, None)]) cont_ass = [ast.If(cmpr, deepcopy(assign) + [ast.Continue()], cont_ass)] if has_break: cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None, None), [ast.Eq()], [ast.Constant(LOOP_BREAK, None)]) cont_ass = [ast.If(cmpr, deepcopy(assign) + [ast.Break()], cont_ass)] return cont_ass def visit_If(self, node): if node.test not in self.static_expressions: return self.generic_visit(node) imported_ids = self.gather(ImportedIds, node) assigned_ids_left = self.escaping_ids(node, node.body) assigned_ids_right = self.escaping_ids(node, node.orelse) assigned_ids_both = assigned_ids_left.union(assigned_ids_right) imported_ids.update(i for i in assigned_ids_left if i not in assigned_ids_right) imported_ids.update(i for i in assigned_ids_right if i not in assigned_ids_left) imported_ids = sorted(imported_ids) assigned_ids = sorted(assigned_ids_both) fbody = self.make_fake(node.body) true_has_return = self.gather(HasReturn, fbody) true_has_break = self.gather(HasBreak, fbody) true_has_cont = self.gather(HasContinue, fbody) felse = self.make_fake(node.orelse) false_has_return = self.gather(HasReturn, felse) false_has_break = self.gather(HasBreak, felse) false_has_cont = self.gather(HasContinue, felse) has_return = true_has_return or false_has_return has_break = true_has_break or false_has_break has_cont = true_has_cont or false_has_cont self.generic_visit(node) func_true = outline(self.true_name(), imported_ids, assigned_ids, node.body, has_return, has_break, has_cont) func_false = outline(self.false_name(), imported_ids, assigned_ids, node.orelse, has_return, has_break, has_cont) self.new_functions.extend((func_true, func_false)) actual_call = self.make_dispatcher(node.test, func_true, func_false, imported_ids) # variable modified within the static_if expected_return = [ast.Name(ii, ast.Store(), None, None) for ii 
in assigned_ids] self.update = True # name for various variables resulting from the static_if n = len(self.new_functions) status_n = "$status{}".format(n) return_n = "$return{}".format(n) cont_n = "$cont{}".format(n) if has_return: cfg = self.cfgs[-1] always_return = all(isinstance(x, (ast.Return, ast.Yield)) for x in cfg[node]) always_return &= true_has_return and false_has_return fast_return = [ast.Name(status_n, ast.Store(), None, None), ast.Name(return_n, ast.Store(), None, None), ast.Name(cont_n, ast.Store(), None, None)] if always_return: return [ast.Assign([ast.Tuple(fast_return, ast.Store())], actual_call, None), ast.Return(ast.Name(return_n, ast.Load(), None, None))] else: cont_ass = self.make_control_flow_handlers(cont_n, status_n, expected_return, has_cont, has_break) cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None, None), [ast.Eq()], [ast.Constant(EARLY_RET, None)]) return [ast.Assign([ast.Tuple(fast_return, ast.Store())], actual_call, None), ast.If(cmpr, [ast.Return(ast.Name(return_n, ast.Load(), None, None))], cont_ass)] elif has_break or has_cont: cont_ass = self.make_control_flow_handlers(cont_n, status_n, expected_return, has_cont, has_break) fast_return = [ast.Name(status_n, ast.Store(), None, None), ast.Name(cont_n, ast.Store(), None, None)] return [ast.Assign([ast.Tuple(fast_return, ast.Store())], actual_call, None)] + cont_ass elif expected_return: return ast.Assign([ast.Tuple(expected_return, ast.Store())], actual_call, None) else: return ast.Expr(actual_call) class SplitStaticExpression(Transformation): def __init__(self): super(SplitStaticExpression, self).__init__(StaticExpressions) def visit_Cond(self, node): ''' generic expression splitting algorithm. Should work for ifexp and if using W(rap) and U(n)W(rap) to manage difference between expr and stmt The idea is to split a BinOp in three expressions: 1. a (possibly empty) non-static expr 2. an expr containing a static expr 3. 
a (possibly empty) non-static expr Once split, the if body is refactored to keep the semantic, and then recursively split again, until all static expr are alone in a test condition ''' NodeTy = type(node) if NodeTy is ast.IfExp: def W(x): return x def UW(x): return x else: def W(x): return [x] def UW(x): return x[0] has_static_expr = self.gather(HasStaticExpression, node.test) if not has_static_expr: return self.generic_visit(node) if node.test in self.static_expressions: return self.generic_visit(node) if not isinstance(node.test, ast.BinOp): return self.generic_visit(node) before, static = [], [] values = [node.test.right, node.test.left] def has_static_expression(n): return self.gather(HasStaticExpression, n) while values and not has_static_expression(values[-1]): before.append(values.pop()) while values and has_static_expression(values[-1]): static.append(values.pop()) after = list(reversed(values)) test_before = NodeTy(None, None, None) if before: assert len(before) == 1 test_before.test = before[0] test_static = NodeTy(None, None, None) if static: test_static.test = static[0] if len(static) > 1: if after: assert len(after) == 1 after = [ast.BinOp(static[1], node.test.op, after[0])] else: after = static[1:] test_after = NodeTy(None, None, None) if after: assert len(after) == 1 test_after.test = after[0] if isinstance(node.test.op, ast.BitAnd): if after: test_after.body = deepcopy(node.body) test_after.orelse = deepcopy(node.orelse) test_after = W(test_after) else: test_after = deepcopy(node.body) if static: test_static.body = test_after test_static.orelse = deepcopy(node.orelse) test_static = W(test_static) else: test_static = test_after if before: test_before.body = test_static test_before.orelse = node.orelse node = test_before else: node = UW(test_static) elif isinstance(node.test.op, ast.BitOr): if after: test_after.body = deepcopy(node.body) test_after.orelse = deepcopy(node.orelse) test_after = W(test_after) else: test_after = deepcopy(node.orelse) if 
static: test_static.body = deepcopy(node.body) test_static.orelse = test_after test_static = W(test_static) else: test_static = test_after if before: test_before.body = deepcopy(node.body) test_before.orelse = test_static node = test_before else: node = UW(test_static) else: raise PythranSyntaxError("operator not supported in a static if", node) self.update = True return self.generic_visit(node) visit_If = visit_IfExp = visit_Cond pythran-0.10.0+ds2/pythran/transformations/normalize_tuples.py000066400000000000000000000164041416264035500246400ustar00rootroot00000000000000""" NormalizeTuples removes implicit variable -> tuple conversion. """ from pythran.analyses import Identifiers from pythran.passmanager import Transformation import gast as ast from functools import reduce from collections import OrderedDict from copy import deepcopy class ConvertToTuple(ast.NodeTransformer): def __init__(self, tuple_id, renamings): self.tuple_id = tuple_id self.renamings = renamings def visit_Name(self, node): if node.id in self.renamings: nnode = reduce( lambda x, y: ast.Subscript( x, ast.Constant(y, None), ast.Load()), self.renamings[node.id], ast.Name(self.tuple_id, ast.Load(), None, None) ) nnode.ctx = node.ctx return nnode return node class NormalizeTuples(Transformation): """ Remove implicit tuple -> variable conversion. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(): a=(1,2.) 
; i,j = a") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(NormalizeTuples, node) >>> print(pm.dump(backend.Python, node)) def foo(): a = (1, 2.0) i = a[0] j = a[1] """ tuple_name = "__tuple" def __init__(self): Transformation.__init__(self) def get_new_id(self): i = 0 while 1: new_id = "{}{}".format(NormalizeTuples.tuple_name, i) if new_id not in self.ids: self.ids.add(new_id) return new_id else: i += 1 def traverse_tuples(self, node, state, renamings): if isinstance(node, ast.Name): if state: renamings[node.id] = state self.update = True elif isinstance(node, ast.Tuple) or isinstance(node, ast.List): [self.traverse_tuples(n, state + (i,), renamings) for i, n in enumerate(node.elts)] elif isinstance(node, (ast.Subscript, ast.Attribute)): if state: renamings[node] = state self.update = True else: raise NotImplementedError def visit_comprehension(self, node): node = self.generic_visit(node) renamings = OrderedDict() self.traverse_tuples(node.target, (), renamings) if renamings: self.update = True return self.get_new_id(), renamings else: return node def visit_AnyComp(self, node, *fields): for field in fields: setattr(node, field, self.visit(getattr(node, field))) generators = [self.visit(generator) for generator in node.generators] nnode = node for i, g in enumerate(generators): if isinstance(g, tuple): gtarget = "{0}{1}".format(g[0], i) nnode.generators[i].target = ast.Name( gtarget, nnode.generators[i].target.ctx, None, None) nnode = ConvertToTuple(gtarget, g[1]).visit(nnode) self.update = True for field in fields: setattr(node, field, getattr(nnode, field)) node.generators = nnode.generators return node def visit_ListComp(self, node): return self.visit_AnyComp(node, 'elt') def visit_SetComp(self, node): return self.visit_AnyComp(node, 'elt') def visit_DictComp(self, node): return self.visit_AnyComp(node, 'key', 'value') def visit_GeneratorExp(self, node): return self.visit_AnyComp(node, 'elt') def visit_Lambda(self, node): 
self.generic_visit(node) for i, arg in enumerate(node.args.args): renamings = OrderedDict() self.traverse_tuples(arg, (), renamings) if renamings: nname = self.get_new_id() node.args.args[i] = ast.Name(nname, ast.Param(), None, None) node.body = ConvertToTuple(nname, renamings).visit(node.body) return node def visit_Assign(self, node): self.generic_visit(node) # if the rhs is an identifier, we don't need to duplicate it # otherwise, better duplicate it... no_tmp = isinstance(node.value, (ast.Name, ast.Attribute)) extra_assign = [] if no_tmp else [node] for i, t in enumerate(node.targets): if isinstance(t, ast.Tuple) or isinstance(t, ast.List): renamings = OrderedDict() self.traverse_tuples(t, (), renamings) if renamings: if no_tmp: gstore = deepcopy(node.value) else: gstore = ast.Name(self.get_new_id(), ast.Store(), None, None) gload = deepcopy(gstore) gload.ctx = ast.Load() node.targets[i] = gstore for rename, state in renamings.items(): nnode = reduce( lambda x, y: ast.Subscript( x, ast.Constant(y, None), ast.Load()), state, gload) if isinstance(rename, str): extra_assign.append( ast.Assign( [ast.Name(rename, ast.Store(), None, None)], nnode, None)) else: extra_assign.append(ast.Assign([rename], nnode, None)) return extra_assign or node def visit_For(self, node): target = node.target if isinstance(target, ast.Tuple) or isinstance(target, ast.List): renamings = OrderedDict() self.traverse_tuples(target, (), renamings) if renamings: gtarget = self.get_new_id() node.target = ast.Name(gtarget, node.target.ctx, None, None) for rename, state in renamings.items(): nnode = reduce( lambda x, y: ast.Subscript( x, ast.Constant(y, None), ast.Load()), state, ast.Name(gtarget, ast.Load(), None, None)) if isinstance(rename, str): node.body.insert(0, ast.Assign( [ast.Name(rename, ast.Store(), None, None)], nnode, None) ) else: node.body.insert(0, ast.Assign([rename], nnode, None)) self.generic_visit(node) return node def visit_FunctionDef(self, node): self.ids = 
self.gather(Identifiers, node) return self.generic_visit(node) pythran-0.10.0+ds2/pythran/transformations/remove_comprehension.py000066400000000000000000000131161416264035500254670ustar00rootroot00000000000000""" RemoveComprehension turns list comprehension into function calls. """ from pythran.analyses import ImportedIds from pythran.passmanager import Transformation import pythran.metadata as metadata import gast as ast from functools import reduce class RemoveComprehension(Transformation): """ Turns all list comprehension from a node into new function calls. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("[x*x for x in (1,2,3)]") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RemoveComprehension, node) >>> print(pm.dump(backend.Python, node)) list_comprehension0() def list_comprehension0(): __target = builtins.list() for x in (1, 2, 3): builtins.list.append(__target, (x * x)) return __target """ def __init__(self): self.count = 0 Transformation.__init__(self) @staticmethod def nest_reducer(x, g): """ Create a ast.For node from a comprehension and another node. g is an ast.comprehension. x is the code that have to be executed. Examples -------- >> [i for i in range(2)] Becomes >> for i in range(2): >> ... x code with if clauses ... It is a reducer as it can be call recursively for mutli generator. Ex : >> [i, j for i in range(2) for j in range(4)] """ def wrap_in_ifs(node, ifs): """ Wrap comprehension content in all possibles if clauses. Examples -------- >> [i for i in range(2) if i < 3 if 0 < i] Becomes >> for i in range(2): >> if i < 3: >> if 0 < i: >> ... the code from `node` ... Note the nested ifs clauses. 
""" return reduce(lambda n, if_: ast.If(if_, [n], []), ifs, node) return ast.For(g.target, g.iter, [wrap_in_ifs(x, g.ifs)], [], None) def visit_AnyComp(self, node, comp_type, *path): self.update = True node.elt = self.visit(node.elt) name = "{0}_comprehension{1}".format(comp_type, self.count) self.count += 1 args = self.gather(ImportedIds, node) self.count_iter = 0 starget = "__target" body = reduce(self.nest_reducer, reversed(node.generators), ast.Expr( ast.Call( reduce(lambda x, y: ast.Attribute(x, y, ast.Load()), path[1:], ast.Name(path[0], ast.Load(), None, None)), [ast.Name(starget, ast.Load(), None, None), node.elt], [], ) ) ) # add extra metadata to this node metadata.add(body, metadata.Comprehension(starget)) init = ast.Assign( [ast.Name(starget, ast.Store(), None, None)], ast.Call( ast.Attribute( ast.Name('builtins', ast.Load(), None, None), comp_type, ast.Load() ), [], [],), None) result = ast.Return(ast.Name(starget, ast.Load(), None, None)) sargs = [ast.Name(arg, ast.Param(), None, None) for arg in args] fd = ast.FunctionDef(name, ast.arguments(sargs, [], None, [], [], None, []), [init, body, result], [], None, None) metadata.add(fd, metadata.Local()) self.ctx.module.body.append(fd) return ast.Call( ast.Name(name, ast.Load(), None, None), [ast.Name(arg.id, ast.Load(), None, None) for arg in sargs], [], ) # no sharing ! def visit_ListComp(self, node): return self.visit_AnyComp(node, "list", "builtins", "list", "append") def visit_SetComp(self, node): return self.visit_AnyComp(node, "set", "builtins", "set", "add") def visit_DictComp(self, node): # this is a quickfix to match visit_AnyComp signature # potential source of improvement there! 
node.elt = ast.List( [ast.Tuple([node.key, node.value], ast.Load())], ast.Load() ) return self.visit_AnyComp(node, "dict", "__dispatch__", "update") def visit_GeneratorExp(self, node): self.update = True node.elt = self.visit(node.elt) name = "generator_expression{0}".format(self.count) self.count += 1 args = self.gather(ImportedIds, node) self.count_iter = 0 body = reduce(self.nest_reducer, reversed(node.generators), ast.Expr(ast.Yield(node.elt)) ) sargs = [ast.Name(arg, ast.Param(), None, None) for arg in args] fd = ast.FunctionDef(name, ast.arguments(sargs, [], None, [], [], None, []), [body], [], None, None) metadata.add(fd, metadata.Local()) self.ctx.module.body.append(fd) return ast.Call( ast.Name(name, ast.Load(), None, None), [ast.Name(arg.id, ast.Load(), None, None) for arg in sargs], [], ) # no sharing ! pythran-0.10.0+ds2/pythran/transformations/remove_fstrings.py000066400000000000000000000035021416264035500244530ustar00rootroot00000000000000"""Turns f-strings to format syntax with modulus""" import gast as ast from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError class RemoveFStrings(Transformation, ast.NodeTransformer): """Turns f-strings to format syntax with modulus >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("f'a = {1+1:4d}; b = {b:s};'") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RemoveFStrings, node) >>> print(pm.dump(backend.Python, node)) ('a = %4d; b = %s;' % ((1 + 1), b)) """ def visit_JoinedStr(self, node): if len(node.values) == 1 and not isinstance( node.values[0], ast.FormattedValue ): # f-strings with no reference to variable (like `f"bar"`, see #1767) return node.values[0] if not any( isinstance(value, ast.FormattedValue) for value in node.values ): # nothing to do (not a f-string) return node base_str = "" elements = [] for value in node.values: if isinstance(value, ast.Constant): base_str += value.value.replace("%", "%%") elif 
isinstance(value, ast.FormattedValue): base_str += "%" if value.format_spec is None: raise PythranSyntaxError( "f-strings without format specifier not supported", value ) base_str += value.format_spec.values[0].value elements.append(value.value) else: raise NotImplementedError return ast.BinOp( left=ast.Constant(value=base_str, kind=None), op=ast.Mod(), right=ast.Tuple(elts=elements, ctx=ast.Load()), ) pythran-0.10.0+ds2/pythran/transformations/remove_lambdas.py000066400000000000000000000063571416264035500242320ustar00rootroot00000000000000""" RemoveLambdas turns lambda into regular functions. """ from pythran.analyses import GlobalDeclarations, ImportedIds from pythran.passmanager import Transformation from pythran.tables import MODULES from pythran.conversion import mangle import pythran.metadata as metadata from copy import copy import gast as ast class _LambdaRemover(ast.NodeTransformer): def __init__(self, parent, prefix): super(_LambdaRemover, self).__init__() self.prefix = prefix self.parent = parent def __getattr__(self, attr): return getattr(self.parent, attr) def visit_Lambda(self, node): if MODULES['functools'] not in self.global_declarations.values(): import_ = ast.Import([ast.alias('functools', mangle('functools'))]) self.imports.append(import_) functools_module = MODULES['functools'] self.global_declarations[mangle('functools')] = functools_module self.generic_visit(node) forged_name = "{0}_lambda{1}".format( self.prefix, len(self.lambda_functions)) ii = self.gather(ImportedIds, node) ii.difference_update(self.lambda_functions) # remove current lambdas binded_args = [ast.Name(iin, ast.Load(), None, None) for iin in sorted(ii)] node.args.args = ([ast.Name(iin, ast.Param(), None, None) for iin in sorted(ii)] + node.args.args) forged_fdef = ast.FunctionDef( forged_name, copy(node.args), [ast.Return(node.body)], [], None, None) metadata.add(forged_fdef, metadata.Local()) self.lambda_functions.append(forged_fdef) self.global_declarations[forged_name] = 
forged_fdef proxy_call = ast.Name(forged_name, ast.Load(), None, None) if binded_args: return ast.Call( ast.Attribute( ast.Name(mangle('functools'), ast.Load(), None, None), "partial", ast.Load() ), [proxy_call] + binded_args, []) else: return proxy_call class RemoveLambdas(Transformation): """ Turns lambda into top-level functions. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(y): lambda x:y+x") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RemoveLambdas, node) >>> print(pm.dump(backend.Python, node)) import functools as __pythran_import_functools def foo(y): __pythran_import_functools.partial(foo_lambda0, y) def foo_lambda0(y, x): return (y + x) """ def __init__(self): super(RemoveLambdas, self).__init__(GlobalDeclarations) def visit_Module(self, node): self.lambda_functions = list() self.imports = list() self.generic_visit(node) node.body = self.imports + node.body + self.lambda_functions self.update |= bool(self.imports) or bool(self.lambda_functions) return node def visit_FunctionDef(self, node): lr = _LambdaRemover(self, node.name) node.body = [lr.visit(n) for n in node.body] return node pythran-0.10.0+ds2/pythran/transformations/remove_named_arguments.py000066400000000000000000000120131416264035500257620ustar00rootroot00000000000000""" RemoveNamedArguments turns named arguments into regular ones. 
""" from pythran.analyses import Aliases from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError from pythran.tables import MODULES import gast as ast from copy import deepcopy def handle_special_calls(func_alias, node): if func_alias is MODULES['numpy']['arange']: if len(node.args) == 1: node.args.insert(0, ast.Constant(0, None)) class RemoveNamedArguments(Transformation): ''' Replace call with named arguments to regular calls >>> import gast as ast >>> from pythran import passmanager, backend >>> code = 'def foo(x, y): return x + y\\ndef bar(z): return foo(y=z, x=0)' >>> node = ast.parse(code) >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RemoveNamedArguments, node) >>> print(pm.dump(backend.Python, node)) def foo(x, y): return (x + y) def bar(z): return foo(0, z) ''' def __init__(self): super(RemoveNamedArguments, self).__init__(Aliases) def handle_keywords(self, func, node, offset=0): ''' Gather keywords to positional argument information Assumes the named parameter exist, raises a KeyError otherwise ''' func_argument_names = {} for i, arg in enumerate(func.args.args[offset:]): assert isinstance(arg, ast.Name) func_argument_names[arg.id] = i func_argument_kwonly_names = {} for i, arg in enumerate(func.args.kwonlyargs): assert isinstance(arg, ast.Name) func_argument_kwonly_names[arg.id] = i nargs = len(func.args.args) - offset defaults = func.args.defaults keywords = {func_argument_names[kw.arg]: kw.value for kw in node.keywords if kw.arg not in func_argument_kwonly_names} keywords_only = [] nb_kw = len(node.keywords) for i, kw in enumerate(list(reversed(node.keywords))): if kw.arg in func_argument_kwonly_names: keywords_only.append((func_argument_kwonly_names[kw.arg], kw.value)) node.keywords.pop(nb_kw - i - 1) keywords_only = [v for _, v in sorted(keywords_only)] extra_keyword_offset = max(keywords.keys()) if keywords else 0 node.args.extend([None] * (1 + extra_keyword_offset - len(node.args))) 
replacements = {} for index, arg in enumerate(node.args): if arg is None: if index in keywords: replacements[index] = deepcopy(keywords[index]) else: # must be a default value replacements[index] = deepcopy(defaults[index - nargs]) if not keywords_only: return replacements node.args.append(ast.Call( ast.Attribute( ast.Attribute( ast.Name("builtins", ast.Load(), None, None), "pythran", ast.Load()), "kwonly", ast.Load()), [], []) ) node.args.extend(keywords_only) return replacements def visit_Call(self, node): if node.keywords: self.update = True aliases = self.aliases[node.func] assert aliases, "at least one alias" # all aliases should have the same structural type... # call to self.handle_keywords raises an exception otherwise try: replacements = {} for func_alias in aliases: handle_special_calls(func_alias, node) if func_alias is None: # aliasing computation failed pass elif isinstance(func_alias, ast.Call): # nested function # func_alias looks like functools.partial(foo, a) # so we reorder using alias for 'foo' offset = len(func_alias.args) - 1 call = func_alias.args[0] for func_alias in self.aliases[call]: replacements = self.handle_keywords(func_alias, node, offset) else: replacements = self.handle_keywords(func_alias, node) # if we reach this point, we should have a replacement # candidate, or nothing structural typing issues would have # raised an exception in handle_keywords if replacements: for index, value in replacements.items(): node.args[index] = value node.keywords = [] except KeyError as ve: err = ("function uses an unknown (or unsupported) keyword " "argument `{}`".format(ve.args[0])) raise PythranSyntaxError(err, node) return self.generic_visit(node) pythran-0.10.0+ds2/pythran/transformations/remove_nested_functions.py000066400000000000000000000077141416264035500261770ustar00rootroot00000000000000""" RemoveNestedFunctions turns nested function into top-level functions. 
""" from pythran.analyses import GlobalDeclarations, ImportedIds from pythran.passmanager import Transformation from pythran.tables import MODULES from pythran.conversion import mangle import pythran.metadata as metadata import gast as ast class _NestedFunctionRemover(ast.NodeTransformer): def __init__(self, parent): ast.NodeTransformer.__init__(self) self.parent = parent self.identifiers = set(self.global_declarations.keys()) def __getattr__(self, attr): return getattr(self.parent, attr) def visit_FunctionDef(self, node): self.update = True if MODULES['functools'] not in self.global_declarations.values(): import_ = ast.Import([ast.alias('functools', mangle('functools'))]) self.ctx.module.body.insert(0, import_) functools_module = MODULES['functools'] self.global_declarations[mangle('functools')] = functools_module self.ctx.module.body.append(node) former_name = node.name seed = 0 new_name = "pythran_{}{}" while new_name.format(former_name, seed) in self.identifiers: seed += 1 new_name = new_name.format(former_name, seed) self.identifiers.add(new_name) ii = self.gather(ImportedIds, node) binded_args = [ast.Name(iin, ast.Load(), None, None) for iin in sorted(ii)] node.args.args = ([ast.Name(iin, ast.Param(), None, None) for iin in sorted(ii)] + node.args.args) metadata.add(node, metadata.Local()) class Renamer(ast.NodeTransformer): def visit_Call(self, node): self.generic_visit(node) if (isinstance(node.func, ast.Name) and node.func.id == former_name): node.func.id = new_name node.args = ( [ast.Name(iin, ast.Load(), None, None) for iin in sorted(ii)] + node.args ) return node Renamer().visit(node) node.name = new_name self.global_declarations[node.name] = node proxy_call = ast.Name(new_name, ast.Load(), None, None) new_node = ast.Assign( [ast.Name(former_name, ast.Store(), None, None)], ast.Call( ast.Attribute( ast.Name(mangle('functools'), ast.Load(), None, None), "partial", ast.Load() ), [proxy_call] + binded_args, [], ), None) self.generic_visit(node) return 
new_node class RemoveNestedFunctions(Transformation): """ Replace nested function by top-level functions. Also add a call to a bind intrinsic that generates a local function with some arguments binded. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(x):\\n def bar(y): return x+y\\n bar(12)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(RemoveNestedFunctions, node) >>> print(pm.dump(backend.Python, node)) import functools as __pythran_import_functools def foo(x): bar = __pythran_import_functools.partial(pythran_bar0, x) bar(12) def pythran_bar0(x, y): return (x + y) """ def __init__(self): super(RemoveNestedFunctions, self).__init__(GlobalDeclarations) def visit_Module(self, node): # keep original node as it's updated by _NestedFunctionRemover for stmt in node.body: self.visit(stmt) return node def visit_FunctionDef(self, node): nfr = _NestedFunctionRemover(self) node.body = [nfr.visit(stmt) for stmt in node.body] self.update |= nfr.update return node pythran-0.10.0+ds2/pythran/transformations/unshadow_parameters.py000066400000000000000000000040641416264035500253160ustar00rootroot00000000000000""" UnshadowParameters prevents the shadow parameter phenomenon """ from pythran.analyses import Identifiers from pythran.passmanager import Transformation import gast as ast class UnshadowParameters(Transformation): ''' Prevents parameter shadowing by creating new variable. 
>>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(a): a = 1") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(UnshadowParameters, node) >>> print(pm.dump(backend.Python, node)) def foo(a): a_ = a a_ = 1 ''' def __init__(self): Transformation.__init__(self, Identifiers) def visit_FunctionDef(self, node): self.argsid = {arg.id for arg in node.args.args} self.renaming = {} [self.visit(n) for n in node.body] # do it twice to make sure all renaming are done [self.visit(n) for n in node.body] for k, v in self.renaming.items(): node.body.insert( 0, ast.Assign( [ast.Name(v, ast.Store(), None, None)], ast.Name(k, ast.Load(), None, None), None) ) self.update |= bool(self.renaming) return node def update_name(self, node): if isinstance(node, ast.Name) and node.id in self.argsid: if node.id not in self.renaming: new_name = node.id while new_name in self.identifiers: new_name = new_name + "_" self.renaming[node.id] = new_name def visit_Assign(self, node): for target in node.targets: self.update_name(target) try: self.generic_visit(node) except AttributeError: pass return node def visit_AugAssign(self, node): self.update_name(node.target) return self.generic_visit(node) def visit_Name(self, node): if node.id in self.renaming: node.id = self.renaming[node.id] return node pythran-0.10.0+ds2/pythran/types/000077500000000000000000000000001416264035500166005ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/types/__init__.py000066400000000000000000000000671416264035500207140ustar00rootroot00000000000000""" Submodule handling type management en Pythran. """ pythran-0.10.0+ds2/pythran/types/conversion.py000066400000000000000000000117421416264035500213440ustar00rootroot00000000000000""" Module to convert Python type to Pythonic type. 
""" from numpy import int8, int16, int32, int64, intp, intc from numpy import uint8, uint16, uint32, uint64, uintp, uintc from numpy import float64, float32, complex64, complex128 import numpy from pythran.typing import List, Dict, Set, Tuple, NDArray, Pointer, Fun PYTYPE_TO_CTYPE_TABLE = { numpy.uint: 'npy_uint', # complex: 'std::complex', bool: 'bool', int: 'long', float: 'double', str: 'pythonic::types::str', slice: 'pythonic::types::slice', type(None): 'pythonic::types::none_type', intc: 'int', intp: 'npy_intp', int64: 'npy_int64', int32: 'npy_int32', int16: 'npy_int16', int8: 'npy_int8', uintc: 'unsigned', uintp: 'npy_uintp', uint64: 'npy_uint64', uint32: 'npy_uint32', uint16: 'npy_uint16', uint8: 'npy_uint8', float64: 'double', float32: 'float', complex128: 'std::complex', complex64: 'std::complex', } try: from numpy import float128, complex256 PYTYPE_TO_CTYPE_TABLE[float128] = 'long double' PYTYPE_TO_CTYPE_TABLE[complex256] = 'std::complex' except ImportError: pass TYPE_TO_SUFFIX = { int: "L", } def pytype_to_ctype(t): """ Python -> pythonic type binding. 
""" if isinstance(t, List): return 'pythonic::types::list<{0}>'.format( pytype_to_ctype(t.__args__[0]) ) elif isinstance(t, Set): return 'pythonic::types::set<{0}>'.format( pytype_to_ctype(t.__args__[0]) ) elif isinstance(t, Dict): tkey, tvalue = t.__args__ return 'pythonic::types::dict<{0},{1}>'.format(pytype_to_ctype(tkey), pytype_to_ctype(tvalue)) elif isinstance(t, Tuple): return 'decltype(pythonic::types::make_tuple({0}))'.format( ", ".join('std::declval<{}>()'.format(pytype_to_ctype(p)) for p in t.__args__) ) elif isinstance(t, NDArray): dtype = pytype_to_ctype(t.__args__[0]) ndim = len(t.__args__) - 1 shapes = ','.join(('long' if s.stop == -1 or s.stop is None else 'std::integral_constant'.format( s.stop) ) for s in t.__args__[1:]) pshape = 'pythonic::types::pshape<{0}>'.format(shapes) arr = 'pythonic::types::ndarray<{0},{1}>'.format( dtype, pshape) if t.__args__[1].start == -1: return 'pythonic::types::numpy_texpr<{0}>'.format(arr) elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]): slices = ", ".join(['pythonic::types::normalized_slice'] * ndim) return 'pythonic::types::numpy_gexpr<{0},{1}>'.format(arr, slices) else: return arr elif isinstance(t, Pointer): return 'pythonic::types::pointer<{0}>'.format( pytype_to_ctype(t.__args__[0]) ) elif isinstance(t, Fun): return 'pythonic::types::cfun<{0}({1})>'.format( pytype_to_ctype(t.__args__[-1]), ", ".join(pytype_to_ctype(arg) for arg in t.__args__[:-1]), ) elif t in PYTYPE_TO_CTYPE_TABLE: return PYTYPE_TO_CTYPE_TABLE[t] else: raise NotImplementedError("{0}:{1}".format(type(t), t)) def pytype_to_pretty_type(t): """ Python -> docstring type. 
""" if isinstance(t, List): return '{0} list'.format(pytype_to_pretty_type(t.__args__[0])) elif isinstance(t, Set): return '{0} set'.format(pytype_to_pretty_type(t.__args__[0])) elif isinstance(t, Dict): tkey, tvalue = t.__args__ return '{0}:{1} dict'.format(pytype_to_pretty_type(tkey), pytype_to_pretty_type(tvalue)) elif isinstance(t, Tuple): return '({0})'.format( ", ".join(pytype_to_pretty_type(p) for p in t.__args__) ) elif isinstance(t, NDArray): dtype = pytype_to_pretty_type(t.__args__[0]) ndim = len(t.__args__) - 1 arr = '{0}[{1}]'.format( dtype, ','.join(':' if s.stop in (-1, None) else str(s.stop) for s in t.__args__[1:])) # it's a transpose! if t.__args__[1].start == -1: return '{} order(F)'.format(arr) elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]): return '{0}[{1}]'.format(dtype, ','.join(['::'] * ndim)) else: return arr elif isinstance(t, Pointer): dtype = pytype_to_pretty_type(t.__args__[0]) return '{}*'.format(dtype) elif isinstance(t, Fun): rtype = pytype_to_pretty_type(t.__args__[-1]) argtypes = [pytype_to_pretty_type(arg) for arg in t.__args__[:-1]] return '{}({})'.format(rtype, ", ".join(argtypes)) elif t in PYTYPE_TO_CTYPE_TABLE: return t.__name__ else: raise NotImplementedError("{0}:{1}".format(type(t), t)) pythran-0.10.0+ds2/pythran/types/reorder.py000066400000000000000000000104361416264035500206200ustar00rootroot00000000000000""" Reorder top-level functions to prevent circular type dependencies. 
""" import gast as ast from pythran.analyses import OrderedGlobalDeclarations from pythran.passmanager import Transformation from pythran.syntax import PythranSyntaxError from pythran.types.type_dependencies import TypeDependencies import pythran.graph as graph def topological_sort(G, nbunch): # nonrecursive version seen = set() order = [] explored = set() if nbunch is None: nbunch = G.nodes() for v in nbunch: # process all vertices in G if v in explored: continue fringe = [v] # nodes yet to look at while fringe: w = fringe[-1] # depth first search if w in explored: # already looked down this branch fringe.pop() continue seen.add(w) # mark as seen # Check successors for cycles and for new nodes new_nodes = [] for n in G[w]: if n not in explored: if n in seen: # CYCLE !! raise graph.Unfeasible( "Graph contains a cycle at %s." % n) new_nodes.append(n) if new_nodes: # Add new_nodes to fringe fringe.extend(new_nodes) else: # No new nodes so w is fully explored explored.add(w) order.append(w) fringe.pop() # done considering this node return list(reversed(order)) class Reorder(Transformation): """ Reorder top-level functions to prevent circular type dependencies. """ def __init__(self): """ Trigger others analysis informations. """ super(Reorder, self).__init__(TypeDependencies, OrderedGlobalDeclarations) def prepare(self, node): """ Format type dependencies information to use if for reordering. """ super(Reorder, self).prepare(node) candidates = self.type_dependencies.successors( TypeDependencies.NoDeps) # We first select function which may have a result without calling any # others functions. # Then we check if no loops type dependencies exists. If it exists, we # can safely remove the dependency as it could be compute without this # information. 
# As we can compute type for this function, successors can potentially # be computed # FIXME: This is false in some cases # # def bar(i): # if i > 0: # return foo(i) # else: # return [] # # def foo(i): # return [len(bar(i-1)) + len(bar(i - 2))] # # If we check for function without deps first, we will pick bar and say # it returns empty list while candidates: new_candidates = list() for n in candidates: # remove edges that imply a circular dependency for p in list(self.type_dependencies.predecessors(n)): if graph.has_path(self.type_dependencies, n, p): self.type_dependencies.remove_edge(p, n) if n not in self.type_dependencies.successors(n): new_candidates.extend(self.type_dependencies.successors(n)) candidates = new_candidates def visit_Module(self, node): """ Keep everything but function definition then add sorted functions. Most of the time, many function sort work so we use function calldepth as a "sort hint" to simplify typing. """ newbody = list() olddef = list() for stmt in node.body: if isinstance(stmt, ast.FunctionDef): olddef.append(stmt) else: newbody.append(stmt) try: newdef = topological_sort( self.type_dependencies, self.ordered_global_declarations) newdef = [f for f in newdef if isinstance(f, ast.FunctionDef)] except graph.Unfeasible: raise PythranSyntaxError("Infinite function recursion") assert set(newdef) == set(olddef), "A function have been lost..." 
node.body = newbody + newdef self.update = True return node pythran-0.10.0+ds2/pythran/types/signature.py000066400000000000000000000101431416264035500211520ustar00rootroot00000000000000from pythran.typing import List, Dict, Set, Fun, TypeVar from pythran.typing import Union, Iterable def type_dependencies(t): if isinstance(t, TypeVar): return {t} else: return set().union(*[type_dependencies(arg) for arg in getattr(t, '__args__', [])]) def dep_builder(type_var, ppal_index, index, t, self, node): if isinstance(t, TypeVar): if t is type_var: # FIXME: this is the second part of the hack below, # FIXME: there's no reason why self.result[node.args[index]] # FIXME: would still be valid in translated context return lambda arg: (arg if index == ppal_index else self.result[node.args[index]]) elif isinstance(t, (List, Set, Iterable, Dict)): return lambda arg: self.builder.IteratorContentType( dep_builder(type_var, ppal_index, index, t.__args__[0], self, node)(arg)) assert False, t class InfeasibleCombiner(Exception): pass def path_to(self, t, deps_builders, node): if isinstance(t, TypeVar): if t in deps_builders: return deps_builders[t] else: raise InfeasibleCombiner() if isinstance(t, List): return lambda arg: self.builder.ListType( path_to(self, t.__args__[0], deps_builders, node)(arg)) if isinstance(t, Set): return lambda arg: self.builder.SetType( path_to(self, t.__args__[0], deps_builders, node)(arg)) if isinstance(t, Dict): return lambda arg: self.builder.DictType( path_to(self, t.__args__[0], deps_builders, node)(arg), path_to(self, t.__args__[1], deps_builders, node)(arg), ) if isinstance(t, Fun): raise InfeasibleCombiner() if isinstance(t, Iterable): # FIXME? raise InfeasibleCombiner() assert False, (t, t.mro()) def build_unary_op(deps, args, self, node): # FIXME: this is a hack, because only the fist dep gets translated # FIXME: in case of interprocedural translation # FIXME: this was the original behavior... 
ppal_index = sorted(deps.values())[0][0][0] deps_builders = {dep: dep_builder(dep, ppal_index, *src[0], self=self, node=node) for dep, src in deps.items()} return path_to(self, args[0], deps_builders, node), ppal_index def build_combiner(signature, deps): sig_args = signature.__args__[:-1] def combiner(self, node): if deps and len(node.args) == len(sig_args): try: unary_op, main_index = build_unary_op(deps, sig_args, self, node) self.combine( node.args[0], node.args[main_index], unary_op=unary_op, register=True, aliasing_type=True) except InfeasibleCombiner: pass return combiner def extract_combiner(signature): if not isinstance(signature, (Fun, Union)): return None if type(signature) is Union: combiners = [extract_combiner(up) for up in signature.__args__] combiners = [cb for cb in combiners if cb] def combiner(self, node): for cb in combiners: cb(self, node) return combiner args = signature.__args__[:-1] if not args: return None deps = type_dependencies(args[0]) if not deps: return None deps_src = dict() for i, arg in enumerate(args[1:]): arg_deps = type_dependencies(arg) common_deps = deps.intersection(arg_deps) for common_dep in common_deps: deps_src.setdefault(common_dep, []).append((i + 1, arg)) return build_combiner(signature, deps_src) pythran-0.10.0+ds2/pythran/types/tog.py000077500000000000000000001372561416264035500177640ustar00rootroot00000000000000# adapted from # http://smallshire.org.uk/sufficientlysmall/2010/04/11/\ # a-hindley-milner-type-inference-implementation-in-python/ import gast as ast from copy import deepcopy from numpy import floating, integer, complexfloating from pythran.tables import MODULES, attributes import pythran.typing as typing from pythran.syntax import PythranSyntaxError from pythran.utils import isnum class PythranTypeError(PythranSyntaxError): "A new type to distinguish general syntax errors from typing issues" class InferenceError(Exception): "Raised if the type inference algorithm cannot infer types successfully" symbol_of 
= { ast.And: 'and', ast.Or: 'or', ast.Add: '+', ast.Sub: '-', ast.Mult: '*', ast.Div: '/', ast.Mod: '%', ast.Pow: '**', ast.LShift: '<<', ast.RShift: '>>', ast.BitOr: '|', ast.BitXor: '^', ast.BitAnd: '&', ast.FloorDiv: '//', ast.Invert: '~', ast.MatMult: '@', ast.Not: '!', ast.UAdd: '+', ast.USub: '-', } NoneType_ = type(None) # =======================================================# # Types and type constructors class TypeVariable(object): """A type variable standing for an arbitrary type. All type variables have a unique id, but names are only assigned lazily, when required. """ _cached_names = {} def __init__(self): self.instance = None self.name = None def __str__(self): if self.instance: return str(self.instance) else: return 'T{}'.format( TypeVariable._cached_names.setdefault( self, len(TypeVariable._cached_names) ) ) class TypeOperator(object): """An n-ary type constructor which builds a new type from old""" def __init__(self, name, types): self.name = name self.types = types def __str__(self): num_types = len(self.types) if num_types == 0: return self.name elif self.name == 'fun': return 'Callable[[{0}], {1}]'.format( ', '.join(map(str, self.types[:-1])), self.types[-1]) elif self.name == 'option': return 'Option[{0}]'.format(self.types[0]) else: return "{0}[{1}]" .format(self.name.capitalize(), ', '.join(map(str, self.types))) class Collection(TypeOperator): def __init__(self, holder_type, key_type, value_type, iter_type): super(Collection, self).__init__("collection", [holder_type, key_type, value_type, iter_type]) def __str__(self): t0 = prune(self.types[0]) if isinstance(t0, TypeVariable): if isinstance(prune(self.types[1]), TypeVariable): return 'Iterable[{}]'.format(self.types[3]) else: return 'Collection[{}, {}]'.format(self.types[1], self.types[2]) if isinstance(t0, TypeOperator) and t0.name == 'traits': if all(isinstance(prune(t), TypeVariable) for t in t0.types): return 'Collection[{}, {}]'.format(self.types[1], self.types[2]) elif 
all(isinstance(prune(t), TypeVariable) for t in t0.types[:1] + t0.types[2:]): t01 = prune(t0.types[1]) if isinstance(t01, TypeOperator) and t01.name == LenTrait.name: return 'Sized' t00 = prune(t0.types[0]) if isinstance(t00, TypeOperator): type_trait = t00.name if type_trait == 'list': return 'List[{}]'.format(self.types[2]) if type_trait == 'set': return 'Set[{}]'.format(self.types[2]) if type_trait == 'dict': return 'Dict[{}, {}]'.format(self.types[1], self.types[2]) if type_trait == 'str': return 'str' if type_trait == 'file': return 'IO[str]' if type_trait == 'tuple': return 'Tuple[{}]'.format(', '.join(map(str, self.types[1:]))) if type_trait == 'array': t01 = prune(t0.types[1]) hasnolen = (isinstance(t01, TypeOperator) and t01.name == NoLenTrait.name) if hasnolen: return str(self.types[2]) def rec(n): pn = prune(n) if isinstance(pn, Collection): traits = prune(pn.types[0]) # a scalar or array? if isinstance(traits, TypeVariable): return pn.types[3], 0 len_trait = prune(traits.types[1]) # an array? haslen = (isinstance(len_trait, TypeOperator) and len_trait.name == LenTrait.name) if haslen: t, n = rec(pn.types[3]) return t, n + 1 # a scalar or array? 
else: return pn.types[2], 0 else: return pn, 0 t, n = rec(self) if isinstance(t, TypeVariable): return 'Array[{} d+, {}]'.format(n, t) else: return 'Array[{}d, {}]'.format(n, t) if type_trait == 'gen': return 'Generator[{}]'.format(self.types[2]) return super(Collection, self).__str__() def TupleTrait(of_types): return TypeOperator('tuple', of_types) ListTrait = TypeOperator('list', []) SetTrait = TypeOperator('set', []) DictTrait = TypeOperator('dict', []) StrTrait = TypeOperator('str', []) FileTrait = TypeOperator('file', []) ArrayTrait = TypeOperator('array', []) GenerableTrait = TypeOperator('gen', []) LenTrait = TypeOperator("len", []) NoLenTrait = TypeOperator("no_len", []) SliceTrait = TypeOperator("slice", []) NoSliceTrait = TypeOperator("no_slice", []) def File(): return Collection(Traits([FileTrait, NoLenTrait, NoSliceTrait]), InvalidKey, Str(), Str()) def List(of_type): return Collection(Traits([ListTrait, LenTrait, SliceTrait]), Integer(), of_type, of_type) def Set(of_type): return Collection(Traits([SetTrait, LenTrait, NoSliceTrait]), InvalidKey, of_type, of_type) def Dict(key_type, value_type): return Collection(Traits([DictTrait, LenTrait, NoSliceTrait]), key_type, value_type, key_type) def Str(rec=6): Next = Str(rec - 1) if rec else TypeVariable() return Collection(Traits([StrTrait, LenTrait, SliceTrait]), Integer(), Next, Next) def Array(of_type, dim): return Collection(Traits([ArrayTrait, LenTrait, SliceTrait]), AnyType, AnyType, Array(of_type, dim - 1) if dim > 1 else of_type) def Iterable(of_type, dim): return Collection(Traits([TypeVariable(), LenTrait, SliceTrait]), AnyType, AnyType, Iterable(of_type, dim - 1) if dim > 1 else of_type) def Generator(of_type): return Collection(Traits([GenerableTrait, NoLenTrait, NoSliceTrait]), InvalidKey, of_type, of_type) def Tuple(of_types): return Collection(Traits([TupleTrait(of_types), LenTrait, SliceTrait]), Integer(), TypeVariable(), TypeVariable()) class Scalar(TypeOperator): def __init__(self, 
types=None): if not isinstance(types, list): dtype = types if dtype == 'complex': types = [ComplexTrait, TypeVariable(), TypeVariable(), TypeVariable()] elif dtype == 'float': types = [TypeVariable(), FloatTrait, TypeVariable(), TypeVariable()] elif dtype == 'int': types = [TypeVariable(), TypeVariable(), IntegerTrait, TypeVariable()] elif dtype == 'bool': types = [TypeVariable(), TypeVariable(), TypeVariable(), BoolTrait] else: assert dtype is None types = [TypeVariable(), TypeVariable(), TypeVariable(), TypeVariable()] super(Scalar, self).__init__('scalar', types) def __str__(self): if isinstance(prune(self.types[0]), TypeOperator): return 'complex' if isinstance(prune(self.types[1]), TypeOperator): return 'float' if isinstance(prune(self.types[2]), TypeOperator): return 'int' if isinstance(prune(self.types[3]), TypeOperator): return 'bool' return 'Scalar' def Complex(): return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]), InvalidKey, Scalar('complex'), InvalidKey) def Float(): return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]), InvalidKey, Scalar('float'), InvalidKey) def Integer(): return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]), InvalidKey, Scalar('int'), InvalidKey) def Bool(): return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]), InvalidKey, Scalar('bool'), InvalidKey) def DType(): return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]), InvalidKey, Scalar(), InvalidKey) def Function(from_types, to_type): """A binary type constructor which builds function types""" return TypeOperator('fun', list(from_types) + [to_type]) def OptionType(of_type): return TypeOperator("option", [of_type]) def Traits(of_types): return TypeOperator("traits", of_types) ExceptionType = TypeOperator("exception", []) # Basic types are constructed with a null type constructor IntegerTrait = TypeOperator("int", []) # any integer FloatTrait = TypeOperator("float", []) # any float ComplexTrait = TypeOperator("complex", []) 
BoolTrait = TypeOperator("bool", []) InvalidKey = TypeOperator("invalid-key", []) # for non-indexable collection NoneType = TypeOperator("none", []) AnyType = TypeOperator("any", []) InvalidType = TypeOperator("invalid-type", []) Slice = TypeOperator("slice", []) # slice def is_none(t): pt = prune(t) return isinstance(pt, TypeOperator) and pt.name == "none" def is_option_type(t): pt = prune(t) return isinstance(pt, TypeOperator) and pt.name == "option" def maybe_array_type(t): pt = prune(t) if isinstance(pt, TypeVariable): return True # maybe an array :-/ if isinstance(pt, TypeOperator) and pt.name == "collection": st = prune(pt.types[0]) if isinstance(st, TypeOperator) and st.name == "traits": tt = prune(st.types[0]) if isinstance(tt, TypeVariable): return True # maybe return isinstance(tt, TypeOperator) and tt.name == "array" return False def is_test_is_none(node): if not isinstance(node, ast.Compare): return False left = node.left comparators = node.comparators ops = node.ops if len(ops) != 1: return False op = ops[0] if type(op) not in (ast.Is, ast.Eq): return False comparator = comparators[0] if not isinstance(comparator, ast.Attribute): return False return comparator.attr == 'None' and isinstance(left, ast.Name) def is_tuple_type(t): pt = prune(t) if isinstance(pt, TypeOperator) and pt.name == "collection": st = prune(pt.types[0]) if isinstance(st, TypeOperator) and st.name == "traits": tt = prune(st.types[0]) return isinstance(tt, TypeOperator) and tt.name == "tuple" return False def is_getattr(node): if not isinstance(node, ast.Call): return False if not isinstance(node.func, ast.Attribute): return False return node.func.attr == 'getattr' class MultiType(object): """A binary type constructor which builds function types""" def __init__(self, types): self.name = 'multitype' self.types = types def __str__(self): return '\n'.join(sorted(map(str, self.types))) def tr(t): def rec_tr(t, env): if isinstance(t, typing.TypeVar): if t in env: return env[t] else: 
env[t] = TypeVariable() return env[t] elif t is typing.Any: return TypeVariable() elif isinstance(t, NoneType_): return NoneType elif t is bool: return Bool() elif issubclass(t, slice): return Slice elif issubclass(t, (complex, complexfloating)): return Complex() elif issubclass(t, (float, floating)): return Float() elif issubclass(t, (int, integer)): return Integer() elif issubclass(t, NoneType_): return NoneType elif t is str: return Str() elif isinstance(t, typing.Generator): return Generator(rec_tr(t.__args__[0], env)) elif isinstance(t, typing.List): return List(rec_tr(t.__args__[0], env)) elif isinstance(t, typing.Optional): return OptionType(rec_tr(t.__args__[0], env)) elif isinstance(t, typing.Set): return Set(rec_tr(t.__args__[0], env)) elif isinstance(t, typing.Dict): return Dict(rec_tr(t.__args__[0], env), rec_tr(t.__args__[1], env)) elif isinstance(t, typing.Tuple): return Tuple([rec_tr(tp, env) for tp in t.__args__]) elif isinstance(t, typing.NDArray): return Array(rec_tr(t.__args__[0], env), len(t.__args__[1:])) elif isinstance(t, typing.Pointer): return Array(rec_tr(t.__args__[0], env), 1) elif isinstance(t, typing.Union): return MultiType([rec_tr(ut, env) for ut in t.__args__]) elif t is typing.File: return File() elif isinstance(t, typing.Iterable): return Collection(TypeVariable(), TypeVariable(), TypeVariable(), rec_tr(t.__args__[0], env)) elif t is typing.Sized: return Collection( Traits([TypeVariable(), LenTrait, TypeVariable()]), TypeVariable(), TypeVariable(), TypeVariable() ) elif isinstance(t, typing.Fun): return Function([rec_tr(at, env) for at in t.__args__[:-1]], rec_tr(t.__args__[-1], env)) else: raise NotImplementedError(t) if isinstance(t, dict): return t elif hasattr(t, 'signature'): return rec_tr(t.signature, {}) else: return rec_tr(t, {}) #### def analyse_body(body, env, non_generic): # first step to gather global symbols for stmt in body: if isinstance(stmt, ast.FunctionDef): new_type = TypeVariable() env[stmt.name] = new_type # 
second to perform local inference for stmt in body: analyse(stmt, env, non_generic) class HasYield(ast.NodeVisitor): def __init__(self): super(HasYield, self).__init__() self.has_yield = False def visit_FunctionDef(self, node): pass def visit_Yield(self, node): self.has_yield = True def analyse(node, env, non_generic=None): """Computes the type of the expression given by node. The type of the node is computed in the context of the context of the supplied type environment env. Data types can be introduced into the language simply by having a predefined set of identifiers in the initial environment. Environment; this way there is no need to change the syntax or more importantly, the type-checking program when extending the language. Args: node: The root of the abstract syntax tree. env: The type environment is a mapping of expression identifier names to type assignments. non_generic: A set of non-generic variables, or None Returns: The computed type of the expression. Raises: InferenceError: The type of the expression could not be inferred, PythranTypeError: InferenceError with user friendly message + location """ if non_generic is None: non_generic = set() # expr if isinstance(node, ast.Name): if isinstance(node.ctx, (ast.Store)): new_type = TypeVariable() non_generic.add(new_type) env[node.id] = new_type return get_type(node.id, env, non_generic) elif isinstance(node, ast.Constant): if isinstance(node.value, str): return Str() elif isinstance(node.value, int): return Integer() elif isinstance(node.value, float): return Float() elif isinstance(node.value, complex): return Complex() elif node.value is None: return NoneType else: raise NotImplementedError elif isinstance(node, ast.Compare): left_type = analyse(node.left, env, non_generic) comparators_type = [analyse(comparator, env, non_generic) for comparator in node.comparators] ops_type = [analyse(op, env, non_generic) for op in node.ops] prev_type = left_type result_type = TypeVariable() for op_type, 
comparator_type in zip(ops_type, comparators_type): try: unify(Function([prev_type, comparator_type], result_type), op_type) prev_type = comparator_type except InferenceError: raise PythranTypeError( "Invalid comparison, between `{}` and `{}`".format( prev_type, comparator_type ), node) return result_type elif isinstance(node, ast.Call): if is_getattr(node): self_type = analyse(node.args[0], env, non_generic) attr_name = node.args[1].value _, attr_signature = attributes[attr_name] attr_type = tr(attr_signature) result_type = TypeVariable() try: unify(Function([self_type], result_type), attr_type) except InferenceError: if isinstance(prune(attr_type), MultiType): msg = 'no attribute found, tried:\n{}'.format(attr_type) else: msg = 'tried {}'.format(attr_type) raise PythranTypeError( "Invalid attribute for getattr call with self" "of type `{}`, {}".format(self_type, msg), node) else: fun_type = analyse(node.func, env, non_generic) arg_types = [analyse(arg, env, non_generic) for arg in node.args] result_type = TypeVariable() try: unify(Function(arg_types, result_type), fun_type) except InferenceError: # recover original type fun_type = analyse(node.func, env, non_generic) if isinstance(prune(fun_type), MultiType): msg = 'no overload found, tried:\n{}'.format(fun_type) else: msg = 'tried {}'.format(fun_type) raise PythranTypeError( "Invalid argument type for function call to " "`Callable[[{}], ...]`, {}" .format(', '.join('{}'.format(at) for at in arg_types), msg), node) return result_type elif isinstance(node, ast.IfExp): test_type = analyse(node.test, env, non_generic) unify(Function([test_type], Bool()), tr(MODULES['builtins']['bool'])) if is_test_is_none(node.test): none_id = node.test.left.id body_env = env.copy() body_env[none_id] = NoneType else: none_id = None body_env = env body_type = analyse(node.body, body_env, non_generic) if none_id: orelse_env = env.copy() if is_option_type(env[none_id]): orelse_env[none_id] = prune(env[none_id]).types[0] else: 
orelse_env[none_id] = TypeVariable() else: orelse_env = env orelse_type = analyse(node.orelse, orelse_env, non_generic) try: return merge_unify(body_type, orelse_type) except InferenceError: raise PythranTypeError( "Incompatible types from different branches:" "`{}` and `{}`".format( body_type, orelse_type ), node ) elif isinstance(node, ast.UnaryOp): operand_type = analyse(node.operand, env, non_generic) op_type = analyse(node.op, env, non_generic) result_type = TypeVariable() try: unify(Function([operand_type], result_type), op_type) return result_type except InferenceError: raise PythranTypeError( "Invalid operand for `{}`: `{}`".format( symbol_of[type(node.op)], operand_type ), node ) elif isinstance(node, ast.BinOp): left_type = analyse(node.left, env, non_generic) op_type = analyse(node.op, env, non_generic) right_type = analyse(node.right, env, non_generic) result_type = TypeVariable() try: unify(Function([left_type, right_type], result_type), op_type) except InferenceError: raise PythranTypeError( "Invalid operand for `{}`: `{}` and `{}`".format( symbol_of[type(node.op)], left_type, right_type), node ) return result_type elif isinstance(node, ast.Pow): return tr(MODULES['numpy']['power']) elif isinstance(node, ast.Sub): return tr(MODULES['operator']['sub']) elif isinstance(node, (ast.USub, ast.UAdd)): return tr(MODULES['operator']['pos']) elif isinstance(node, (ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE, ast.Is, ast.IsNot)): return tr(MODULES['operator']['eq']) elif isinstance(node, (ast.In, ast.NotIn)): contains_sig = tr(MODULES['operator']['contains']) contains_sig.types[:-1] = reversed(contains_sig.types[:-1]) return contains_sig elif isinstance(node, ast.Add): return tr(MODULES['operator']['add']) elif isinstance(node, ast.Mult): return tr(MODULES['operator']['mul']) elif isinstance(node, ast.MatMult): return tr(MODULES['operator']['matmul']) elif isinstance(node, (ast.Div, ast.FloorDiv)): return tr(MODULES['operator']['floordiv']) elif 
isinstance(node, ast.Mod): return tr(MODULES['operator']['mod']) elif isinstance(node, (ast.LShift, ast.RShift)): return tr(MODULES['operator']['lshift']) elif isinstance(node, (ast.BitXor, ast.BitAnd, ast.BitOr)): return tr(MODULES['operator']['lshift']) elif isinstance(node, ast.List): new_type = TypeVariable() for elt in node.elts: elt_type = analyse(elt, env, non_generic) try: unify(new_type, elt_type) except InferenceError: raise PythranTypeError( "Incompatible list element type `{}` and `{}`".format( new_type, elt_type), node ) return List(new_type) elif isinstance(node, ast.Set): new_type = TypeVariable() for elt in node.elts: elt_type = analyse(elt, env, non_generic) try: unify(new_type, elt_type) except InferenceError: raise PythranTypeError( "Incompatible set element type `{}` and `{}`".format( new_type, elt_type), node ) return Set(new_type) elif isinstance(node, ast.Dict): new_key_type = TypeVariable() for key in node.keys: key_type = analyse(key, env, non_generic) try: unify(new_key_type, key_type) except InferenceError: raise PythranTypeError( "Incompatible dict key type `{}` and `{}`".format( new_key_type, key_type), node ) new_value_type = TypeVariable() for value in node.values: value_type = analyse(value, env, non_generic) try: unify(new_value_type, value_type) except InferenceError: raise PythranTypeError( "Incompatible dict value type `{}` and `{}`".format( new_value_type, value_type), node ) return Dict(new_key_type, new_value_type) elif isinstance(node, ast.Tuple): return Tuple([analyse(elt, env, non_generic) for elt in node.elts]) elif isinstance(node, ast.Slice): def unify_int_or_none(t, name): try: unify(t, Integer()) except InferenceError: try: unify(t, NoneType) except InferenceError: raise PythranTypeError( "Invalid slice {} type `{}`, expecting int or None" .format(name, t) ) if node.lower: lower_type = analyse(node.lower, env, non_generic) unify_int_or_none(lower_type, 'lower bound') else: lower_type = Integer() if node.upper: 
upper_type = analyse(node.upper, env, non_generic) unify_int_or_none(upper_type, 'upper bound') else: upper_type = Integer() if node.step: step_type = analyse(node.step, env, non_generic) unify_int_or_none(step_type, 'step') else: step_type = Integer() return Slice elif isinstance(node, ast.Subscript): new_type = TypeVariable() value_type = prune(analyse(node.value, env, non_generic)) try: slice_type = prune(analyse(node.slice, env, non_generic)) except PythranTypeError as e: raise PythranTypeError(e.msg, node) if isinstance(node.slice, ast.Tuple): nbslice = len(node.slice.elts) dtype = TypeVariable() try: unify(Array(dtype, nbslice), clone(value_type)) except InferenceError: raise PythranTypeError( "Dimension mismatch when slicing `{}`".format(value_type), node) return TypeVariable() # FIXME else: # handle tuples in a special way num = isnum(node.slice) if num and is_tuple_type(value_type): try: unify(prune(prune(value_type.types[0]).types[0]) .types[node.slice.value], new_type) return new_type except IndexError: raise PythranTypeError( "Invalid tuple indexing, " "out-of-bound index `{}` for type `{}`".format( node.slice.value, value_type), node) try: unify(tr(MODULES['operator']['getitem']), Function([value_type, slice_type], new_type)) except InferenceError: raise PythranTypeError( "Invalid subscripting of `{}` by `{}`".format( value_type, slice_type), node) return new_type return new_type elif isinstance(node, ast.Attribute): from pythran.utils import attr_to_path obj, path = attr_to_path(node) if obj.signature is typing.Any: return TypeVariable() else: return tr(obj) # stmt elif isinstance(node, ast.Import): for alias in node.names: if alias.name not in MODULES: raise NotImplementedError("unknown module: %s " % alias.name) if alias.asname is None: target = alias.name else: target = alias.asname env[target] = tr(MODULES[alias.name]) return env elif isinstance(node, ast.ImportFrom): if node.module not in MODULES: raise NotImplementedError("unknown module: %s" % 
node.module) for alias in node.names: if alias.name not in MODULES[node.module]: raise NotImplementedError( "unknown function: %s in %s" % (alias.name, node.module)) if alias.asname is None: target = alias.name else: target = alias.asname env[target] = tr(MODULES[node.module][alias.name]) return env elif isinstance(node, ast.FunctionDef): ftypes = [] for i in range(1 + len(node.args.defaults)): new_env = env.copy() new_non_generic = non_generic.copy() # reset return special variables new_env.pop('@ret', None) new_env.pop('@gen', None) hy = HasYield() for stmt in node.body: hy.visit(stmt) new_env['@gen'] = hy.has_yield arg_types = [] istop = len(node.args.args) - i for arg in node.args.args[:istop]: arg_type = TypeVariable() new_env[arg.id] = arg_type new_non_generic.add(arg_type) arg_types.append(arg_type) for arg, expr in zip(node.args.args[istop:], node.args.defaults[-i:]): arg_type = analyse(expr, new_env, new_non_generic) new_env[arg.id] = arg_type analyse_body(node.body, new_env, new_non_generic) result_type = new_env.get('@ret', NoneType) if new_env['@gen']: result_type = Generator(result_type) ftype = Function(arg_types, result_type) ftypes.append(ftype) if len(ftypes) == 1: ftype = ftypes[0] env[node.name] = ftype else: env[node.name] = MultiType(ftypes) return env elif isinstance(node, ast.Module): analyse_body(node.body, env, non_generic) return env elif isinstance(node, (ast.Pass, ast.Break, ast.Continue)): return env elif isinstance(node, ast.Expr): analyse(node.value, env, non_generic) return env elif isinstance(node, ast.Delete): for target in node.targets: if isinstance(target, ast.Name): if target.id in env: del env[target.id] else: raise PythranTypeError( "Invalid del: unbound identifier `{}`".format( target.id), node) else: analyse(target, env, non_generic) return env elif isinstance(node, ast.Print): if node.dest is not None: analyse(node.dest, env, non_generic) for value in node.values: analyse(value, env, non_generic) return env elif 
isinstance(node, ast.Assign): defn_type = analyse(node.value, env, non_generic) for target in node.targets: target_type = analyse(target, env, non_generic) try: unify(target_type, defn_type) except InferenceError: raise PythranTypeError( "Invalid assignment from type `{}` to type `{}`".format( target_type, defn_type), node) return env elif isinstance(node, ast.AugAssign): # FIMXE: not optimal: evaluates type of node.value twice fake_target = deepcopy(node.target) fake_target.ctx = ast.Load() fake_op = ast.BinOp(fake_target, node.op, node.value) ast.copy_location(fake_op, node) res_type = analyse(fake_op, env, non_generic) target_type = analyse(node.target, env, non_generic) try: unify(target_type, res_type) except InferenceError: raise PythranTypeError( "Invalid update operand for `{}`: `{}` and `{}`".format( symbol_of[type(node.op)], res_type, target_type ), node ) return env elif isinstance(node, ast.Raise): return env # TODO elif isinstance(node, ast.Return): if env['@gen']: return env if node.value is None: ret_type = NoneType else: ret_type = analyse(node.value, env, non_generic) if '@ret' in env: try: ret_type = merge_unify(env['@ret'], ret_type) except InferenceError: raise PythranTypeError( "function may returns with incompatible types " "`{}` and `{}`".format(env['@ret'], ret_type), node ) env['@ret'] = ret_type return env elif isinstance(node, ast.Yield): assert env['@gen'] assert node.value is not None if node.value is None: ret_type = NoneType else: ret_type = analyse(node.value, env, non_generic) if '@ret' in env: try: ret_type = merge_unify(env['@ret'], ret_type) except InferenceError: raise PythranTypeError( "function may yields incompatible types " "`{}` and `{}`".format(env['@ret'], ret_type), node ) env['@ret'] = ret_type return env elif isinstance(node, ast.For): iter_type = analyse(node.iter, env, non_generic) target_type = analyse(node.target, env, non_generic) unify(Collection(TypeVariable(), TypeVariable(), TypeVariable(), target_type), 
iter_type) analyse_body(node.body, env, non_generic) analyse_body(node.orelse, env, non_generic) return env elif isinstance(node, ast.If): test_type = analyse(node.test, env, non_generic) unify(Function([test_type], Bool()), tr(MODULES['builtins']['bool'])) body_env = env.copy() body_non_generic = non_generic.copy() if is_test_is_none(node.test): none_id = node.test.left.id body_env[none_id] = NoneType else: none_id = None analyse_body(node.body, body_env, body_non_generic) orelse_env = env.copy() orelse_non_generic = non_generic.copy() if none_id: if is_option_type(env[none_id]): orelse_env[none_id] = prune(env[none_id]).types[0] else: orelse_env[none_id] = TypeVariable() analyse_body(node.orelse, orelse_env, orelse_non_generic) for var in body_env: if var not in env: if var in orelse_env: try: new_type = merge_unify(body_env[var], orelse_env[var]) except InferenceError: raise PythranTypeError( "Incompatible types from different branches for " "`{}`: `{}` and `{}`".format( var, body_env[var], orelse_env[var] ), node ) else: new_type = body_env[var] env[var] = new_type for var in orelse_env: if var not in env: # may not be unified by the prev loop if a del occured if var in body_env: new_type = merge_unify(orelse_env[var], body_env[var]) else: new_type = orelse_env[var] env[var] = new_type if none_id: try: new_type = merge_unify(body_env[none_id], orelse_env[none_id]) except InferenceError: msg = ("Inconsistent types while merging values of `{}` from " "conditional branches: `{}` and `{}`") err = msg.format(none_id, body_env[none_id], orelse_env[none_id]) raise PythranTypeError(err, node) env[none_id] = new_type return env elif isinstance(node, ast.While): test_type = analyse(node.test, env, non_generic) unify(Function([test_type], Bool()), tr(MODULES['builtins']['bool'])) analyse_body(node.body, env, non_generic) analyse_body(node.orelse, env, non_generic) return env elif isinstance(node, ast.Try): analyse_body(node.body, env, non_generic) for handler in 
node.handlers: analyse(handler, env, non_generic) analyse_body(node.orelse, env, non_generic) analyse_body(node.finalbody, env, non_generic) return env elif isinstance(node, ast.ExceptHandler): if(node.name): new_type = ExceptionType non_generic.add(new_type) if node.name.id in env: unify(env[node.name.id], new_type) else: env[node.name.id] = new_type analyse_body(node.body, env, non_generic) return env elif isinstance(node, ast.Assert): if node.msg: analyse(node.msg, env, non_generic) analyse(node.test, env, non_generic) return env elif isinstance(node, ast.UnaryOp): operand_type = analyse(node.operand, env, non_generic) return_type = TypeVariable() op_type = analyse(node.op, env, non_generic) unify(Function([operand_type], return_type), op_type) return return_type elif isinstance(node, ast.Invert): return MultiType([Function([Bool()], Integer()), Function([Integer()], Integer())]) elif isinstance(node, ast.Not): return tr(MODULES['builtins']['bool']) elif isinstance(node, ast.BoolOp): op_type = analyse(node.op, env, non_generic) value_types = [analyse(value, env, non_generic) for value in node.values] for value_type in value_types: unify(Function([value_type], Bool()), tr(MODULES['builtins']['bool'])) return_type = TypeVariable() prev_type = value_types[0] for value_type in value_types[1:]: unify(Function([prev_type, value_type], return_type), op_type) prev_type = value_type return return_type elif isinstance(node, (ast.And, ast.Or)): x_type = TypeVariable() return MultiType([ Function([x_type, x_type], x_type), Function([TypeVariable(), TypeVariable()], TypeVariable()), ]) raise RuntimeError("Unhandled syntax node {0}".format(type(node))) def get_type(name, env, non_generic): """Get the type of identifier name from the type environment env. 
def unify(t1, t2):
    """Unify the two types t1 and t2.

    Makes the types t1 and t2 the same.

    Args:
        t1: The first type to be made equivalent
        t2: The second type to be equivalent

    Returns:
        None

    Raises:
        InferenceError: Raised if the types cannot be unified.
    """
    a = prune(t1)
    b = prune(t2)
    if isinstance(a, TypeVariable):
        if a != b:
            # a variable may not be unified with a term containing it
            if occurs_in_type(a, b):
                raise InferenceError("recursive unification")
            a.instance = b
    elif isinstance(b, TypeVariable):
        unify(b, a)
    elif isinstance(a, TypeOperator) and a.name == 'any':
        # 'any' is a wildcard: it unifies with everything
        return
    elif isinstance(b, TypeOperator) and b.name == 'any':
        return
    elif isinstance(a, TypeOperator) and isinstance(b, TypeOperator):
        if len(a.types) != len(b.types):
            raise InferenceError("Type length differ")
        if a.name != b.name:
            raise InferenceError("Type name differ")
        for p, q in zip(a.types, b.types):
            unify(p, q)
    elif isinstance(a, MultiType) and isinstance(b, MultiType):
        if len(a.types) != len(b.types):
            raise InferenceError("Type length differ")
        for p, q in zip(a.types, b.types):
            unify(p, q)
    elif isinstance(b, MultiType):
        return unify(b, a)
    elif isinstance(a, MultiType):
        # keep only the overloads of `a` compatible with `b`; each attempt
        # runs on fresh clones so a failed unification leaves no side effect
        types = []
        for t in a.types:
            try:
                t_clone = fresh(t, {})
                b_clone = fresh(b, {})
                unify(t_clone, b_clone)
                types.append(t)
            except InferenceError:
                pass
        if not types:
            raise InferenceError("No overload")
        if len(types) == 1:
            unify(clone(types[0]), b)
        else:
            # too many overloads are found,
            # so extract as many information as we can,
            # and leave the remaining over-approximated
            def try_unify(t, ts):
                if isinstance(t, TypeVariable):
                    return
                if any(isinstance(tp, TypeVariable) for tp in ts):
                    return
                if any(len(tp.types) != len(t.types) for tp in ts):
                    return
                for i, tt in enumerate(t.types):
                    its = [prune(tp.types[i]) for tp in ts]
                    if any(isinstance(it, TypeVariable) for it in its):
                        continue
                    it0 = its[0]
                    it0ntypes = len(it0.types)
                    if all(((it.name == it0.name) and
                            (len(it.types) == it0ntypes))
                           for it in its):
                        ntypes = [TypeVariable() for _ in range(it0ntypes)]
                        new_tt = TypeOperator(it0.name, ntypes)
                        new_tt.__class__ = it0.__class__
                        unify(tt, new_tt)
                        try_unify(prune(tt), [prune(it) for it in its])
            try_unify(b, types)
    else:
        raise RuntimeError("Not unified {} and {}".format(type(a), type(b)))
p1 = prune(t1) p2 = prune(t2) if is_none(p1) and is_none(p2): return p1 if is_none(p1): if is_option_type(p2): return p2 else: return OptionType(p2) if is_none(p2): return merge_unify(p2, p1) if is_option_type(p1) and is_option_type(p2): unify(p1.types[0], p2.types[0]) return p1 if is_option_type(p1): unify(p1.types[0], p2) return p1 if is_option_type(p2): return merge_unify(p2, p1) unify(p1, p2) return p1 def prune(t): """Returns the currently defining instance of t. As a side effect, collapses the list of type instances. The function Prune is used whenever a type expression has to be inspected: it will always return a type expression which is either an uninstantiated type variable or a type operator; i.e. it will skip instantiated variables, and will actually prune them from expressions to remove long chains of instantiated variables. Args: t: The type to be pruned Returns: An uninstantiated TypeVariable or a TypeOperator """ if isinstance(t, TypeVariable): if t.instance is not None: t.instance = prune(t.instance) return t.instance return t def is_generic(v, non_generic): """Checks whether a given variable occurs in a list of non-generic variables Note that a variables in such a list may be instantiated to a type term, in which case the variables contained in the type term are considered non-generic. Note: Must be called with v pre-pruned Args: v: The TypeVariable to be tested for genericity non_generic: A set of non-generic TypeVariables Returns: True if v is a generic variable, otherwise False """ return not occurs_in(v, non_generic) def occurs_in_type(v, type2): """Checks whether a type variable occurs in a type expression. 
def pytype_to_deps_hpp(t):
    """python -> pythonic type hpp filename."""
    if isinstance(t, List):
        return {'list.hpp'} | pytype_to_deps_hpp(t.__args__[0])
    if isinstance(t, Set):
        return {'set.hpp'} | pytype_to_deps_hpp(t.__args__[0])
    if isinstance(t, Dict):
        tkey, tvalue = t.__args__
        return ({'dict.hpp'}
                | pytype_to_deps_hpp(tkey)
                | pytype_to_deps_hpp(tvalue))
    if isinstance(t, Tuple):
        deps = {'tuple.hpp'}
        for elt in t.__args__:
            deps |= pytype_to_deps_hpp(elt)
        return deps
    if isinstance(t, NDArray):
        deps = {'ndarray.hpp'}
        # it's a transpose!
        if t.__args__[1].start == -1:
            deps.add('numpy_texpr.hpp')
        return deps | pytype_to_deps_hpp(t.__args__[0])
    if isinstance(t, Pointer):
        return {'pointer.hpp'} | pytype_to_deps_hpp(t.__args__[0])
    if isinstance(t, Fun):
        deps = {'cfun.hpp'}
        for arg in t.__args__:
            deps |= pytype_to_deps_hpp(arg)
        return deps
    if t in PYTYPE_TO_CTYPE_TABLE:
        return {'{}.hpp'.format(t.__name__)}
    raise NotImplementedError("{0}:{1}".format(type(t), t))
return n == 2''') >>> res = pm.gather(TypeDependencies, node) >>> len(res.edges) 5 bar result depend on : NoDeps foo result depend on : NoDeps, bar and copy copy depend on : NoDeps Check a if statement handle both branches >>> node = ast.parse(''' ... def bar(n): ... return n ... def foo(n): ... if n: ... n = bar() ... else: ... n = 4 ... return 1 or n''') >>> res = pm.gather(TypeDependencies, node) >>> len(res.edges) 3 Check we do not add everything from a conditional statement. >>> node = ast.parse(''' ... def bar(n): ... return n ... def foo(n): ... if n: ... n = bar() ... n = 3 ... else: ... n = 4 ... return 1 or n''') >>> res = pm.gather(TypeDependencies, node) >>> len(res.edges) 2 bar result depend on : NoDeps foo result depend on : NoDeps only Check dependency on for target variable >>> node = ast.parse(''' ... def bar(n): ... return builtins.range(n) ... def foo(n): ... for i in bar(n): ... i = 2 ... return i''') >>> res = pm.gather(TypeDependencies, node) >>> len(res.edges) 2 bar result depend on : NoDeps foo result depend on : NoDeps Check dependency on for target variable with no deps if we don't start >>> node = ast.parse(''' ... def bar(n): ... return builtins.range(n) ... def foo(n): ... i = 4 ... for i in bar(n): ... pass ... return i''') >>> res = pm.gather(TypeDependencies, node) >>> len(res.edges) 3 bar result depend on : NoDeps foo result depend on : NoDeps and bar Check dependency on for target variable with deps >>> node = ast.parse(''' ... def bar(n): ... return builtins.range(n) ... def foo(n): ... for i in bar(n): ... pass ... return i''') >>> res = pm.gather(TypeDependencies, node) >>> len(res.edges) 2 bar result depend on : NoDeps foo result depend on : NoDeps and bar Check conditional without else branch. >>> node = ast.parse(''' ... def foo(n): ... res = 3 ... if n: ... res = foo(n - 1) ... 
    def visit_any_conditionnal(self, node1, node2):
        """
        Set and restore the in_cond variable before visiting subnode.

        Compute correct dependencies on a value as both branch are possible
        path.

        node1 and node2 are the two statement lists of the two possible
        execution paths (e.g. body and orelse of an If/While/For).
        """
        true_naming = false_naming = None
        # Visit the first path against a scratch copy of the naming map.
        # A KeyError signals a variable used before any binding on this
        # path: the path then contributes no naming information at all.
        try:
            tmp = self.naming.copy()
            for expr in node1:
                self.visit(expr)
            true_naming = self.naming
            self.naming = tmp
        except KeyError:
            pass
        # Same game for the second path, starting from the original map.
        try:
            tmp = self.naming.copy()
            for expr in node2:
                self.visit(expr)
            false_naming = self.naming
            self.naming = tmp
        except KeyError:
            pass
        if true_naming and not false_naming:
            self.naming = true_naming
        elif false_naming and not true_naming:
            self.naming = false_naming
        elif true_naming and false_naming:
            # Both paths succeeded: merge their dependency lists, since
            # either branch may be the one actually executed at runtime.
            self.naming = false_naming
            for k, v in true_naming.items():
                if k not in self.naming:
                    self.naming[k] = v
                else:
                    for dep in v:
                        if dep not in self.naming[k]:
                            self.naming[k].append(dep)
""" value_deps = self.visit(node.value) for target in node.targets: name = get_variable(target) if isinstance(name, ast.Name): self.naming[name.id] = value_deps def visit_AugAssign(self, node): """ AugAssigned value depend on r-value type dependencies. It is valid for subscript, `a[i] += foo()` means `a` type depend on `foo` return type and previous a types too. """ args = (self.naming[get_variable(node.target).id], self.visit(node.value)) merge_dep = list({frozenset.union(*x) for x in itertools.product(*args)}) self.naming[get_variable(node.target).id] = merge_dep def visit_For(self, node): """ Handle iterator variable in for loops. Iterate variable may be the correct one at the end of the loop. """ body = node.body if node.target.id in self.naming: body = [ast.Assign(targets=[node.target], value=node.iter, type_comment=None)] + body self.visit_any_conditionnal(body, node.orelse) else: iter_dep = self.visit(node.iter) self.naming[node.target.id] = iter_dep self.visit_any_conditionnal(body, body + node.orelse) def visit_BoolOp(self, node): """ Return type may come from any boolop operand. """ return sum((self.visit(value) for value in node.values), []) def visit_BinOp(self, node): """ Return type depend from both operand of the binary operation. """ args = [self.visit(arg) for arg in (node.left, node.right)] return list({frozenset.union(*x) for x in itertools.product(*args)}) def visit_UnaryOp(self, node): """ Return type depend on operand only. """ return self.visit(node.operand) @staticmethod def visit_Lambda(_): """ Lambda have to be remove before. """ assert False def visit_IfExp(self, node): """ Return value depend on both if branch. """ return self.visit(node.body) + self.visit(node.orelse) @staticmethod def visit_Compare(_): """ Comparison return a bool so there are no dependencies. """ return [frozenset()] def visit_Call(self, node): """ Function call depend on all function use in the call. 
>> a = foo(bar(c) or foobar(d)) Return type depend on [foo, bar] or [foo, foobar] """ args = [self.visit(arg) for arg in node.args] func = self.visit(node.func) params = args + [func or []] return list({frozenset.union(*p) for p in itertools.product(*params)}) @staticmethod def visit_Constant(_): """ Return no dependencies on others functions. """ return [frozenset()] @staticmethod def visit_Attribute(_): """ Return no dependencies on others functions. """ return [frozenset()] def visit_Subscript(self, node): """ Return dependencies of the subscripted value. a = foo()[0] means `a` have a dependency on `foo` return type. """ return self.visit(node.value) def visit_Name(self, node): """ Return dependencies for given variable. It have to be register first. """ if node.id in self.naming: return self.naming[node.id] elif node.id in self.global_declarations: return [frozenset([self.global_declarations[node.id]])] elif isinstance(node.ctx, ast.Param): deps = [frozenset()] self.naming[node.id] = deps return deps else: raise PythranInternalError("Variable '{}' used before assignment" "".format(node.id)) def visit_List(self, node): """ List construction depend on each elements type dependency. """ if node.elts: return list(set(sum([self.visit(elt) for elt in node.elts], []))) else: return [frozenset()] visit_Set = visit_List def visit_Dict(self, node): """ Dict construction depend on each element/value type dependency.""" if node.keys: items = node.keys + node.values return list(set(sum([self.visit(item) for item in items], []))) else: return [frozenset()] visit_Tuple = visit_List @staticmethod def visit_Slice(_): """ Slice are not part of return type dependency information. """ assert False @staticmethod def visit_Index(_): """ Index are not part of return type dependency information. """ assert False def visit_If(self, node): """ Both if branches may be evaluate first. 
""" return self.visit_any_conditionnal(node.body, node.orelse) def visit_While(self, node): """ Both while branches may be evaluate first. """ return self.visit_any_conditionnal(node.body, node.orelse) def visit_ExceptHandler(self, node): """ Exception may declare a new variable. """ if node.name: self.naming[node.name.id] = [frozenset()] for stmt in node.body: self.visit(stmt) pythran-0.10.0+ds2/pythran/types/types.py000066400000000000000000000616371416264035500203330ustar00rootroot00000000000000''' This module performs the return type inference, according to symbolic types, It then reorders function declarations according to the return type deps. * type_all generates a node -> type binding ''' from pythran.analyses import LazynessAnalysis, StrictAliases, YieldPoints from pythran.analyses import LocalNodeDeclarations, Immediates, RangeValues from pythran.config import cfg from pythran.cxxtypes import TypeBuilder, ordered_set from pythran.intrinsic import UserFunction, Class from pythran.passmanager import ModuleAnalysis from pythran.tables import operator_to_lambda, MODULES from pythran.types.conversion import pytype_to_ctype from pythran.types.reorder import Reorder from pythran.utils import attr_to_path, cxxid, isnum, isextslice, ispowi from collections import defaultdict from functools import partial import gast as ast import operator from functools import reduce from copy import deepcopy import types class UnboundableRValue(Exception): pass class Types(ModuleAnalysis): """ Infer symbolic type for all AST node. 
""" def __init__(self): class TypeResult(dict): def __init__(self): self.builder = TypeBuilder() def copy(self): other = TypeResult() other.update(self.items()) other.builder = self.builder return other self.result = TypeResult() self.builder = self.result.builder self.result["bool"] = self.builder.NamedType("bool") self.combiners = defaultdict(UserFunction) self.current_global_declarations = dict() self.max_recompute = 1 # max number of use to be lazy ModuleAnalysis.__init__(self, Reorder, StrictAliases, LazynessAnalysis, Immediates, RangeValues) self.curr_locals_declaration = None def combined(self, *types): if len(types) == 1: return next(iter(types)) return self.builder.CombinedTypes(*types) def prepare(self, node): """ Initialise values to prepare typing computation. Reorder functions to avoid dependencies issues and prepare typing computation setting typing values for Pythonic functions. """ def register(name, module): """ Recursively save function typing and combiners for Pythonic.""" for fname, function in module.items(): if isinstance(function, dict): register(name + "::" + fname, function) else: tname = 'pythonic::{0}::functor::{1}'.format(name, fname) self.result[function] = self.builder.NamedType(tname) self.combiners[function] = function if isinstance(function, Class): register(name + "::" + fname, function.fields) for mname, module in MODULES.items(): register(mname, module) super(Types, self).prepare(node) def run(self, node): super(Types, self).run(node) for head in self.current_global_declarations.values(): if head not in self.result: self.result[head] = "pythonic::types::none_type" return self.result def register(self, ptype): """register ptype as a local typedef""" # Too many of them leads to memory burst if len(self.typedefs) < cfg.getint('typing', 'max_combiner'): self.typedefs.append(ptype) return True return False def node_to_id(self, n, depth=()): if isinstance(n, ast.Name): return (n.id, depth) elif isinstance(n, ast.Subscript): if 
isinstance(n.slice, ast.Slice): return self.node_to_id(n.value, depth) else: index = n.slice.value if isnum(n.slice) else None return self.node_to_id(n.value, depth + (index,)) # use alias information if any elif isinstance(n, ast.Call): for alias in self.strict_aliases[n]: if alias is n: # no specific alias info continue try: return self.node_to_id(alias, depth) except UnboundableRValue: continue raise UnboundableRValue() def isargument(self, node): """ checks whether node aliases to a parameter.""" try: node_id, _ = self.node_to_id(node) return (node_id in self.name_to_nodes and any(isinstance(n, ast.Name) and isinstance(n.ctx, ast.Param) for n in self.name_to_nodes[node_id])) except UnboundableRValue: return False def combine(self, node, othernode, op=None, unary_op=None, register=False, aliasing_type=False): """ Change `node` typing with combination of `node` and `othernode`. Parameters ---------- aliasing_type : bool All node aliasing to `node` have to be updated too. """ if self.result[othernode] is self.builder.UnknownType: if node not in self.result: self.result[node] = self.builder.UnknownType return if aliasing_type: self.combine_(node, othernode, op or operator.add, unary_op or (lambda x: x), register) for a in self.strict_aliases[node]: self.combine_(a, othernode, op or operator.add, unary_op or (lambda x: x), register) else: self.combine_(node, othernode, op or operator.add, unary_op or (lambda x: x), register) def combine_(self, node, othernode, op, unary_op, register): try: # This comes from an assignment,so we must check where the value is # assigned if register: try: node_id, depth = self.node_to_id(node) if depth: node = ast.Name(node_id, ast.Load(), None, None) former_unary_op = unary_op # update the type to reflect container nesting def merge_container_type(ty, index): # integral index make it possible to correctly # update tuple type if isinstance(index, int): kty = self.builder.NamedType( 'std::integral_constant' .format(index)) return 
self.builder.IndexableContainerType(kty, ty) else: return self.builder.ContainerType(ty) def unary_op(x): return reduce(merge_container_type, depth, former_unary_op(x)) # patch the op, as we no longer apply op, # but infer content op = self.combined self.name_to_nodes[node_id].append(node) except UnboundableRValue: pass # only perform inter procedural combination upon stage 0 if register and self.isargument(node) and self.stage == 0: node_id, _ = self.node_to_id(node) if node not in self.result: self.result[node] = unary_op(self.result[othernode]) assert self.result[node], "found an alias with a type" parametric_type = self.builder.PType(self.current, self.result[othernode]) if self.register(parametric_type): current_function = self.combiners[self.current] def translator_generator(args, op, unary_op): ''' capture args for translator generation''' def interprocedural_type_translator(s, n): translated_othernode = ast.Name( '__fake__', ast.Load(), None, None) s.result[translated_othernode] = ( parametric_type.instanciate( s.current, [s.result[arg] for arg in n.args])) # look for modified argument for p, effective_arg in enumerate(n.args): formal_arg = args[p] if formal_arg.id == node_id: translated_node = effective_arg break try: s.combine(translated_node, translated_othernode, op, unary_op, register=True, aliasing_type=True) except NotImplementedError: pass # this may fail when the effective # parameter is an expression except UnboundLocalError: pass # this may fail when translated_node # is a default parameter return interprocedural_type_translator translator = translator_generator( self.current.args.args, op, unary_op) # deferred combination current_function.add_combiner(translator) else: new_type = unary_op(self.result[othernode]) UnknownType = self.builder.UnknownType if node not in self.result or self.result[node] is UnknownType: self.result[node] = new_type else: if isinstance(self.result[node], tuple): raise UnboundableRValue self.result[node] = 
op(self.result[node], new_type) except UnboundableRValue: pass def visit_FunctionDef(self, node): self.delayed_types = set() self.curr_locals_declaration = self.gather( LocalNodeDeclarations, node) self.current = node self.typedefs = list() self.name_to_nodes = defaultdict(ordered_set) for arg in node.args.args: self.name_to_nodes[arg.id].append(arg) self.yield_points = self.gather(YieldPoints, node) # two stages, one for inter procedural propagation self.stage = 0 self.generic_visit(node) visited_names = {} for delayed_node in self.delayed_types: delayed_type = self.result[delayed_node] all_types = ordered_set(self.result[ty] for ty in self.name_to_nodes[delayed_node.id]) final_type = self.combined(*all_types) delayed_type.final_type = final_type visited_names[delayed_node.id] = final_type # and one for backward propagation # but this step is generally costly if cfg.getboolean('typing', 'enable_two_steps_typing'): self.stage = 1 self.generic_visit(node) # propagate type information through all aliases for name, nodes in self.name_to_nodes.items(): all_types = ordered_set(self.result[ty] for ty in nodes) final_type = self.combined(*all_types) for n in nodes: if isinstance(self.result[n], self.builder.LType): self.result[n].final_type = final_type else: self.result[n] = final_type self.current_global_declarations[node.name] = node # return type may be unset if the function always raises return_type = self.result.get( node, self.builder.NamedType("pythonic::types::none_type")) self.result[node] = self.builder.Returnable(return_type), self.typedefs for k in self.gather(LocalNodeDeclarations, node): self.result[k] = self.get_qualifier(k)(self.result[k]) def get_qualifier(self, node): lazy_res = self.lazyness_analysis[node.id] return (self.builder.Lazy if lazy_res <= self.max_recompute else self.builder.Assignable) def visit_Return(self, node): """ Compute return type and merges with others possible return type.""" self.generic_visit(node) # No merge are done if the 
function is a generator. if not self.yield_points: assert node.value, "Values were added in each return statement." self.combine(self.current, node.value) def visit_Yield(self, node): """ Compute yield type and merges it with others yield type. """ self.generic_visit(node) self.combine(self.current, node.value) def visit_Assign(self, node): self.visit(node.value) for t in node.targets: # We don't support subscript aliasing self.combine(t, node.value, register=True, aliasing_type=isinstance(t, ast.Name)) if t in self.curr_locals_declaration: self.result[t] = self.get_qualifier(t)(self.result[t]) if isinstance(t, ast.Subscript): if self.visit_AssignedSubscript(t): for alias in self.strict_aliases[t.value]: fake = ast.Subscript(alias, t.value, ast.Store()) self.combine(fake, node.value, register=True) def visit_AugAssign(self, node): self.visit(node.value) if isinstance(node.target, ast.Subscript): if self.visit_AssignedSubscript(node.target): for alias in self.strict_aliases[node.target.value]: fake = ast.Subscript(alias, node.target.value, ast.Store()) # We don't check more aliasing as it is a fake node. self.combine(fake, node.value, lambda x, y: x + self.builder.ExpressionType( operator_to_lambda[type(node.op)], (x, y)), register=True) # We don't support aliasing on subscript self.combine(node.target, node.value, lambda x, y: x + self.builder.ExpressionType( operator_to_lambda[type(node.op)], (x, y)), register=True, aliasing_type=isinstance(node.target, ast.Name)) def visit_For(self, node): self.visit(node.iter) self.combine(node.target, node.iter, unary_op=self.builder.IteratorContentType, aliasing_type=True, register=True) for n in node.body + node.orelse: self.visit(n) def visit_BoolOp(self, node): """ Merge BoolOp operand type. BoolOp are "and" and "or" and may return any of these results so all operands should have the combinable type. """ # Visit subnodes self.generic_visit(node) # Merge all operands types. 
[self.combine(node, value) for value in node.values] def visit_BinOp(self, node): if ispowi(node): self.visit(node.op) self.visit(node.left) cty = "std::integral_constant" % (node.right.value) self.result[node.right] = self.builder.NamedType(cty) else: self.generic_visit(node) def F(x, y): return self.builder.ExpressionType( operator_to_lambda[type(node.op)], (x, y)) self.combine(node, node.left, F) self.combine(node, node.right, F) def visit_UnaryOp(self, node): self.generic_visit(node) def f(x): return self.builder.ExpressionType( operator_to_lambda[type(node.op)], (x,)) self.combine(node, node.operand, unary_op=f) def visit_IfExp(self, node): self.generic_visit(node) for n in (node.body, node.orelse): self.combine(node, n) def visit_Compare(self, node): self.generic_visit(node) all_compare = list(zip(node.ops, node.comparators)) def unary_op(x, op=None): return self.builder.ExpressionType( operator_to_lambda[type(op)], (self.result[node.left], x)) for op, comp in all_compare: self.combine(node, comp, unary_op=partial(unary_op, op=op)) def visit_Call(self, node): self.generic_visit(node) func = node.func for alias in self.strict_aliases[func]: # this comes from a bind if isinstance(alias, ast.Call): a0 = alias.args[0] # by construction of the bind construct assert len(self.strict_aliases[a0]) == 1 bounded_function = next(iter(self.strict_aliases[a0])) fake_name = deepcopy(a0) fake_node = ast.Call(fake_name, alias.args[1:] + node.args, []) self.combiners[bounded_function].combiner(self, fake_node) # handle backward type dependencies from function calls else: self.combiners[alias].combiner(self, node) UnknownType = self.builder.UnknownType # recurring nightmare def last_chance(): # maybe we can get saved if we have a hint about # the called function return type for alias in self.strict_aliases[func]: if alias is self.current and alias in self.result: # great we have a (partial) type information self.result[node] = self.result[alias] return self.result[node] = 
UnknownType if self.result[node.func] is UnknownType: return last_chance() if any(self.result[arg] is UnknownType for arg in node.args): return last_chance() # special handler for getattr: use the attr name as an enum member if (isinstance(func, ast.Attribute) and func.attr == 'getattr'): def F(_): return self.builder.GetAttr(self.result[node.args[0]], node.args[1].value) # default behavior else: def F(f): return self.builder.ReturnType( f, [self.result[arg] for arg in node.args]) # op is used to drop previous value there self.combine(node, func, op=lambda x, y: y, unary_op=F) def visit_Constant(self, node): """ Set the pythonic constant type. """ ty = type(node.value) if ty is str and len(node.value) == 1: sty = 'pythonic::types::chr' else: sty = pytype_to_ctype(ty) if node in self.immediates: sty = "std::integral_constant<%s, %s>" % (sty, str(node.value).lower()) self.result[node] = self.builder.NamedType(sty) def visit_Attribute(self, node): """ Compute typing for an attribute node. """ obj, path = attr_to_path(node) # If no type is given, use a decltype if obj.isliteral(): typename = pytype_to_ctype(obj.signature) self.result[node] = self.builder.NamedType(typename) else: path = '::'.join(map(cxxid, path)) + '{}' self.result[node] = self.builder.DeclType(path) def visit_Slice(self, node): """ Set slicing type using continuous information if provided. Also visit subnodes as they may contains relevant typing information. 
""" self.generic_visit(node) if node.step is None or (isnum(node.step) and node.step.value == 1): if all(self.range_values[p].low >= 0 for p in (node.lower, node.upper)): ntype = "pythonic::types::fast_contiguous_slice" else: ntype = "pythonic::types::contiguous_slice" self.result[node] = self.builder.NamedType(ntype) else: self.result[node] = self.builder.NamedType( 'pythonic::types::slice') def visit_Subscript(self, node): self.visit(node.value) # type of a[1:2, 3, 4:1] is the type of: declval(a)(slice, long, slice) if isextslice(node.slice): self.visit(node.slice) def f(t): def et(a, *b): return "{0}({1})".format(a, ", ".join(b)) dim_types = tuple(self.result[d] for d in node.slice.elts) return self.builder.ExpressionType(et, (t,) + dim_types) elif isnum(node.slice) and node.slice.value >= 0: # type of a[2] is the type of an elements of a # this special case is to make type inference easier # for the back end compiler def f(t): return self.builder.ElementType(node.slice.value, t) else: # type of a[i] is the return type of the matching function self.visit(node.slice) def f(x): return self.builder.ExpressionType( "{0}[{1}]".format, (x, self.result[node.slice])) f and self.combine(node, node.value, unary_op=f) def visit_AssignedSubscript(self, node): if isinstance(node.slice, ast.Slice): return False elif isextslice(node.slice): return False else: self.visit(node.slice) self.combine(node.value, node.slice, unary_op=self.builder.IndexableType, aliasing_type=True, register=True) return True def delayed(self, node): fallback_type = self.combined(*[self.result[n] for n in self.name_to_nodes[node.id]]) self.delayed_types.add(node) return self.builder.LType(fallback_type, node) def visit_Name(self, node): if node.id in self.name_to_nodes: self.result[node] = self.delayed(node) elif node.id in self.current_global_declarations: newtype = self.builder.NamedType( self.current_global_declarations[node.id].name) if node not in self.result: self.result[node] = newtype else: 
self.result[node] = self.builder.UnknownType def visit_List(self, node): """ Define list type from all elements type (or empty_list type). """ self.generic_visit(node) if node.elts: for elt in node.elts: self.combine(node, elt, unary_op=self.builder.ListType) else: self.result[node] = self.builder.NamedType( "pythonic::types::empty_list") def visit_Set(self, node): """ Define set type from all elements type (or empty_set type). """ self.generic_visit(node) if node.elts: for elt in node.elts: self.combine(node, elt, unary_op=self.builder.SetType) else: self.result[node] = self.builder.NamedType( "pythonic::types::empty_set") def visit_Dict(self, node): """ Define set type from all elements type (or empty_dict type). """ self.generic_visit(node) if node.keys: for key, value in zip(node.keys, node.values): value_type = self.result[value] self.combine(node, key, unary_op=partial(self.builder.DictType, of_val=value_type)) else: self.result[node] = self.builder.NamedType( "pythonic::types::empty_dict") def visit_ExceptHandler(self, node): if node.type and node.name: if not isinstance(node.type, ast.Tuple): tname = self.builder.NamedType( 'pythonic::types::{0}'.format(node.type.attr)) self.result[node.type] = tname self.combine(node.name, node.type, aliasing_type=True, register=True) for n in node.body: self.visit(n) def visit_Tuple(self, node): self.generic_visit(node) types = [self.result[elt] for elt in node.elts] self.result[node] = self.builder.TupleType(types) def visit_arguments(self, node): for i, arg in enumerate(node.args): self.result[arg] = self.builder.ArgumentType(i) for n in node.defaults: self.visit(n) pythran-0.10.0+ds2/pythran/typing.py000066400000000000000000000037271416264035500173310ustar00rootroot00000000000000class FunMeta(type): def __getitem__(cls, item): return Fun(tuple(item[0]) + (item[1],)) class DictMeta(type): def __getitem__(cls, item): return Dict(item) class UnionMeta(type): def __getitem__(cls, item): return Union(item) class 
SetMeta(type): def __getitem__(cls, item): return Set(item) class ListMeta(type): def __getitem__(cls, item): return List(item) class IterableMeta(type): def __getitem__(cls, item): return Iterable(item) class GeneratorMeta(type): def __getitem__(cls, item): return Generator(item) class TupleMeta(type): def __getitem__(cls, item): return Tuple(item) class OptionalMeta(type): def __getitem__(cls, item): return Optional(item) class NDArrayMeta(type): def __getitem__(cls, item): return NDArray(item) class PointerMeta(type): def __getitem__(cls, item): return Pointer(item) class Type(type): def __new__(cls, args): return type.__new__( cls, cls.__name__, (object,), {'__args__': args if isinstance(args, tuple) else (args,)} ) def __init__(self, *args, **kwargs): pass class Fun(Type, metaclass=FunMeta): pass class Dict(Type, metaclass=DictMeta): pass class Union(Type, metaclass=UnionMeta): pass class Set(Type, metaclass=SetMeta): pass class List(Type, metaclass=ListMeta): pass class Iterable(Type, metaclass=IterableMeta): pass class Generator(Type, metaclass=GeneratorMeta): pass class Tuple(Type, metaclass=TupleMeta): pass class Optional(Type, metaclass=OptionalMeta): pass class NDArray(Type, metaclass=NDArrayMeta): pass class Pointer(Type, metaclass=PointerMeta): pass class TypeVar(object): def __init__(self, name): self.__name__ = name class Sized(object): pass class Any(object): pass class File(object): pass pythran-0.10.0+ds2/pythran/unparse.py000066400000000000000000000443671416264035500175010ustar00rootroot00000000000000""" This code is extracted from the python source tree, and thus under the PSF License. Usage: unparse.py """ import pythran.metadata as metadata import pythran.openmp as openmp from pythran.utils import isnum import gast as ast import os import sys import io # Large float and imaginary literals get turned into infinities in the AST. # We unparse those infinities to INFSTR. 
INFSTR = "(1e1000 ** 2)" def interleave(inter, f, seq): """Call f on each item in seq, calling inter() in between. """ seq = iter(seq) try: f(next(seq)) except StopIteration: pass else: for x in seq: inter() f(x) class Unparser: """Methods in this class recursively traverse an AST and output source code for the abstract syntax; original formatting is disregarded. """ def __init__(self, tree, file=sys.stdout): """Unparser(tree, file=sys.stdout) -> None. Print the source for tree to file.""" self.f = file self.future_imports = [] self._indent = 0 self.line_marker = "" self.dispatch(tree) self.f.write("") self.f.flush() def fill(self, text=""): "Indent a piece of text, according to the current indentation level" self.f.write(self.line_marker + " " * self._indent + text) self.line_marker = "\n" def write(self, text): "Append a piece of text to the current line." self.f.write(text) def enter(self): "Print ':', and increase the indentation." self.write(":") self._indent += 1 def leave(self): """Decrease the indentation level.""" self._indent -= 1 def dispatch(self, tree): """Dispatcher function, dispatching tree type T to method _T.""" # display omp directive in python dump for omp in metadata.get(tree, openmp.OMPDirective): deps = list() for dep in omp.deps: old_file = self.f self.f = io.StringIO() self.dispatch(dep) deps.append(self.f.getvalue()) self.f = old_file directive = omp.s.format(*deps) self._Expr(ast.Expr(ast.Constant(directive, None))) if isinstance(tree, list): for t in tree: self.dispatch(t) return meth = getattr(self, "_" + tree.__class__.__name__) meth(tree) # ############# Unparsing methods ###################### # There should be one method per concrete grammar type # # Constructors should be grouped by sum type. Ideally, # # this would follow the order in the grammar, but # # currently doesn't. # # ###################################################### def _Module(self, tree): # Goes through each top-level statement. 
If the special __init__() # function is found, add a call to it because it's a special Pythran # feature. has_init = False for stmt in tree.body: self.dispatch(stmt) if (type(stmt) is ast.FunctionDef and stmt.name == '__init__'): has_init = True # Call __init__() in which top statements are moved. if has_init: self.fill("__init__()") # pythran specific def _ContainerOf(self, co): if co.index == co.index: # always valid except for UnknownIndex self.write('|[{}]='.format(co.index)) self.dispatch(co.containee) self.write('|') else: self.write('|') self.dispatch(co.containee) self.write('|') def _UnboundValueType(self, ui): self.write('') # stmt def _Expr(self, tree): self.fill() self.dispatch(tree.value) def _Import(self, t): self.fill("import ") interleave(lambda: self.write(", "), self.dispatch, t.names) def _ImportFrom(self, t): # A from __future__ import may affect unparsing, so record it. if t.module and t.module == '__future__': self.future_imports.extend(n.name for n in t.names) self.fill("from ") self.write("." 
* t.level) if t.module: self.write(t.module) self.write(" import ") interleave(lambda: self.write(", "), self.dispatch, t.names) def _Assign(self, t): self.fill() for target in t.targets: self.dispatch(target) self.write(" = ") self.dispatch(t.value) def _AugAssign(self, t): self.fill() self.dispatch(t.target) self.write(" " + self.binop[t.op.__class__.__name__] + "= ") self.dispatch(t.value) def _Return(self, t): self.fill("return") if t.value: self.write(" ") self.dispatch(t.value) def _Pass(self, t): self.fill("pass") def _Break(self, t): self.fill("break") def _Continue(self, t): self.fill("continue") def _Delete(self, t): self.fill("del ") interleave(lambda: self.write(", "), self.dispatch, t.targets) def _Assert(self, t): self.fill("assert ") self.dispatch(t.test) if t.msg: self.write(", ") self.dispatch(t.msg) def _Exec(self, t): self.fill("exec ") self.dispatch(t.body) if t.globals: self.write(" in ") self.dispatch(t.globals) if t.locals: self.write(", ") self.dispatch(t.locals) def _NameConstant(self, t): self.write('{}'.format(t.value)) def _Print(self, t): # Assume from __future__ import print_function self.fill("print") do_comma = False self.write("(") for e in t.values: if do_comma: self.write(", ") else: do_comma = True self.dispatch(e) if t.dest: self.write(", file=") self.dispatch(t.dest) if not t.nl: self.write(", end=''") self.write(")") def _Global(self, t): self.fill("global ") interleave(lambda: self.write(", "), self.write, t.names) def _Yield(self, t): self.write("(") self.write("yield") if t.value: self.write(" ") self.dispatch(t.value) self.write(")") def _Raise(self, t): self.fill('raise ') if t.exc: self.dispatch(t.exc) if t.cause: self.write("from ") self.dispatch(t.cause) def _Try(self, t): self.fill("try") self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _ExceptHandler(self, t): self.fill("except") if t.type: 
self.write(" ") self.dispatch(t.type) if t.name: self.write(" as ") self.dispatch(t.name) self.enter() self.dispatch(t.body) self.leave() def _ClassDef(self, t): for deco in t.decorator_list: self.fill("@") self.dispatch(deco) self.fill("class " + t.name) if t.bases: self.write("(") for a in t.bases: self.dispatch(a) self.write(", ") self.write(")") self.enter() self.dispatch(t.body) self.leave() def _FunctionDef(self, t): for deco in t.decorator_list: self.fill("@") self.dispatch(deco) self.fill("def " + t.name + "(") self.dispatch(t.args) self.write(")") self.enter() self.dispatch(t.body) self.leave() def _For(self, t): self.fill("for ") self.dispatch(t.target) self.write(" in ") self.dispatch(t.iter) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _If(self, t): self.fill("if ") self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() # collapse nested ifs into equivalent elifs. while (t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If)): t = t.orelse[0] self.fill("elif ") self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() # final else if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _While(self, t): self.fill("while ") self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _With(self, t): self.fill("with ") self.dispatch(t.context_expr) if t.optional_vars: self.write(" as ") self.dispatch(t.optional_vars) self.enter() self.dispatch(t.body) self.leave() # expr def _Str(self, tree): # if from __future__ import unicode_literals is in effect, # then we want to output string literals using a 'b' prefix # and unicode literals with no prefix. 
if "unicode_literals" not in self.future_imports: self.write(repr(tree.value)) elif isinstance(tree.value, str): self.write("b" + repr(tree.value)) else: assert False, "shouldn't get here" def _Name(self, t): self.write(t.id) def _Repr(self, t): self.write("`") self.dispatch(t.value) self.write("`") def _Num(self, t): repr_n = repr(t.value) # Parenthesize negative numbers, to avoid turning (-1)**2 into -1**2. if repr_n.startswith("-"): self.write("(") # Substitute overflowing decimal literal for AST infinities. self.write(repr_n.replace("inf", INFSTR)) if repr_n.startswith("-"): self.write(")") def _Constant(self, t): if isinstance(t, str): return self._Str(t) elif t is None or isinstance(t, bool): return self._NameConstant(t) else: return self._Num(t) def _List(self, t): self.write("[") interleave(lambda: self.write(", "), self.dispatch, t.elts) self.write("]") def _ListComp(self, t): self.write("[") self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write("]") def _GeneratorExp(self, t): self.write("(") self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write(")") def _SetComp(self, t): self.write("{") self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write("}") def _DictComp(self, t): self.write("{") self.dispatch(t.key) self.write(": ") self.dispatch(t.value) for gen in t.generators: self.dispatch(gen) self.write("}") def _comprehension(self, t): self.write(" for ") self.dispatch(t.target) self.write(" in ") self.dispatch(t.iter) for if_clause in t.ifs: self.write(" if ") self.dispatch(if_clause) def _IfExp(self, t): self.write("(") self.dispatch(t.body) self.write(" if ") self.dispatch(t.test) self.write(" else ") self.dispatch(t.orelse) self.write(")") def _Set(self, t): assert(t.elts) # should be at least one element self.write("{") interleave(lambda: self.write(", "), self.dispatch, t.elts) self.write("}") def _Dict(self, t): self.write("{") def write_pair(pair): k, v = pair self.dispatch(k) 
self.write(": ") self.dispatch(v) interleave(lambda: self.write(", "), write_pair, zip(t.keys, t.values)) self.write("}") def _Tuple(self, t): self.write("(") if len(t.elts) == 1: (elt,) = t.elts self.dispatch(elt) self.write(",") else: interleave(lambda: self.write(", "), self.dispatch, t.elts) self.write(")") unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"} def _UnaryOp(self, t): self.write("(") self.write(self.unop[t.op.__class__.__name__]) self.write(" ") # If we're applying unary minus to a number, parenthesize the number. # This is necessary: -2147483648 is different from -(2147483648) on # a 32-bit machine (the first is an int, the second a long), and # -7j is different from -(7j). (The first has real part 0.0, the # second has real part -0.0.) if isinstance(t.op, ast.USub) and isnum(t.operand): self.write("(") self.dispatch(t.operand) self.write(")") else: self.dispatch(t.operand) self.write(")") binop = {"Add": "+", "Sub": "-", "Mult": "*", "Div": "/", "Mod": "%", "LShift": "<<", "RShift": ">>", "BitOr": "|", "BitXor": "^", "BitAnd": "&", "FloorDiv": "//", "Pow": "**", "MatMult": "@"} def _BinOp(self, t): self.write("(") self.dispatch(t.left) self.write(" " + self.binop[t.op.__class__.__name__] + " ") self.dispatch(t.right) self.write(")") cmpops = {"Eq": "==", "NotEq": "!=", "Lt": "<", "LtE": "<=", "Gt": ">", "GtE": ">=", "Is": "is", "IsNot": "is not", "In": "in", "NotIn": "not in"} def _Compare(self, t): self.write("(") self.dispatch(t.left) for o, e in zip(t.ops, t.comparators): self.write(" " + self.cmpops[o.__class__.__name__] + " ") self.dispatch(e) self.write(")") boolops = {ast.And: 'and', ast.Or: 'or'} def _BoolOp(self, t): self.write("(") s = " %s " % self.boolops[t.op.__class__] interleave(lambda: self.write(s), self.dispatch, t.values) self.write(")") def _Attribute(self, t): self.dispatch(t.value) # Special case: 3.__abs__() is a syntax error, so if t.value # is an integer literal then we need to either parenthesize # it or add 
an extra space to get 3 .__abs__(). if isnum(t.value) and isinstance(t.value.value, int): self.write(" ") self.write(".") self.write(t.attr) def _Call(self, t): self.dispatch(t.func) self.write("(") comma = False for e in t.args: if comma: self.write(", ") else: comma = True self.dispatch(e) for e in t.keywords: if comma: self.write(", ") else: comma = True self.dispatch(e) self.write(")") def _Starred(self, t): self.write('*') self.dispatch(self.value) def _Subscript(self, t): self.dispatch(t.value) self.write("[") self.dispatch(t.slice) self.write("]") # slice def _Ellipsis(self, t): self.write("...") def _Index(self, t): self.dispatch(t.value) def _Slice(self, t): if t.lower: self.dispatch(t.lower) self.write(":") if t.upper: self.dispatch(t.upper) if t.step: self.write(":") self.dispatch(t.step) def _ExtSlice(self, t): interleave(lambda: self.write(', '), self.dispatch, t.dims) # others def _arguments(self, t): first = True # normal arguments defaults = [None] * (len(t.args) - len(t.defaults)) + t.defaults for a, d in zip(t.args, defaults): if first: first = False else: self.write(", ") self.dispatch(a), if d: self.write("=") self.dispatch(d) # varargs if t.vararg: if first: first = False else: self.write(", ") self.write("*") self.write(t.vararg) # kwargs if t.kwarg: if first: first = False else: self.write(", ") self.write("**" + t.kwarg) def _keyword(self, t): if t.arg: self.write(t.arg) else: self.write('**') self.write("=") self.dispatch(t.value) def _Lambda(self, t): self.write("(") self.write("lambda ") self.dispatch(t.args) self.write(": ") self.dispatch(t.body) self.write(")") def _alias(self, t): self.write(t.name) if t.asname: self.write(" as " + t.asname) def roundtrip(filename, output=sys.stdout): with open(filename, "r") as pyfile: source = pyfile.read() tree = compile(source, filename, "exec", ast.PyCF_ONLY_AST) Unparser(tree, output) def testdir(a): try: names = [n for n in os.listdir(a) if n.endswith('.py')] except OSError: 
sys.stderr.write("Directory not readable: %s" % a) else: for n in names: fullname = os.path.join(a, n) if os.path.isfile(fullname): output = io.StringIO() print('Testing %s' % fullname) try: roundtrip(fullname, output) except Exception as e: print(' Failed to compile, exception is %s' % repr(e)) elif os.path.isdir(fullname): testdir(fullname) def main(args): if args[0] == '--testdir': for a in args[1:]: testdir(a) else: for a in args: roundtrip(a) if __name__ == '__main__': main(sys.argv[1:]) pythran-0.10.0+ds2/pythran/utils.py000066400000000000000000000102721416264035500171500ustar00rootroot00000000000000""" Common function use for AST manipulation. """ import gast as ast from pythran.tables import MODULES from pythran.conversion import mangle, demangle from functools import reduce from contextlib import contextmanager def isstr(node): return isinstance(node, ast.Constant) and isinstance(node.value, str) def isintegral(node): return isinstance(node, ast.Constant) and isinstance(node.value, (int, bool)) def isnum(node): return isinstance(node, ast.Constant) and isinstance(node.value, (int, float, bool)) def isextslice(node): if not isinstance(node, ast.Tuple): return False return any(isinstance(elt, ast.Slice) for elt in node.elts) def ispowi(node): if not isinstance(node.op, ast.Pow): return False if not isintegral(node.right): return False return node.right.value >= 0 def attr_to_path(node): """ Compute path and final object for an attribute node """ def get_intrinsic_path(modules, attr): """ Get function path and intrinsic from an ast.Attribute. 
""" if isinstance(attr, ast.Name): return modules[demangle(attr.id)], (demangle(attr.id),) elif isinstance(attr, ast.Attribute): module, path = get_intrinsic_path(modules, attr.value) return module[attr.attr], path + (attr.attr,) obj, path = get_intrinsic_path(MODULES, node) if not obj.isliteral(): path = path[:-1] + ('functor', path[-1]) return obj, ('pythonic', ) + path def path_to_attr(path): """ Transform path to ast.Attribute. >>> import gast as ast >>> path = ('builtins', 'my', 'constant') >>> value = path_to_attr(path) >>> ref = ast.Attribute( ... value=ast.Attribute(value=ast.Name(id="builtins", ... ctx=ast.Load(), ... annotation=None, ... type_comment=None), ... attr="my", ctx=ast.Load()), ... attr="constant", ctx=ast.Load()) >>> ast.dump(ref) == ast.dump(value) True """ return reduce(lambda hpath, last: ast.Attribute(hpath, last, ast.Load()), path[1:], ast.Name(mangle(path[0]), ast.Load(), None, None)) def path_to_node(path): """ Retrieve a symbol in MODULES based on its path >>> path = ('math', 'pi') >>> path_to_node(path) #doctest: +ELLIPSIS """ if len(path) == 1: return MODULES[path[0]] else: return path_to_node(path[:-1])[path[-1]] def isattr(node): return (isinstance(node, ast.Call) and getattr(node.func, 'attr', None) == 'getattr') def get_variable(assignable): """ Return modified variable name. >>> import gast as ast >>> ref = ast.Subscript( ... value=ast.Subscript( ... value=ast.Name('a', ast.Load(), None, None), ... slice=ast.Name('i', ast.Load(), None, None), ... ctx=ast.Load()), ... slice=ast.Name('j', ast.Load(), None, None), ... ctx=ast.Load()) >>> ast.dump(get_variable(ref)) "Name(id='a', ctx=Load(), annotation=None, type_comment=None)" """ msg = "Only name and subscript can be assigned." 
assert isinstance(assignable, (ast.Name, ast.Subscript)), msg while isinstance(assignable, ast.Subscript) or isattr(assignable): if isattr(assignable): assignable = assignable.args[0] else: assignable = assignable.value return assignable def pythran_builtin(name): return MODULES['builtins']['pythran'][name] def pythran_builtin_path(name): assert name in MODULES['builtins']['pythran'] return ('builtins', 'pythran', name) def pythran_builtin_attr(name): return path_to_attr(pythran_builtin_path(name)) def cxxid(name): from pythran.tables import cxx_keywords return name + '_' * (name in cxx_keywords) @contextmanager def pushpop(l, v): l.append(v) yield l.pop() pythran-0.10.0+ds2/pythran/version.py000066400000000000000000000002111416264035500174650ustar00rootroot00000000000000__version__ = '0.10.0' __url__ = 'https://github.com/serge-sans-paille/pythran' __descr__ = 'Ahead of Time compiler for numeric kernels' pythran-0.10.0+ds2/requirements.txt000066400000000000000000000000521416264035500172300ustar00rootroot00000000000000ply>=3.4 gast~=0.5.0 numpy beniget~=0.4.0 pythran-0.10.0+ds2/setup.py000066400000000000000000000141161416264035500154640ustar00rootroot00000000000000from __future__ import print_function # Preliminary checks that cannot be done by setuptools # like... the setuptools dependency itself! 
try: import setuptools except ImportError: print() print("*****************************************************") print("* Setuptools must be installed before running setup *") print("*****************************************************") print() raise from setuptools.command.build_py import build_py from setuptools.command.develop import develop from setuptools import setup import logging import os import shutil import sys # It appears old versions of setuptools are not supported, see # https://github.com/serge-sans-paille/pythran/issues/489 from distutils.version import LooseVersion MinimalSetuptoolsVersion = LooseVersion("12.0.5") if LooseVersion(setuptools.__version__) < MinimalSetuptoolsVersion: msg = "Setuptools version is {}, but must be at least {}".format( setuptools.__version__, MinimalSetuptoolsVersion) print() print("*" * (len(msg) + 4)) print("*", msg, "*") print("*" * (len(msg) + 4)) print() raise ImportError("setuptools") if sys.version_info.major < 3: print() print("****************************************************") print("* Python 2 has reached end-of-support *") print("****************************************************") print("* *") print("* Last Pythran version supporting Python2 is 0.9.5 *") print("* *") print("****************************************************") print() logger = logging.getLogger("pythran") logger.addHandler(logging.StreamHandler()) versionfile = os.path.join('pythran', 'version.py') exec(open(versionfile).read()) class BuildWithThirdParty(build_py): """ Set up Pythran dependencies. 
* install boost dependencies * install xsimd dependencies """ third_parties = 'boost', 'xsimd' user_options = build_py.user_options + [ ('no-{}'.format(pkg), None, 'Do not distribute {} headers'.format(pkg)) for pkg in third_parties ] def initialize_options(self): build_py.initialize_options(self) for pkg in BuildWithThirdParty.third_parties: setattr(self, 'no_' + pkg, None) def copy_pkg(self, pkg, src_only=False): "Install boost deps from the third_party directory" if getattr(self, 'no_' + pkg) is None: print('Copying', pkg, 'dependencies') to_copy = pkg, else: return src = os.path.join('third_party', *to_copy) # copy to the build tree if not src_only: target = os.path.join(self.build_lib, 'pythran', *to_copy) shutil.rmtree(target, True) shutil.copytree(src, target) # copy them to the source tree too, needed for sdist target = os.path.join('pythran', *to_copy) shutil.rmtree(target, True) shutil.copytree(src, target) def run(self, *args, **kwargs): # regular build done by parent class build_py.run(self, *args, **kwargs) if not self.dry_run: # compatibility with the parent options for pkg in BuildWithThirdParty.third_parties: self.copy_pkg(pkg) class DevelopWithThirdParty(develop, BuildWithThirdParty): def initialize_options(self): develop.initialize_options(self) BuildWithThirdParty.initialize_options(self) def run(self, *args, **kwargs): if not self.dry_run: # compatibility with the parent options for pkg in BuildWithThirdParty.third_parties: self.copy_pkg(pkg, src_only=True) develop.run(self, *args, **kwargs) # Cannot use glob here, as the files may not be generated yet boost_headers = ['boost/' + '*/' * i + '*.hpp' for i in range(1, 20)] xsimd_headers = ['xsimd/' + '*/' * i + '*.hpp' for i in range(1, 20)] pythonic_headers = ['*/' * i + '*.hpp' for i in range(9)] + ['patch/*'] # read longdescr from README def longdescr(readme_path): with open(readme_path) as readme: lines = list(readme) start_index = lines.index('What is it?\n') stop_index = 
lines.index('Installation\n') long_description = "".join(lines[start_index + 2: stop_index]) return long_description setup(name='pythran', version=__version__, description=__descr__, long_description=longdescr("README.rst"), author='Serge Guelton', author_email='serge.guelton@telecom-bretagne.eu', url=__url__, packages=['pythran', 'pythran.analyses', 'pythran.transformations', 'pythran.optimizations', 'omp', 'pythran/pythonic', 'pythran.types'], package_data={'pythran': (['pythran*.cfg'] + boost_headers + xsimd_headers), 'pythran/pythonic': pythonic_headers}, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: C++', 'Topic :: Software Development :: Compilers', 'Topic :: Software Development :: Code Generators' ], license="BSD 3-Clause", install_requires=open('requirements.txt').read().splitlines(), entry_points={'console_scripts': ['pythran = pythran.run:run', 'pythran-config = pythran.config:run']}, extras_require={ 'doc': open('docs/requirements.txt').read().splitlines(), }, test_suite="pythran.tests.test_cases", cmdclass={'build_py': BuildWithThirdParty, 'develop': DevelopWithThirdParty}) pythran-0.10.0+ds2/website/000077500000000000000000000000001416264035500154115ustar00rootroot00000000000000pythran-0.10.0+ds2/website/Makefile000066400000000000000000000022121416264035500170460ustar00rootroot00000000000000TARGET=pythrandoc SED=sed ZIP=zip CP=cp LN_S=ln -s PAGES=MANUAL CLI SUPPORT DEVGUIDE TUTORIAL INTERNAL LICENSE AUTHORS TODO Changelog all:index.rst $(patsubst %, %.rst, $(PAGES)) pythran.png PYTHONPATH=..:$$PYTHONPATH sphinx-build . 
$(TARGET) dist:all rm -f $(TARGET).zip cd $(TARGET) && $(ZIP) -r ../$(TARGET).zip * index.rst:../README.rst Makefile $(SED) -r $< $(patsubst %, -e 's/(%)/:doc:`\1`/g', $(PAGES)) \ -e 's,http://pythonhosted.org/pythran/,.. toctree::\n\t:maxdepth: 1\n\n$(patsubst %, \t%\n, $(PAGES)),g' \ > $@ MANUAL.rst:../doc/MANUAL.rst $(LN_S) $< CLI.rst:../doc/CLI.rst $(LN_S) $< DEVGUIDE.rst:../doc/DEVGUIDE.rst $(LN_S) $< TUTORIAL.rst:../doc/TUTORIAL.rst $(LN_S) $< INTERNAL.rst:../doc/INTERNAL.rst $(LN_S) $< LICENSE.rst:../LICENSE Makefile echo "=======\nLICENSE\n=======\n\n" > $@ cat $< >> $@ AUTHORS.rst:../AUTHORS $(LN_S) $< $@ TODO.rst:../TODO $(LN_S) $< $@ SUPPORT.rst:support.py ../pythran/tables.py ./$< > $@ Changelog.rst:../Changelog echo "=========\nChangelog\n=========\n\n" > $@ cat $< >> $@ pythran.png:../logo.svg Makefile convert -density 1200 -resize x80 -background none $< $@ pythran-0.10.0+ds2/website/support.py000077500000000000000000000021771416264035500175110ustar00rootroot00000000000000#!/usr/bin/env python from pythran import tables TITLE = "Supported Modules and Functions" DEPTHS = '=*-+:~#.^"`' print(DEPTHS[0]*len(TITLE)) print(TITLE) print(DEPTHS[0]*len(TITLE)) print("") def format_name(name): if name.endswith('_') and not name.startswith('_'): name = name[:-1] return name def isiterable(obj): return hasattr(obj, '__iter__') def dump_entry(entry_name, entry_value, depth): if isiterable(entry_value): print(entry_name) print(DEPTHS[depth] * len(entry_name)) print("") sym_entries, sub_entries = [], [] for sym in entry_value: w = sub_entries if isiterable(entry_value[sym]) else sym_entries w.append(sym) for k in sorted(sym_entries): dump_entry(format_name(k), entry_value[k], depth + 1) print("") for k in sorted(sub_entries): dump_entry(format_name(k), entry_value[k], depth + 1) print("") else: print(entry_name) for MODULE in sorted(tables.MODULES): if MODULE != '__dispatch__': dump_entry(format_name(MODULE), tables.MODULES[MODULE], 1)

::value ? I : 0), P>::type; }; template struct copy_new_axis_helper; template <> struct copy_new_axis_helper<0> { template typename std::enable_if< (0 != std::tuple_size::value) && std::tuple_element<0, S2>::type::value, sutils::push_front_t>>::type doit(S0 s, S1 const &shape, S2 const &new_axis, std::integral_constant) { return {std::tuple_cat(std::tuple>(), s.values)}; } template typename std::enable_if< (0 != std::tuple_size::value) && !std::tuple_element<0, S2>::type::value, sutils::push_front_t::type>>::type doit(S0 s, S1 const &shape, S2 const &new_axis, std::integral_constant) { return { std::tuple_cat(std::make_tuple(shape.template shape<0>()), s.values)}; } template typename std::enable_if< (0 == std::tuple_size::value), sutils::push_front_t::type>>::type doit(S0 s, S1 const &shape, S2 const &new_axis, std::integral_constant) { return { std::tuple_cat(std::make_tuple(shape.template shape()), s.values)}; } }; template struct copy_new_axis_helper { template auto doit(S0 s, S1 const &shape, S2 const &new_axis, std::integral_constant) -> typename std::enable_if< (I < std::tuple_size::value) && safe_tuple_element::type::value, decltype(copy_new_axis_helper{}.doit( sutils::push_front_t>(), shape, new_axis, std::integral_constant()))>::type { return copy_new_axis_helper{}.doit( sutils::push_front_t>( std::tuple_cat(std::tuple>(), s.values)), shape, new_axis, std::integral_constant()); } template auto doit(S0 s, S1 const &shape, S2 const &new_axis, std::integral_constant) -> typename std::enable_if< (I >= std::tuple_size::value), decltype(copy_new_axis_helper{}.doit( sutils::push_front_t::type>(), shape, new_axis, std::integral_constant < size_t, J == 0 ? J : J - 1 > ()))>::type { return copy_new_axis_helper{}.doit( sutils::push_front_t< S0, typename std::tuple_element::type>( std::tuple_cat(std::make_tuple(shape.template shape()), s.values)), shape, new_axis, std::integral_constant < size_t, J == 0 ? 
J : J - 1 > ()); } template auto doit(S0 s, S1 const &shape, S2 const &new_axis, std::integral_constant) -> typename std::enable_if< (I < std::tuple_size::value) && !safe_tuple_element::type::value, decltype(copy_new_axis_helper{}.doit( sutils::push_front_t::type>(), shape, new_axis, std::integral_constant < size_t, J == 0 ? J : J - 1 > ()))>::type { return copy_new_axis_helper{}.doit( sutils::push_front_t< S0, typename std::tuple_element::type>( std::tuple_cat(std::make_tuple(shape.template shape()), s.values)), shape, new_axis, std::integral_constant < size_t, J == 0 ? J : J - 1 > ()); } }; template auto copy_new_axis(S1 const &shape, S2 const &new_axis) -> decltype(copy_new_axis_helper{}.doit( types::pshape<>(), shape, new_axis, std::integral_constant())) { return copy_new_axis_helper{}.doit( types::pshape<>(), shape, new_axis, std::integral_constant()); } } namespace types { namespace details { template void init_shape(S &res, E const &e, utils::int_<1>) { sutils::assign(std::get::value - 1>(res), e.size()); } template void init_shape(S &res, E const &e, utils::int_) { sutils::assign(std::get::value - L>(res), e.size()); init_shape(res, e[0], utils::int_{}); } } template bool operator==(T const &self, pshape const &other) { return sutils::equals(self, other); } template bool operator==(pshape const &self, T const &other) { return sutils::equals(self, other); } template bool operator==(pshape const &self, pshape const &other) { return sutils::equals(self, other); } template bool operator!=(T const &self, pshape const &other) { return !sutils::equals(self, other); } template bool operator!=(pshape const &self, T const &other) { return !sutils::equals(self, other); } template bool operator!=(pshape const &self, pshape const &other) { return !sutils::equals(self, other); } } PYTHONIC_NS_END #ifdef ENABLE_PYTHON_MODULE #include "pythonic/include/utils/seq.hpp" #include "pythonic/include/utils/fwd.hpp" #include "pythonic/python/core.hpp" PYTHONIC_NS_BEGIN template 
struct to_python> { static PyObject *convert(std::pair const &t); }; template struct to_python> { static PyObject *convert(types::pshape const &t); }; template struct to_python> { template static PyObject *do_convert(std::tuple const &t, utils::index_sequence); static PyObject *convert(std::tuple const &t); }; template struct to_python> { template static PyObject *do_convert(types::array const &t, utils::index_sequence); static PyObject *convert(types::array const &t); }; template struct to_python> { template static PyObject *do_convert(types::static_list const &t, utils::index_sequence); static PyObject *convert(types::static_list const &t); }; template struct from_python> { template static bool do_is_convertible(PyObject *obj, typename utils::index_sequence); static bool is_convertible(PyObject *obj); template static std::tuple do_convert(PyObject *obj, typename utils::index_sequence); static std::tuple convert(PyObject *obj); }; template struct from_python> { static bool is_convertible(PyObject *obj); template static types::array do_convert(PyObject *obj, typename utils::index_sequence); static types::array convert(PyObject *obj); }; PYTHONIC_NS_END #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/uint16.hpp000066400000000000000000000001341416264035500237150ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_UINT16_HPP #define PYTHONIC_INCLUDE_TYPES_UINT16_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/uint32.hpp000066400000000000000000000001341416264035500237130ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_UINT32_HPP #define PYTHONIC_INCLUDE_TYPES_UINT32_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/uint64.hpp000066400000000000000000000001341416264035500237200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_UINT64_HPP #define PYTHONIC_INCLUDE_TYPES_UINT64_HPP #endif 
pythran-0.10.0+ds2/pythran/pythonic/include/types/uint8.hpp000066400000000000000000000001321416264035500236340ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_UINT8_HPP #define PYTHONIC_INCLUDE_TYPES_UINT8_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/uintc.hpp000066400000000000000000000001321416264035500237070ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_UINTC_HPP #define PYTHONIC_INCLUDE_TYPES_UINTC_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/uintp.hpp000066400000000000000000000001321416264035500237240ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_UINTP_HPP #define PYTHONIC_INCLUDE_TYPES_UINTP_HPP #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/variant_functor.hpp000066400000000000000000000115721416264035500260030ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_VARIANT_FUNCTOR_HPP #define PYTHONIC_INCLUDE_TYPES_VARIANT_FUNCTOR_HPP #include "pythonic/include/utils/meta.hpp" #include "pythonic/include/types/combined.hpp" #include PYTHONIC_NS_BEGIN namespace types { /* Variant functor is a generic wrapper for functor object, for which we *don't know a priori the common signature, * because of of the variadic operator(). * * The trick is to allocate a piece of memory large enough to hold any of *the functor, then maintain, for each functor type, a pointer to that type. * There can be only one pointer for each variadic set to a non null value *(based on the preallocated memory buffer). * * When calling the functor operator(), the code iterates (linearly) on each *pointer && call the operator() of this pointer. 
*/ template struct variant_functor; namespace details { template struct variant_functor_impl; template struct variant_functor_impl { Type *fun = nullptr; variant_functor_impl() = default; variant_functor_impl(variant_functor_impl const &) = delete; ~variant_functor_impl(); variant_functor_impl(char mem[], Type const &t); variant_functor_impl(char mem[], variant_functor_impl const &t); template variant_functor_impl(char mem[], variant_functor_impl const &t); template variant_functor_impl( char mem[], variant_functor_impl const &t); template variant_functor_impl(char mem[], OtherType const &t); variant_functor_impl &operator=(variant_functor_impl const &) = delete; void assign(char mem[], variant_functor_impl const &); void assign(char mem[], variant_functor const &); void assign(char mem[], Type const &); template void assign(char mem[], OtherType const &); template void assign(char mem[], variant_functor_impl const &); template void assign(char mem[], variant_functor const &); template void assign(char mem[], variant_functor_impl const &); template void assign(char mem[], variant_functor const &); template auto operator()(Args &&... args) -> decltype(std::declval()(std::forward(args)...)); template auto operator()(Args &&... args) const -> decltype(std::declval()(std::forward(args)...)); }; template struct variant_functor_impl { variant_functor_impl head; variant_functor_impl tail; variant_functor_impl() = default; variant_functor_impl(variant_functor_impl const &) = delete; template variant_functor_impl(char mem[], OtherTypes const &... t); template variant_functor_impl(char mem[], variant_functor_impl const &t); variant_functor_impl &operator=(variant_functor_impl const &) = delete; void assign(char mem[], variant_functor_impl const &); template void assign(char mem[], OtherType const &); template auto operator()(Args &&... 
args) -> typename __combined< decltype(std::declval()(args...)), decltype(std::declval()(args...))...>::type; template auto operator()(Args &&... args) const -> typename __combined< decltype(std::declval()(args...)), decltype(std::declval()(args...))...>::type; }; } template struct variant_functor : details::variant_functor_impl { using callable = void; // memory used to initialize the actual functor // default construction cannot be used because generator are not // default-constructible char mem[utils::max_element::value]; variant_functor() = default; variant_functor(variant_functor const &); variant_functor &operator=(variant_functor const &); template variant_functor &operator=(OtherType const &); template variant_functor &operator=(variant_functor const &); template variant_functor(OtherTypes const &... t); template variant_functor(variant_functor const &t); }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/types/vectorizable_type.hpp000066400000000000000000000056111416264035500263260ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_TYPES_VECTORIZABLE_TYPE_HPP #define PYTHONIC_INCLUDE_TYPES_VECTORIZABLE_TYPE_HPP PYTHONIC_NS_BEGIN namespace types { /* types used during vectorization specialization */ struct vectorize { }; struct novectorize { }; struct novectorize_nobroadcast { }; struct vectorizer { template static auto vbegin(E &&expr) -> decltype(std::forward(expr).vbegin(vectorize{})) { return std::forward(expr).vbegin(vectorize{}); } template static auto vend(E &&expr) -> decltype(std::forward(expr).vend(vectorize{})) { return std::forward(expr).vend(vectorize{}); } }; struct vectorize_nobroadcast { }; struct vectorizer_nobroadcast { template static auto vbegin(E &&expr) -> decltype(std::forward(expr).vbegin(vectorize_nobroadcast{})) { return std::forward(expr).vbegin(vectorize_nobroadcast{}); } template static auto vend(E &&expr) -> decltype(std::forward(expr).vend(vectorize_nobroadcast{})) { return 
std::forward(expr).vend(vectorize_nobroadcast{}); } }; template struct is_vectorizable_dtype { static const bool value = is_dtype::value && !std::is_same::value && !std::is_same::value && !std::is_same>::value; }; /* trait to check if is T is an array-like type that supports vectorization */ template ::value> struct is_vectorizable_array; template struct is_vectorizable_array : std::false_type { }; template struct is_vectorizable_array { static const bool value = T::is_vectorizable; }; template struct is_vectorizable { static const bool value = std::conditional::value, is_vectorizable_dtype, is_vectorizable_array>::type::value; }; template struct is_vector_op; template struct numpy_expr; } namespace utils { template bool no_broadcast(types::numpy_expr const &arg) { return arg.no_broadcast(); } template bool no_broadcast_ex(types::numpy_expr const &arg) { return arg.no_broadcast_ex(); } template bool no_broadcast_vectorize(types::numpy_expr const &arg) { return arg.no_broadcast_vectorize(); } template constexpr bool no_broadcast(Arg const &arg) { return true; } template constexpr bool no_broadcast_ex(Arg const &arg) { return true; } template constexpr bool no_broadcast_vectorize(Arg const &arg) { return true; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/000077500000000000000000000000001416264035500220545ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/include/utils/array_helper.hpp000066400000000000000000000021671416264035500252500ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_ARRAY_HELPER_HPP #define PYTHONIC_INCLUDE_UTILS_ARRAY_HELPER_HPP #include "pythonic/include/types/tuple.hpp" PYTHONIC_NS_BEGIN /* recursively return the value at the position given by `indices' in * the `self' "array like". It may be a sub array instead of real value. * indices[0] is the coordinate for the first dimension && indices[M-1] * is for the last one. 
*/ template struct nget { template auto operator()(A &&self, types::array const &indices) -> decltype(nget()(std::forward(self)[0], indices)); template auto fast(A &&self, types::array const &indices) -> decltype(nget().fast(std::forward(self).fast(0), indices)); }; template <> struct nget<0> { template auto operator()(A &&self, types::array const &indices) -> decltype(std::forward(self)[indices[M - 1]]); template auto fast(A &&self, types::array const &indices) -> decltype(std::forward(self).fast(indices[M - 1])); }; PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/broadcast_copy.hpp000066400000000000000000000031311416264035500255570ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_BROADCAST_COPY_HPP #define PYTHONIC_INCLUDE_UTILS_BROADCAST_COPY_HPP #include "pythonic/include/types/tuple.hpp" #ifdef _OPENMP #include // as a macro so that an enlightened user can modify this variable :-) #ifndef PYTHRAN_OPENMP_MIN_ITERATION_COUNT #define PYTHRAN_OPENMP_MIN_ITERATION_COUNT 1000 #endif #endif PYTHONIC_NS_BEGIN namespace utils { /* helper function to get the dimension of an array * yields 0 for scalar types */ template struct dim_of { static const size_t value = T::value; }; template struct dim_of, void> { static const size_t value = 1 + dim_of::value; }; template struct dim_of::value>::type> { static const size_t value = 0; }; #define SPECIALIZE_DIM_OF(TYPE) \ template <> \ struct dim_of { \ static const size_t value = 0; \ } SPECIALIZE_DIM_OF(std::complex); SPECIALIZE_DIM_OF(std::complex); #undef SPECIALIZE_DIM_OF template E &broadcast_copy(E &self, F const &other); template E &broadcast_update(E &self, F const &other); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/functor.hpp000066400000000000000000000037061416264035500242530ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_FUNCTOR_HPP #define PYTHONIC_INCLUDE_UTILS_FUNCTOR_HPP #include // create a function named `name' using function `f' 
#define DEFINE_FUNCTOR_2(name, f) \ namespace functor \ { \ struct name { \ using callable = void; \ template \ auto operator()(Types && ... types) const \ -> decltype(f(std::forward(types)...)) \ { \ return f(std::forward(types)...); \ } \ \ friend std::ostream &operator<<(std::ostream &os, name) \ { \ return os << #name; \ } \ }; \ } // create a functor named `f' using function `ns::f' #define DEFINE_FUNCTOR(ns, f) DEFINE_FUNCTOR_2(f, ns::f) #define USING_FUNCTOR(f, alias) \ namespace functor \ { \ using f = alias; \ } #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/fwd.hpp000066400000000000000000000003221416264035500233420ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_FWD_HPP #define PYTHONIC_INCLUDE_UTILS_FWD_HPP PYTHONIC_NS_BEGIN namespace utils { template void fwd(Types const &... types); } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/int_.hpp000066400000000000000000000003171416264035500235170ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_INT_HPP #define PYTHONIC_INCLUDE_UTILS_INT_HPP PYTHONIC_NS_BEGIN namespace utils { template struct int_ { }; // compile-time counter } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/iterator.hpp000066400000000000000000000033601416264035500244200ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_ITERATOR_HPP #define PYTHONIC_INCLUDE_UTILS_ITERATOR_HPP PYTHONIC_NS_BEGIN namespace utils { template struct comparable_iterator : T { comparable_iterator(); comparable_iterator(T const &t); bool operator<(comparable_iterator other); }; // Utility class to remind sequence we are iterating on to avoid dangling // reference template struct iterator_reminder; template struct iterator_reminder { std::tuple values; // FIXME : It works only because template arguments are ! references // so it trigger a copy. iterator_reminder() = default; iterator_reminder(T const &v, Others const &... 
o); }; template struct iterator_reminder { T values; iterator_reminder() = default; iterator_reminder(T const &v); }; template struct iterator_reminder { std::tuple values; iterator_reminder() = default; iterator_reminder(T const &v); }; /* Get the "minimum" of all iterators : - only random => random - at least one forward => forward */ template struct iterator_min; template struct iterator_min { using type = typename std::iterator_traits::iterator_category; }; template struct iterator_min { using type = typename std::conditional< std::is_same::iterator_category, std::forward_iterator_tag>::value, std::forward_iterator_tag, typename iterator_min::type>::type; }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/meta.hpp000066400000000000000000000025201416264035500235120ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_META_HPP #define PYTHONIC_INCLUDE_UTILS_META_HPP PYTHONIC_NS_BEGIN namespace utils { template struct all_of; template <> struct all_of<> : std::true_type { }; template struct all_of { static constexpr bool value = V0 && all_of::value; }; template struct all_of { static constexpr bool value = V0; }; template struct any_of; template <> struct any_of<> : std::false_type { }; template struct any_of { static constexpr bool value = V0 || any_of::value; }; template struct any_of { static constexpr bool value = V0; }; template struct front { using type = T0; }; template struct max_element; template struct max_element { static constexpr size_t _value = max_element::value; static constexpr size_t _index = max_element::index; static constexpr size_t value = N0 > _value ? N0 : _value; static constexpr size_t index = N0 == value ? 
0 : 1 + _index; }; template struct max_element { static constexpr size_t value = V0; static constexpr size_t index = 0; }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/nested_container.hpp000066400000000000000000000070661416264035500261220ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_NESTED_CONTAINER_HPP #define PYTHONIC_INCLUDE_UTILS_NESTED_CONTAINER_HPP #include #include "pythonic/include/types/traits.hpp" #include "pythonic/include/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace types { template class sliced_list; template class list; template struct array_base; template struct dynamic_tuple; } namespace utils { /* compute nested container depth && memory size*/ template struct nested_container_depth_helper; template struct nested_container_depth_helper { static const int value = 0; }; template struct nested_container_depth_helper { static const int value = T::value; }; template struct nested_container_depth { static const int value = nested_container_depth_helper::value>::value; }; template struct nested_container_depth> { static const int value = 1 + nested_container_depth::value; }; template struct nested_container_depth> { static const int value = 1 + nested_container_depth::value; }; template struct nested_container_depth> { static const int value = 1 + nested_container_depth::value; }; template struct nested_container_depth> { static const int value = 1 + nested_container_depth::value; }; template struct nested_container_depth> { static const int value = std::tuple_size::value; }; /* Get the size of a container, using recursion on inner container if any * FIXME: should be a constexpr? * FIXME: why a class && ! a function? 
*/ template struct nested_container_size { using Type = typename std::remove_cv::type>::type; static long flat_size(T const &t); }; /* Recursion stops on bool */ template <> struct nested_container_size { template constexpr static long flat_size(F); }; /* Statically define (by recursion) the type of element inside nested * containers */ template struct nested_container_value_type_helper; template struct nested_container_value_type_helper { using type = T; }; template struct nested_container_value_type_helper { using type = typename T::dtype; }; template struct nested_container_value_type { using type = typename nested_container_value_type_helper< T, types::is_array::value>::type; }; template struct nested_container_value_type> { using type = typename nested_container_value_type::type; }; template struct nested_container_value_type> { using type = typename nested_container_value_type::type; }; template struct nested_container_value_type> { using type = typename nested_container_value_type::type; }; template struct nested_container_value_type> { using type = typename nested_container_value_type::type; }; template struct nested_container_value_type> { using type = T; }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/neutral.hpp000066400000000000000000000035171416264035500242450ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_NEUTRAL_HPP #define PYTHONIC_INCLUDE_UTILS_NEUTRAL_HPP #include "pythonic/include/operator_/iadd.hpp" #include "pythonic/include/operator_/iand.hpp" #include "pythonic/include/operator_/ior.hpp" #include "pythonic/include/operator_/imul.hpp" #include "pythonic/include/operator_/imax.hpp" #include "pythonic/include/operator_/imin.hpp" #include "pythonic/include/operator_/ixor.hpp" PYTHONIC_NS_BEGIN namespace utils { template struct neutral; template struct neutral { static T const value; }; template T const neutral::value = 0; template struct neutral { static T const value; }; template T const neutral::value = 
(T)-1; template struct neutral { static T const value; }; template T const neutral::value = 0; template struct neutral { static T const value; }; template T const neutral::value = 1; template struct neutral { static T const value; }; template T const neutral::value = std::numeric_limits::lowest(); template struct neutral { static T const value; }; template T const neutral::value = std::numeric_limits::max(); template struct neutral { static T const value; }; template T const neutral::value = 0; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/numpy_conversion.hpp000066400000000000000000000024241416264035500262040ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_NUMPY_CONVERSION_HPP #define PYTHONIC_INCLUDE_UTILS_NUMPY_CONVERSION_HPP #include "pythonic/include/utils/numpy_traits.hpp" #include #if _MSC_VER && !__clang__ #define NUMPY_EXPR_TO_NDARRAY0_DECL(fname) \ template ::value && \ types::is_array::value, \ E>::type * = nullptr> \ auto fname(E const &expr, Types &&... others); #else #define NUMPY_EXPR_TO_NDARRAY0_DECL(fname) \ template \ auto fname(E const &expr, Types &&... 
others) \ ->typename std::enable_if< \ !types::is_ndarray::value && types::is_array::value, \ decltype(fname( \ types::ndarray{expr}, \ std::forward(others)...))>::type; #endif #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/numpy_traits.hpp000066400000000000000000000073241416264035500253310ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_NUMPY_TRAITS_HPP #define PYTHONIC_INCLUDE_UTILS_NUMPY_TRAITS_HPP #include "pythonic/include/types/traits.hpp" PYTHONIC_NS_BEGIN namespace types { template struct ndarray; template struct numpy_iexpr; template struct numpy_vexpr; template struct numpy_gexpr; template struct numpy_texpr; template struct numpy_texpr_2; template struct numpy_expr; template class list; template class sliced_list; struct empty_list; template struct array_base; template struct dynamic_tuple; template struct broadcasted; template struct broadcast; template struct is_ndarray { static constexpr bool value = false; }; template struct is_ndarray> { static constexpr bool value = true; }; /* Type trait that checks if a type is a potential numpy expression *parameter * * Only used to write concise expression templates */ template struct is_array { static constexpr bool value = false; }; template struct is_array> { static constexpr bool value = true; }; template struct is_array> { static constexpr bool value = true; }; template struct is_array> { static constexpr bool value = true; }; template struct is_array> { static constexpr bool value = true; }; template struct is_array> { static constexpr bool value = true; }; template struct is_array> { static constexpr bool value = true; }; template struct is_array> { static constexpr bool value = true; }; template struct is_numexpr_arg : is_array { }; template struct is_numexpr_arg : is_numexpr_arg { }; template struct is_numexpr_arg : is_numexpr_arg { }; template struct is_numexpr_arg : is_numexpr_arg { }; template struct is_numexpr_arg : is_numexpr_arg { }; template struct is_numexpr_arg> { static 
constexpr bool value = is_numexpr_arg::value || is_dtype::value; }; template struct is_numexpr_arg> { static constexpr bool value = is_numexpr_arg::value || is_dtype::value; }; template <> struct is_numexpr_arg { static constexpr bool value = true; }; template struct is_numexpr_arg> { static constexpr bool value = is_numexpr_arg::value || is_dtype::value; }; template struct is_numexpr_arg> { static constexpr bool value = is_numexpr_arg::value || is_dtype::value; }; template struct is_numexpr_arg> { static constexpr bool value = is_numexpr_arg::value || is_dtype::value; }; template struct is_numexpr_arg> { static constexpr bool value = is_numexpr_arg::value || is_dtype::value; }; template struct dtype_of { template static typename T::dtype get(typename T::dtype *); template static T get(...); using type = decltype(get(nullptr)); }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/reserve.hpp000066400000000000000000000004071416264035500242410ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_RESERVE_HPP #define PYTHONIC_INCLUDE_UTILS_RESERVE_HPP PYTHONIC_NS_BEGIN namespace utils { template void reserve(Container &, From &&); // do nothing unless specialized } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/seq.hpp000066400000000000000000000042771416264035500233670ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_SEQ_HPP #define PYTHONIC_INCLUDE_UTILS_SEQ_HPP PYTHONIC_NS_BEGIN namespace utils { // make_integer_sequence() = integer_sequence<0, ..., N-1> template struct integer_sequence { }; template using index_sequence = integer_sequence; namespace details { template struct make_integer_sequence : make_integer_sequence(N - 1), S...> { }; template struct make_integer_sequence { using type = integer_sequence; }; } template using make_integer_sequence = typename details::make_integer_sequence::type; template using make_index_sequence = typename details::make_integer_sequence::type; // 
make_reversed_integer_sequence() = integer_sequence namespace details { template struct make_reversed_integer_sequence : make_reversed_integer_sequence { }; template struct make_reversed_integer_sequence { using type = integer_sequence; }; } template using make_reversed_integer_sequence = typename details::make_reversed_integer_sequence::type; template using make_reversed_index_sequence = typename details::make_reversed_integer_sequence::type; // make_repeated_type() => type_sequence template struct type_sequence { }; namespace details { template struct repeated_type : repeated_type { }; template struct repeated_type { using type = type_sequence; }; } template struct repeated_type : details::repeated_type { }; template using make_repeated_type = typename repeated_type::type; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/shared_ref.hpp000066400000000000000000000043741416264035500246770ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_SHARED_REF_HPP #define PYTHONIC_INCLUDE_UTILS_SHARED_REF_HPP #include #include #include #ifdef _OPENMP #define THREAD_SAFE_REF_COUNT #endif #ifdef THREAD_SAFE_REF_COUNT #include #endif #ifdef ENABLE_PYTHON_MODULE #include #endif PYTHONIC_NS_BEGIN #ifdef ENABLE_PYTHON_MODULE using extern_type = PyObject *; #else using extern_type = void *; #endif #ifdef THREAD_SAFE_REF_COUNT using atomic_size_t = std::atomic_size_t; #else using atomic_size_t = size_t; #endif namespace utils { // Force construction of an uninitialized shared_ref struct no_memory { }; /** Light-weight shared_ptr like-class * * Unlike std::shared_ptr, it allocates the memory itself using new. */ template class shared_ref { private: struct memory { T ptr; atomic_size_t count; extern_type foreign; template memory(Types &&... 
args); } * mem; public: // Uninitialized ctor shared_ref(no_memory const &) noexcept; // Uninitialized ctor (rvalue ref) shared_ref(no_memory &&) noexcept; // Ctor allocate T && forward all arguments to T ctor template shared_ref(Types &&... args); // Move Ctor shared_ref(shared_ref &&p) noexcept; // Copy Ctor shared_ref(shared_ref const &p) noexcept; // Copy Ctor, again // Without a non-const copy-ctor here, the greedy variadic template ctor // takes over shared_ref(shared_ref &p) noexcept; ~shared_ref() noexcept; // Magic swapperator, help for assignment operators void swap(shared_ref &rhs) noexcept; // Takes by copy so that acquire/release is handle by ctor shared_ref &operator=(shared_ref p) noexcept; T &operator*() const noexcept; T *operator->() const noexcept; bool operator!=(shared_ref const &other) const noexcept; bool operator==(shared_ref const &other) const noexcept; // Save pointer to the external object to decref once we doesn't // use it anymore void external(extern_type obj_ptr); extern_type get_foreign(); bool is_foreign() const; private: void dispose(); void acquire(); }; } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/tags.hpp000066400000000000000000000006661416264035500235330ustar00rootroot00000000000000#ifndef PYTHONIC_INCLUDE_UTILS_TAGS_HPP #define PYTHONIC_INCLUDE_UTILS_TAGS_HPP #include "pythonic/include/types/traits.hpp" PYTHONIC_NS_BEGIN namespace purity { struct unknown_tag { }; struct pure_tag { }; } template struct purity_of { using type = typename std::conditional::value, purity::pure_tag, purity::unknown_tag>::type; }; PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/include/utils/yield.hpp000066400000000000000000000005771416264035500237040ustar00rootroot00000000000000#ifndef PYTHRAN_INCLUDE_UTILS_YIELD_HPP #define PYTHRAN_INCLUDE_UTILS_YIELD_HPP /* * This contains base class for yielders */ #include "pythonic/include/types/generator.hpp" PYTHONIC_NS_BEGIN class yielder { public: yielder(); bool 
operator!=(yielder const &other) const; bool operator==(yielder const &other) const; long __generator_state; }; PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/io/000077500000000000000000000000001416264035500177005ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/io/_io/000077500000000000000000000000001416264035500204465ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/000077500000000000000000000000001416264035500231635ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/close.hpp000066400000000000000000000003201416264035500247740ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_CLOSE_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_CLOSE_HPP #include "pythonic/include/io/_io/TextIOWrapper/close.hpp" #include "pythonic/builtins/file/close.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/fileno.hpp000066400000000000000000000003231416264035500251460ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_FILENO_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_FILENO_HPP #include "pythonic/include/io/_io/TextIOWrapper/fileno.hpp" #include "pythonic/builtins/file/fileno.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/flush.hpp000066400000000000000000000003171416264035500250160ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_FLUSH_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_FLUSH_HPP #include "pythonic/include/io/_io/TextIOWrapper/flush.hpp" #include "pythonic/builtins/file/flush.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/isatty.hpp000066400000000000000000000003231416264035500252070ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_ISATTY_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_ISATTY_HPP #include "pythonic/include/io/_io/TextIOWrapper/isatty.hpp" #include "pythonic/builtins/file/isatty.hpp" #endif 
pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/next.hpp000066400000000000000000000003141416264035500246500ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_NEXT_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_NEXT_HPP #include "pythonic/include/io/_io/TextIOWrapper/next.hpp" #include "pythonic/builtins/file/next.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/read.hpp000066400000000000000000000003141416264035500246050ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_READ_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_READ_HPP #include "pythonic/include/io/_io/TextIOWrapper/read.hpp" #include "pythonic/builtins/file/read.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/readline.hpp000066400000000000000000000003341416264035500254570ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_READLINE_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_READLINE_HPP #include "pythonic/include/io/_io/TextIOWrapper/readline.hpp" #include "pythonic/builtins/file/readline.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/readlines.hpp000066400000000000000000000003401416264035500256370ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_READLINES_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_READLINES_HPP #include "pythonic/include/io/_io/TextIOWrapper/readlines.hpp" #include "pythonic/builtins/file/readlines.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/seek.hpp000066400000000000000000000003141416264035500246210ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_SEEK_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_SEEK_HPP #include "pythonic/include/io/_io/TextIOWrapper/seek.hpp" #include "pythonic/builtins/file/seek.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/tell.hpp000066400000000000000000000003141416264035500246320ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_TELL_HPP #define 
PYTHONIC_IO__IO_TEXTIOWRAPPER_TELL_HPP #include "pythonic/include/io/_io/TextIOWrapper/tell.hpp" #include "pythonic/builtins/file/tell.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/truncate.hpp000066400000000000000000000003341416264035500255210ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_TRUNCATE_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_TRUNCATE_HPP #include "pythonic/include/io/_io/TextIOWrapper/truncate.hpp" #include "pythonic/builtins/file/truncate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/write.hpp000066400000000000000000000003201416264035500250210ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_WRITE_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_WRITE_HPP #include "pythonic/include/io/_io/TextIOWrapper/write.hpp" #include "pythonic/builtins/file/write.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/io/_io/TextIOWrapper/writelines.hpp000066400000000000000000000003441416264035500260620ustar00rootroot00000000000000#ifndef PYTHONIC_IO__IO_TEXTIOWRAPPER_WRITELINES_HPP #define PYTHONIC_IO__IO_TEXTIOWRAPPER_WRITELINES_HPP #include "pythonic/include/io/_io/TextIOWrapper/writelines.hpp" #include "pythonic/builtins/file/writelines.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/itertools/000077500000000000000000000000001416264035500213155ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/itertools/combinations.hpp000066400000000000000000000070461416264035500245220ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_COMBINATIONS_HPP #define PYTHONIC_ITERTOOLS_COMBINATIONS_HPP #include "pythonic/include/itertools/combinations.hpp" #include "pythonic/types/dynamic_tuple.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace itertools { namespace details { template template combination_iterator::combination_iterator(Iter &&pool, long r) : pool(pool.begin(), pool.end()), indices(r), r(r), stopped(r > long(this->pool.size())) { assert(r >= 0 && "r 
must be non-negative"); if (!stopped) { std::iota(indices.begin(), indices.end(), 0); result = std::vector(this->pool.begin(), this->pool.begin() + r); } } template combination_iterator::combination_iterator(bool) : stopped(true) { } template types::dynamic_tuple combination_iterator:: operator*() const { assert(!stopped && "! stopped"); return {result.begin(), result.end()}; } template combination_iterator &combination_iterator::operator++() { /* Scan indices right-to-left until finding one that is ! at its maximum (i + n - r). */ long i, n = pool.size(); for (i = r - 1; i >= 0 && indices[i] == i + n - r; i--) ; /* If i is negative, then the indices are all at their maximum value && we're done. */ if (i < 0) stopped = true; else { /* Increment the current index which we know is ! at its maximum. Then move back to the right setting each index to its lowest possible value (one higher than the index to its left -- this maintains the sort order invariant). */ indices[i]++; for (long j = i + 1; j < r; j++) indices[j] = indices[j - 1] + 1; /* Update the result tuple for the new indices starting with i, the leftmost index that changed */ for (; i < r; i++) { result[i] = pool[indices[i]]; } } return *this; } template bool combination_iterator:: operator!=(combination_iterator const &other) const { assert(stopped || other.stopped); return !(*this == other); } template bool combination_iterator:: operator==(combination_iterator const &other) const { assert(stopped || other.stopped); return other.stopped == stopped; } template bool combination_iterator:: operator<(combination_iterator const &other) const { return stopped != other.stopped; } template template combination::combination(Iter &&iter, long elts) : iterator(std::forward(iter), elts), num_elts(elts) { } template typename combination::iterator const &combination::begin() const { return *this; } template typename combination::iterator combination::begin() { return *this; } template typename combination::iterator 
combination::end() const { return {true}; } } template details::combination< typename std::remove_cv::type>::type> combinations(T0 &&iter, long num_elts) { return {std::forward(iter), num_elts}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/itertools/common.hpp000066400000000000000000000002061416264035500233140ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_COMMON_HPP #define PYTHONIC_ITERTOOLS_COMMON_HPP #include "pythonic/include/itertools/common.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/itertools/count.hpp000066400000000000000000000041731416264035500231630ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_COUNT_HPP #define PYTHONIC_ITERTOOLS_COUNT_HPP #include "pythonic/include/itertools/count.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace itertools { namespace details { template count_iterator::count_iterator(T value, T step) : value(value), step(step) { } template T count_iterator::operator*() const { return value; } template count_iterator &count_iterator::operator++() { value += step; return *this; } template count_iterator &count_iterator::operator+=(long n) { value += step * n; return *this; } template bool count_iterator::operator!=(count_iterator const &other) const { return value != other.value; } template bool count_iterator::operator==(count_iterator const &other) const { return value == other.value; } template bool count_iterator::operator<(count_iterator const &other) const { return value < other.value; } template long count_iterator::operator-(count_iterator const &other) const { return (value - other.value) / step; } template count::count(T value, T step) : count_iterator(value, step) { } template typename count::iterator &count::begin() { return *this; } template typename count::iterator const &count::begin() const { return *this; } template typename count::iterator count::end() const { return {std::numeric_limits::max(), count_iterator::step}; } } template details::count::type> 
count(T0 start, T1 step) { using return_t = typename __combined::type; return {static_cast(start), static_cast(step)}; } details::count count() { return {0, 1}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/itertools/ifilter.hpp000066400000000000000000000071241416264035500234700ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_IFILTER_HPP #define PYTHONIC_ITERTOOLS_IFILTER_HPP #include "pythonic/include/itertools/ifilter.hpp" #include "pythonic/utils/iterator.hpp" #include "pythonic/itertools/common.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace itertools { namespace details { template bool ifilter_iterator::test_filter(std::false_type) { return op(*iter); } template bool ifilter_iterator::test_filter(std::true_type) { return *iter; } template ifilter_iterator::ifilter_iterator(Operator _op, List0 &_seq) : op(_op), iter(_seq.begin()), iter_end(_seq.end()) { if (!test_filter(std::is_same())) next_value(); } template ifilter_iterator::ifilter_iterator(npos, Operator _op, List0 &_seq) : op(_op), iter(_seq.end()), iter_end(_seq.end()) { } template typename List0::value_type ifilter_iterator:: operator*() const { return *iter; } template ifilter_iterator &ifilter_iterator:: operator++() { next_value(); return *this; } template void ifilter_iterator::next_value() { while (++iter != iter_end) { if (test_filter(std::is_same())) return; } } template bool ifilter_iterator:: operator==(ifilter_iterator const &other) const { return !(iter != other.iter); } template bool ifilter_iterator:: operator!=(ifilter_iterator const &other) const { return iter != other.iter; } template bool ifilter_iterator:: operator<(ifilter_iterator const &other) const { return iter != other.iter; } template ifilter::ifilter(Operator _op, List0 const &_seq) : utils::iterator_reminder(_seq), iterator(_op, this->values), end_iter(npos(), _op, this->values) { } template typename ifilter::iterator & ifilter::begin() { return *this; } template typename 
ifilter::iterator const & ifilter::begin() const { return *this; } template typename ifilter::iterator const & ifilter::end() const { return end_iter; } } template details::ifilter::type>::type, typename std::remove_cv< typename std::remove_reference::type>::type> ifilter(Operator &&_op, List0 &&_seq) { return {std::forward(_op), std::forward(_seq)}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/itertools/islice.hpp000066400000000000000000000063421416264035500233030ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_ISLICE_HPP #define PYTHONIC_ITERTOOLS_ISLICE_HPP #include "pythonic/include/itertools/islice.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/itertools/common.hpp" #include "pythonic/builtins/range.hpp" #include PYTHONIC_NS_BEGIN namespace itertools { template islice_iterator::islice_iterator() { } template islice_iterator::islice_iterator(Iterable const &iterable, builtins::range const &xr) : iterable_ref(iterable), iterable(iterable_ref.begin()), xr_ref(xr), state(xr_ref.begin()), prev(*state) { std::advance(this->iterable, *state); } template islice_iterator::islice_iterator(npos const &n, Iterable const &iterable, builtins::range const &xr) : iterable_ref(iterable), iterable(iterable_ref.begin()), xr_ref(xr), state(xr_ref.end()), prev(0) { } template typename Iterable::value_type islice_iterator::operator*() const { return *iterable; } template islice_iterator &islice_iterator::operator++() { ++state; std::advance(this->iterable, *state - prev); prev = *state; return *this; } template bool islice_iterator:: operator==(islice_iterator const &other) const { return (state == other.state); } template bool islice_iterator:: operator!=(islice_iterator const &other) const { return state != other.state; } template bool islice_iterator:: operator<(islice_iterator const &other) const { return state != other.state; } template int islice_iterator:: operator-(islice_iterator const &other) const { return state - other.state; } 
template _islice::_islice() { } template _islice::_islice(Iterable const &iterable, builtins::range const &xr) : iterator(iterable, xr), end_iter(npos(), iterable, xr) { } template typename _islice::iterator &_islice::begin() { return *this; } template typename _islice::iterator const &_islice::begin() const { return *this; } template typename _islice::iterator _islice::end() const { return end_iter; } template _islice::type>::type> islice(Iterable &&iterable, long start, long stop, long step) { return {iterable, builtins::range(start, stop, step)}; } template _islice::type>::type> islice(Iterable &&iterable, long stop) { return {iterable, builtins::range(0, stop, 1)}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/itertools/permutations.hpp000066400000000000000000000075671416264035500245770ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_PERMUTATIONS_HPP #define PYTHONIC_ITERTOOLS_PERMUTATIONS_HPP #include "pythonic/include/itertools/permutations.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/dynamic_tuple.hpp" #include "pythonic/builtins/range.hpp" #include PYTHONIC_NS_BEGIN namespace itertools { template permutations_iterator::permutations_iterator() { } template permutations_iterator::permutations_iterator( std::vector const &iter, size_t num_elts, bool end) : pool(iter), curr_permut(pool.size()), _size(num_elts), end(end) { std::iota(curr_permut.begin(), curr_permut.end(), 0); if (num_elts > iter.size()) { end = true; } } template types::dynamic_tuple permutations_iterator:: operator*() const { std::vector res(_size); for (size_t i = 0; i < _size; i++) res[i] = pool[curr_permut[i]]; // Ok because types::dynamic_tuple is // indeed a vector return {res.begin(), res.end()}; } template permutations_iterator &permutations_iterator::operator++() { if (_size != pool.size()) { // Slow path, the iterator is a "view" of a prefix smaller // than the the pool size // FIXME a better implementation would be to avoid // 
std::next_permutation, but only in the slow path types::dynamic_tuple prev_permut(curr_permut.begin(), curr_permut.begin() + _size); while ((end = std::next_permutation(curr_permut.begin(), curr_permut.end()))) { // Check if the prefix of the new permutation is // different of the previous one types::dynamic_tuple new_permut(curr_permut.begin(), curr_permut.begin() + _size); if (!(prev_permut == new_permut)) break; } } else end = std::next_permutation(curr_permut.begin(), curr_permut.end()); return *this; } template bool permutations_iterator:: operator!=(permutations_iterator const &other) const { return !(*this == other); } template bool permutations_iterator:: operator==(permutations_iterator const &other) const { if (other.end != end) return false; return std::equal(curr_permut.begin(), curr_permut.end(), other.curr_permut.begin()); } template bool permutations_iterator:: operator<(permutations_iterator const &other) const { if (end != other.end) return end > other.end; for (long i = 0; i < pool.size(); i++) if (other.curr_permut[i] < curr_permut[i]) return false; else if (other.curr_permut[i] > curr_permut[i]) return true; return false; } template _permutations::_permutations() { } template _permutations::_permutations(T iter, long elts) : iterator(std::vector(iter.begin(), iter.end()), elts, true) { } template typename _permutations::iterator const &_permutations::begin() const { return *this; } template typename _permutations::iterator _permutations::begin() { return *this; } template typename _permutations::iterator _permutations::end() const { return iterator(iterator::pool, iterator::_size, false); } template _permutations permutations(T0 iter, long num_elts) { return _permutations(iter, num_elts); } template _permutations permutations(T0 iter) { return _permutations(iter, std::distance(iter.begin(), iter.end())); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/itertools/product.hpp000066400000000000000000000076431416264035500235200ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_PRODUCT_HPP #define PYTHONIC_ITERTOOLS_PRODUCT_HPP #include "pythonic/include/itertools/product.hpp" #include "pythonic/utils/int_.hpp" #include "pythonic/utils/seq.hpp" #include "pythonic/utils/iterator.hpp" #include "pythonic/itertools/common.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace itertools { namespace details { /// product iterator implementation template template product_iterator::product_iterator( std::tuple &_iters, utils::index_sequence const &) : it_begin(std::get(_iters).begin()...), it_end(std::get(_iters).end()...), it(std::get(_iters).begin()...), end(it_begin == it_end) { } template template product_iterator::product_iterator( npos, std::tuple &_iters, utils::index_sequence const &) : it_begin(std::get(_iters).end()...), it_end(std::get(_iters).end()...), it(std::get(_iters).end()...), end(true) { } template template types::make_tuple_t product_iterator::get_value( utils::index_sequence const &) const { return types::make_tuple(*std::get(it)...); } template types::make_tuple_t product_iterator::operator*() const { return get_value(utils::make_index_sequence{}); } template template void product_iterator::advance(utils::int_) { if (++std::get(it) == std::get(it_end)) { std::get(it) = std::get(it_begin); advance(utils::int_()); } } template void product_iterator::advance(utils::int_<0>) { if (++std::get<0>(it) == std::get<0>(it_end)) end = true; } template product_iterator &product_iterator::operator++() { advance(utils::int_{}); return *this; } template bool product_iterator:: operator==(product_iterator const &other) const { return end == other.end; } template bool product_iterator:: operator!=(product_iterator const &other) const { return end != other.end; } template bool product_iterator:: operator<(product_iterator const &other) const { return end != 
other.end; } /// details product implementation // FIXME: Iterators need to be evaluated as they may be used multiple // times template product::product(Iters const &... _iters) : utils::iterator_reminder(_iters...), iterator(this->values, utils::make_index_sequence{}), end_iter(npos(), this->values, utils::make_index_sequence{}) { } template typename product::iterator &product::begin() { return *this; } template typename product::iterator const &product::begin() const { return *this; } template typename product::iterator const &product::end() const { return end_iter; } } template details::product::type>::type...> product(Iter &&... iters) { return {std::forward(iters)...}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/itertools/repeat.hpp000066400000000000000000000035431416264035500233130ustar00rootroot00000000000000#ifndef PYTHONIC_ITERTOOLS_REPEAT_HPP #define PYTHONIC_ITERTOOLS_REPEAT_HPP #include "pythonic/include/itertools/repeat.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/list.hpp" PYTHONIC_NS_BEGIN namespace itertools { template repeat_iterator::repeat_iterator(T value, long count) : value_(value), count_(count) { } template repeat_iterator &repeat_iterator::operator++() { ++count_; return *this; } template T repeat_iterator::operator*() { return value_; } template bool repeat_iterator:: operator!=(repeat_iterator const &other) const { return Endless || count_ != other.count_; } template bool repeat_iterator:: operator==(repeat_iterator const &other) const { return !Endless && count_ == other.count_; } template bool repeat_iterator:: operator<(repeat_iterator const &other) const { return !Endless && count_ < other.count_; } template _repeat::_repeat(T value, long count) : repeat_iterator(value, count) { } template typename _repeat::iterator _repeat::begin() const { return {_repeat::iterator::value_, 0}; } template typename _repeat::iterator _repeat::end() const { return *this; } template _repeat repeat(T value, long 
count) { return {value, count}; } template _repeat repeat(T value) { return {value, -1}; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/000077500000000000000000000000001416264035500202225ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/math/acos.hpp000066400000000000000000000003401416264035500216550ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ACOS_HPP #define PYTHONIC_MATH_ACOS_HPP #include "pythonic/include/math/acos.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/acosh.hpp000066400000000000000000000003431416264035500220300ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ACOSH_HPP #define PYTHONIC_MATH_ACOSH_HPP #include "pythonic/include/math/acosh.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/asin.hpp000066400000000000000000000003401416264035500216620ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ASIN_HPP #define PYTHONIC_MATH_ASIN_HPP #include "pythonic/include/math/asin.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/asinh.hpp000066400000000000000000000003431416264035500220350ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ASINH_HPP #define PYTHONIC_MATH_ASINH_HPP #include "pythonic/include/math/asinh.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/atan.hpp000066400000000000000000000003401416264035500216530ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ATAN_HPP #define PYTHONIC_MATH_ATAN_HPP #include "pythonic/include/math/atan.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/math/atan2.hpp000066400000000000000000000003431416264035500217400ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ATAN2_HPP #define PYTHONIC_MATH_ATAN2_HPP #include "pythonic/include/math/atan2.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/atanh.hpp000066400000000000000000000003431416264035500220260ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ATANH_HPP #define PYTHONIC_MATH_ATANH_HPP #include "pythonic/include/math/atanh.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/ceil.hpp000066400000000000000000000004471416264035500216540ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_CEIL_HPP #define PYTHONIC_MATH_CEIL_HPP #include "pythonic/include/math/ceil.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { template long ceil(T x) { return std::ceil(x); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/copysign.hpp000066400000000000000000000003541416264035500225700ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_COPYSIGN_HPP #define PYTHONIC_MATH_COPYSIGN_HPP #include "pythonic/include/math/copysign.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/cos.hpp000066400000000000000000000003351416264035500215200ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_COS_HPP #define PYTHONIC_MATH_COS_HPP #include "pythonic/include/math/cos.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/cosh.hpp000066400000000000000000000003401416264035500216640ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_COSH_HPP #define PYTHONIC_MATH_COSH_HPP #include 
"pythonic/include/math/cosh.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/degrees.hpp000066400000000000000000000005171416264035500223540ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_DEGREES_HPP #define PYTHONIC_MATH_DEGREES_HPP #include "pythonic/include/math/degrees.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/math/pi.hpp" PYTHONIC_NS_BEGIN namespace math { template double degrees(T x) { return (x * 360.) / (2. * pi); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/e.hpp000066400000000000000000000001501416264035500211530ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_E_HPP #define PYTHONIC_MATH_E_HPP #include "pythonic/include/math/e.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/math/erf.hpp000066400000000000000000000003351416264035500215100ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ERF_HPP #define PYTHONIC_MATH_ERF_HPP #include "pythonic/include/math/erf.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/erfc.hpp000066400000000000000000000003401416264035500216470ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ERFC_HPP #define PYTHONIC_MATH_ERFC_HPP #include "pythonic/include/math/erfc.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/exp.hpp000066400000000000000000000003351416264035500215300ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_EXP_HPP #define PYTHONIC_MATH_EXP_HPP #include "pythonic/include/math/exp.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/expm1.hpp000066400000000000000000000003431416264035500217650ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_EXPM1_HPP #define 
PYTHONIC_MATH_EXPM1_HPP #include "pythonic/include/math/expm1.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/fabs.hpp000066400000000000000000000003401416264035500216430ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_FABS_HPP #define PYTHONIC_MATH_FABS_HPP #include "pythonic/include/math/fabs.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/factorial.hpp000066400000000000000000000005431416264035500227010ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_FACTORIAL_HPP #define PYTHONIC_MATH_FACTORIAL_HPP #include "pythonic/include/math/factorial.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace math { template T factorial(T x) { long res = 1; for (long i = 2; i <= x; i++) res *= i; return res; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/floor.hpp000066400000000000000000000004541416264035500220570ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_FLOOR_HPP #define PYTHONIC_MATH_FLOOR_HPP #include "pythonic/include/math/floor.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { template long floor(T x) { return std::floor(x); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/fmod.hpp000066400000000000000000000003401416264035500216550ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_FMOD_HPP #define PYTHONIC_MATH_FMOD_HPP #include "pythonic/include/math/fmod.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/frexp.hpp000066400000000000000000000006351416264035500220630ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_FREXP_HPP #define PYTHONIC_MATH_FREXP_HPP #include "pythonic/include/math/frexp.hpp" #include "pythonic/utils/functor.hpp" #include 
"pythonic/types/tuple.hpp" #include PYTHONIC_NS_BEGIN namespace math { std::tuple frexp(double x) { int exp; double sig = std::frexp(x, &exp); return std::tuple(sig, exp); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/gamma.hpp000066400000000000000000000004371416264035500220210ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_GAMMA_HPP #define PYTHONIC_MATH_GAMMA_HPP #include "pythonic/include/math/gamma.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { double gamma(double x) { return std::tgamma(x); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/hypot.hpp000066400000000000000000000003431416264035500220760ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_HYPOT_HPP #define PYTHONIC_MATH_HYPOT_HPP #include "pythonic/include/math/hypot.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/isinf.hpp000066400000000000000000000003431416264035500220430ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ISINF_HPP #define PYTHONIC_MATH_ISINF_HPP #include "pythonic/include/math/isinf.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/isnan.hpp000066400000000000000000000003431416264035500220430ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_ISNAN_HPP #define PYTHONIC_MATH_ISNAN_HPP #include "pythonic/include/math/isnan.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/ldexp.hpp000066400000000000000000000003431416264035500220470ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_LDEXP_HPP #define PYTHONIC_MATH_LDEXP_HPP #include "pythonic/include/math/ldexp.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/math/lgamma.hpp000066400000000000000000000003461416264035500221740ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_LGAMMA_HPP #define PYTHONIC_MATH_LGAMMA_HPP #include "pythonic/include/math/lgamma.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/log.hpp000066400000000000000000000004731416264035500215200ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_LOG_HPP #define PYTHONIC_MATH_LOG_HPP #include "pythonic/include/math/log.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { using std::log; double log(double x, double base) { return log(x) / log(base); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/log10.hpp000066400000000000000000000003431416264035500216550ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_LOG10_HPP #define PYTHONIC_MATH_LOG10_HPP #include "pythonic/include/math/log10.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/log1p.hpp000066400000000000000000000003431416264035500217550ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_LOG1P_HPP #define PYTHONIC_MATH_LOG1P_HPP #include "pythonic/include/math/log1p.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/modf.hpp000066400000000000000000000006211416264035500216570ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_MODF_HPP #define PYTHONIC_MATH_MODF_HPP #include "pythonic/include/math/modf.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/tuple.hpp" #include PYTHONIC_NS_BEGIN namespace math { std::tuple modf(double x) { double i; double frac = std::modf(x, &i); return std::make_tuple(frac, i); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/math/pi.hpp000066400000000000000000000001531416264035500213420ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_PI_HPP #define PYTHONIC_MATH_PI_HPP #include "pythonic/include/math/pi.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/math/pow.hpp000066400000000000000000000003351416264035500215410ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_POW_HPP #define PYTHONIC_MATH_POW_HPP #include "pythonic/include/math/pow.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/radians.hpp000066400000000000000000000005141416264035500223540ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_RADIANS_HPP #define PYTHONIC_MATH_RADIANS_HPP #include "pythonic/include/math/radians.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/math/pi.hpp" PYTHONIC_NS_BEGIN namespace math { template double radians(T x) { return (x * 2. * pi) / 360.; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/sin.hpp000066400000000000000000000003351416264035500215250ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_SIN_HPP #define PYTHONIC_MATH_SIN_HPP #include "pythonic/include/math/sin.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/sinh.hpp000066400000000000000000000003401416264035500216710ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_SINH_HPP #define PYTHONIC_MATH_SINH_HPP #include "pythonic/include/math/sinh.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/sqrt.hpp000066400000000000000000000003401416264035500217210ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_SQRT_HPP #define PYTHONIC_MATH_SQRT_HPP #include "pythonic/include/math/sqrt.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/tan.hpp000066400000000000000000000003351416264035500215160ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_TAN_HPP #define PYTHONIC_MATH_TAN_HPP #include "pythonic/include/math/tan.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/tanh.hpp000066400000000000000000000003401416264035500216620ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_TANH_HPP #define PYTHONIC_MATH_TANH_HPP #include "pythonic/include/math/tanh.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/math/trunc.hpp000066400000000000000000000004401416264035500220640ustar00rootroot00000000000000#ifndef PYTHONIC_MATH_TRUNC_HPP #define PYTHONIC_MATH_TRUNC_HPP #include "pythonic/include/math/trunc.hpp" #include "pythonic/utils/functor.hpp" #include PYTHONIC_NS_BEGIN namespace math { template long trunc(T x) { return x; } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/000077500000000000000000000000001416264035500204415ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/.hpp000066400000000000000000000001501416264035500212250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY__HPP #define PYTHONIC_NUMPY__HPP #include "pythonic/include/numpy/.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/NINF.hpp000066400000000000000000000001641416264035500217050ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_NINF_HPP #define PYTHONIC_NUMPY_NINF_HPP #include "pythonic/include/numpy/NINF.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/abs.hpp000066400000000000000000000005071416264035500217210ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ABS_HPP #define PYTHONIC_NUMPY_ABS_HPP #include "pythonic/include/numpy/abs.hpp" #include "pythonic/utils/functor.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define 
NUMPY_NARY_FUNC_NAME abs #define NUMPY_NARY_FUNC_SYM xsimd::abs #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/absolute.hpp000066400000000000000000000002421416264035500227660ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ABSOLUTE_HPP #define PYTHONIC_NUMPY_ABSOLUTE_HPP #include "pythonic/include/numpy/absolute.hpp" #include "pythonic/numpy/abs.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/add.hpp000066400000000000000000000007721416264035500217100ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ADD_HPP #define PYTHONIC_NUMPY_ADD_HPP #include "pythonic/include/numpy/add.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/operator_/add.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME add #define NUMPY_NARY_FUNC_SYM pythonic::operator_::add #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/add/000077500000000000000000000000001416264035500211715ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/add/accumulate.hpp000066400000000000000000000002431416264035500240240ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ADD_ACCUMULATE_HPP #define PYTHONIC_NUMPY_ADD_ACCUMULATE_HPP #define UFUNC_NAME add #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/add/reduce.hpp000066400000000000000000000004061416264035500231510ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ADD_REDUCE_HPP #define PYTHONIC_NUMPY_ADD_REDUCE_HPP #define UFUNC_NAME add #define UFUNC_INAME iadd #include "pythonic/include/numpy/add/reduce.hpp" #include "pythonic/numpy/ufunc_reduce.hpp" #undef UFUNC_NAME #undef UFUNC_INAME #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/alen.hpp000066400000000000000000000005211416264035500220670ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ALEN_HPP #define PYTHONIC_NUMPY_ALEN_HPP #include "pythonic/include/numpy/alen.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template long alen(T &&expr) { return expr.template shape<0>(); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/all.hpp000066400000000000000000000055031416264035500217250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ALL_HPP #define PYTHONIC_NUMPY_ALL_HPP #include "pythonic/include/numpy/all.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/multiply.hpp" PYTHONIC_NS_BEGIN namespace numpy { template bool _all(E begin, E end, utils::int_<1>) { return std::all_of(begin, end, [](typename std::iterator_traits::value_type e) -> bool { return e; }); } template bool _all(E begin, E end, utils::int_) { for (; begin != end; ++begin) if (!_all((*begin).begin(), (*begin).end(), utils::int_())) return false; return true; } template typename std::enable_if::value, bool>::type all(E const &expr, types::none_type) { return _all(expr.begin(), expr.end(), utils::int_()); } template typename std::enable_if< std::is_scalar::value || types::is_complex::value, bool>::type all(E const &expr, types::none_type) { return expr; } template auto all(E const &array, long axis) -> typename std::enable_if::value || types::is_complex::value, decltype(all(array))>::type { if (axis != 0) throw types::ValueError("axis out of bounds"); return all(array); } template auto all(E const &array, long axis) -> typename std::enable_if::type { if (axis != 0) throw types::ValueError("axis out of bounds"); return all(array); } template typename std::enable_if< E::value != 1, types::ndarray>>::type all(E const &array, long axis) { constexpr long N 
= E::value; typedef typename E::dtype T; if (axis < 0 || axis >= long(N)) throw types::ValueError("axis out of bounds"); if (axis == 0) { types::array shp; sutils::copy_shape<0, 1>(shp, array, utils::make_index_sequence()); types::ndarray> out(shp, true); return std::accumulate(array.begin(), array.end(), out, functor::multiply()); } else { types::array shp; sutils::copy_shape<0, 0>(shp, array, utils::make_index_sequence()); types::ndarray> ally(shp, builtins::None); std::transform( array.begin(), array.end(), ally.begin(), [=](types::ndarray> const &other) { return all(other, axis - 1); }); return ally; } } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/allclose.hpp000066400000000000000000000026711416264035500227560ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ALLCLOSE_HPP #define PYTHONIC_NUMPY_ALLCLOSE_HPP #include "pythonic/include/numpy/allclose.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/abs.hpp" #include "pythonic/numpy/isfinite.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace { template bool _allclose(I0 begin, I0 end, I1 ibegin, double rtol, double atol, utils::int_<1>) { for (; begin != end; ++begin, ++ibegin) { auto u = *begin; auto v = *ibegin; if (((!functor::isfinite()(u) || !functor::isfinite()(v)) && u != v) || // Infinite && NaN cases functor::abs()(u - v) > (atol + rtol * functor::abs()(v))) { return false; } } return true; } template bool _allclose(I0 begin, I0 end, I1 ibegin, double rtol, double atol, utils::int_) { for (; begin != end; ++begin, ++ibegin) if (!_allclose((*begin).begin(), (*begin).end(), (*ibegin).begin(), rtol, atol, utils::int_())) return false; return true; } } template bool allclose(U const &u, V const &v, double rtol, double atol) { return _allclose(u.begin(), u.end(), v.begin(), rtol, atol, utils::int_()); } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/alltrue.hpp000066400000000000000000000006031416264035500226210ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ALLTRUE_HPP #define PYTHONIC_NUMPY_ALLTRUE_HPP #include "pythonic/include/numpy/alltrue.hpp" #include "pythonic/numpy/all.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto alltrue(Types &&... types) -> decltype(all(std::forward(types)...)) { return all(std::forward(types)...); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/amax.hpp000066400000000000000000000002261416264035500221000ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_AMAX_HPP #define PYTHONIC_NUMPY_AMAX_HPP #include "pythonic/include/numpy/amax.hpp" #include "pythonic/numpy/max.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/amin.hpp000066400000000000000000000002261416264035500220760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_AMIN_HPP #define PYTHONIC_NUMPY_AMIN_HPP #include "pythonic/include/numpy/amin.hpp" #include "pythonic/numpy/min.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/angle.hpp000066400000000000000000000014671416264035500222500ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ANGLE_HPP #define PYTHONIC_NUMPY_ANGLE_HPP #include "pythonic/include/numpy/angle.hpp" #include "pythonic/numpy/angle_in_deg.hpp" #include "pythonic/numpy/angle_in_rad.hpp" #include "pythonic/types/assignable.hpp" PYTHONIC_NS_BEGIN namespace numpy { template auto angle(T const &t, bool in_deg) -> typename assignable::type // assignable to find a common type between the two expression templates { if (in_deg) return functor::angle_in_deg()(t); else return functor::angle_in_rad()(t); } // Numpy_expr can be use if only the first argument is given. 
template auto angle(T const &t) -> decltype(functor::angle_in_rad()(t)) { return functor::angle_in_rad()(t); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/angle_in_deg.hpp000066400000000000000000000011101416264035500235360ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ANGLEINDEG_HPP #define PYTHONIC_NUMPY_ANGLEINDEG_HPP #include "pythonic/include/numpy/angle_in_deg.hpp" #include "pythonic/numpy/angle_in_rad.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/pi.hpp" /* NOTE: angle_in_deg is not part of the official Numpy API, * this file is here only to split the angle function in two parts */ PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME angle_in_deg #define NUMPY_NARY_FUNC_SYM wrapper::angle_in_deg #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/angle_in_rad.hpp000066400000000000000000000015401416264035500235540ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ANGLEINRAD_HPP #define PYTHONIC_NUMPY_ANGLEINRAD_HPP #include "pythonic/include/numpy/angle_in_rad.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include "pythonic/numpy/arctan.hpp" #include "pythonic/numpy/pi.hpp" /* NOTE: angle_in_rad is not part of the official Numpy API, * this file is here only to split the angle function in two parts */ PYTHONIC_NS_BEGIN namespace numpy { namespace wrapper { template auto angle_in_rad(T const &t) -> decltype(std::atan2(std::imag(t), std::real(t))) { return std::atan2(std::imag(t), std::real(t)); } } #define NUMPY_NARY_FUNC_NAME angle_in_rad #define NUMPY_NARY_FUNC_SYM wrapper::angle_in_rad #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/any.hpp000066400000000000000000000053611416264035500217460ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ANY_HPP #define PYTHONIC_NUMPY_ANY_HPP #include 
"pythonic/include/numpy/any.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/builtins/ValueError.hpp" #include "pythonic/numpy/add.hpp" PYTHONIC_NS_BEGIN namespace numpy { template bool _any(E const &e, utils::int_<1>) { return std::any_of(e.begin(), e.end(), [](typename E::dtype elt) -> bool { return elt; }); } template bool _any(E const &e, utils::int_) { for (auto &&elt : e) if (_any(elt, utils::int_())) { return true; } return false; } template typename std::enable_if::value, bool>::type any(E const &expr, types::none_type) { return _any(expr, utils::int_()); } template typename std::enable_if< std::is_scalar::value || types::is_complex::value, bool>::type any(E const &expr, types::none_type) { return expr; } template auto any(E const &array, long axis) -> typename std::enable_if::value || types::is_complex::value, decltype(any(array))>::type { if (axis != 0) throw types::ValueError("axis out of bounds"); return any(array); } template auto any(E const &array, long axis) -> typename std::enable_if::type { if (axis != 0) throw types::ValueError("axis out of bounds"); return any(array); } template typename std::enable_if< E::value != 1, types::ndarray>>::type any(E const &array, long axis) { constexpr long N = E::value; using T = typename E::dtype; if (axis < 0 || axis >= long(N)) throw types::ValueError("axis out of bounds"); if (axis == 0) { types::array shp; shp[0] = 1; sutils::copy_shape<1, 0>(shp, array, utils::make_index_sequence()); types::ndarray> out(shp, false); return std::accumulate(array.begin(), array.end(), *out.begin(), numpy::functor::add()); } else { types::array shp; sutils::copy_shape<0, 0>(shp, array, utils::make_index_sequence()); types::ndarray> anyy(shp, builtins::None); std::transform( array.begin(), array.end(), anyy.begin(), [=](types::ndarray> const &other) { return any(other, axis - 1); }); return anyy; } } } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/append.hpp000066400000000000000000000036501416264035500224250ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_APPEND_HPP #define PYTHONIC_NUMPY_APPEND_HPP #include "pythonic/include/numpy/append.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template typename std::enable_if< !types::is_dtype::value, types::ndarray< typename __combined::type>::type, types::pshape>>::type append(types::ndarray const &nto, F const &data) { auto ndata = numpy::functor::asarray{}(data); long nsize = nto.flat_size() + ndata.flat_size(); types::ndarray< typename __combined::type>::type, types::pshape> out(types::pshape(nsize), builtins::None); auto out_back = std::copy(nto.fbegin(), nto.fend(), out.fbegin()); std::copy(ndata.fbegin(), ndata.fend(), out_back); return out; } template typename std::enable_if< types::is_dtype::value, types::ndarray< typename __combined::type>::type, types::pshape>>::type append(types::ndarray const &nto, F const &data) { long nsize = nto.flat_size() + 1; types::ndarray< typename __combined::type>::type, types::pshape> out(types::pshape(nsize), builtins::None); auto out_back = std::copy(nto.fbegin(), nto.fend(), out.fbegin()); *out_back = data; return out; } template types::ndarray::type, typename types::dtype_of::type>::type, types::pshape> append(T const &to, F const &data) { return append(numpy::functor::asarray{}(to), data); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arange.hpp000066400000000000000000000020211416264035500224020ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARANGE_HPP #define PYTHONIC_NUMPY_ARANGE_HPP #include "pythonic/include/numpy/arange.hpp" #include "pythonic/operator_/pos.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" PYTHONIC_NS_BEGIN namespace numpy { template types::numpy_expr> arange(T begin, U end, S step, dtype d) 
{ using R = typename dtype::type; long size; if (std::is_integral::value) size = std::max(R(0), R((end - begin + step - 1) / step)); else size = std::max(R(0), R(std::ceil((end - begin) / step))); return {details::arange_index{(R)begin, (R)step, size}}; } template types::numpy_expr::type>> arange(T end) { return arange>(T(0), end); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arccos.hpp000066400000000000000000000006461416264035500224320ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCCOS_HPP #define PYTHONIC_NUMPY_ARCCOS_HPP #include "pythonic/include/numpy/arccos.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arccos #define NUMPY_NARY_FUNC_SYM xsimd::acos #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arccosh.hpp000066400000000000000000000006511416264035500225760ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCCOSH_HPP #define PYTHONIC_NUMPY_ARCCOSH_HPP #include "pythonic/include/numpy/arccosh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arccosh #define NUMPY_NARY_FUNC_SYM xsimd::acosh #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arcsin.hpp000066400000000000000000000006441416264035500224350ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCSIN_HPP #define PYTHONIC_NUMPY_ARCSIN_HPP #include "pythonic/include/numpy/arcsin.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arcsin #define NUMPY_NARY_FUNC_SYM xsimd::asin #include "pythonic/types/numpy_nary_expr.hpp" } 
PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arcsinh.hpp000066400000000000000000000006511416264035500226030ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCSINH_HPP #define PYTHONIC_NUMPY_ARCSINH_HPP #include "pythonic/include/numpy/arcsinh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arcsinh #define NUMPY_NARY_FUNC_SYM xsimd::asinh #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arctan.hpp000066400000000000000000000007001416264035500224170ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCTAN_HPP #define PYTHONIC_NUMPY_ARCTAN_HPP #include "pythonic/include/numpy/arctan.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" #include PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arctan #define NUMPY_NARY_FUNC_SYM xsimd::atan #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arctan2.hpp000066400000000000000000000007301416264035500225040ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCTAN2_HPP #define PYTHONIC_NUMPY_ARCTAN2_HPP #include "pythonic/include/numpy/arctan2.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/types/numpy_broadcast.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arctan2 #define NUMPY_NARY_FUNC_SYM xsimd::atan2 #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif 
pythran-0.10.0+ds2/pythran/pythonic/numpy/arctan2/000077500000000000000000000000001416264035500217735ustar00rootroot00000000000000pythran-0.10.0+ds2/pythran/pythonic/numpy/arctan2/accumulate.hpp000066400000000000000000000002571416264035500246330ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCTAN2_ACCUMULATE_HPP #define PYTHONIC_NUMPY_ARCTAN2_ACCUMULATE_HPP #define UFUNC_NAME arctan2 #include "pythonic/numpy/ufunc_accumulate.hpp" #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/arctanh.hpp000066400000000000000000000006511416264035500225740ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARCTANH_HPP #define PYTHONIC_NUMPY_ARCTANH_HPP #include "pythonic/include/numpy/arctanh.hpp" #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/utils/numpy_traits.hpp" PYTHONIC_NS_BEGIN namespace numpy { #define NUMPY_NARY_FUNC_NAME arctanh #define NUMPY_NARY_FUNC_SYM xsimd::atanh #include "pythonic/types/numpy_nary_expr.hpp" } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/argmax.hpp000066400000000000000000000020101416264035500224220ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARGMAX_HPP #define PYTHONIC_NUMPY_ARGMAX_HPP #include "pythonic/include/numpy/argmax.hpp" #include "pythonic/numpy/argminmax.hpp" #include "pythonic/numpy/maximum.hpp" PYTHONIC_NS_BEGIN namespace numpy { template struct argmax_op { using op = functor::maximum; using expr_type = E; static typename E::dtype constexpr limit() { return std::numeric_limits::lowest(); } template static T elements(T first, T last) { return std::max_element(first, last); } template static T value(T self, T other) { return self > other; } }; template long argmax(E const &expr) { return argminmax>(expr); } template types::ndarray> argmax(E const &expr, long axis) { return argminmax>(expr, axis); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/argmin.hpp000066400000000000000000000020061416264035500224250ustar00rootroot00000000000000#ifndef 
PYTHONIC_NUMPY_ARGMIN_HPP #define PYTHONIC_NUMPY_ARGMIN_HPP #include "pythonic/include/numpy/argmin.hpp" #include "pythonic/numpy/argminmax.hpp" #include "pythonic/numpy/minimum.hpp" PYTHONIC_NS_BEGIN namespace numpy { template struct argmin_op { using op = functor::minimum; using expr_type = E; static typename E::dtype constexpr limit() { return std::numeric_limits::max(); } template static T elements(T first, T last) { return std::min_element(first, last); } template static T value(T self, T other) { return self < other; } }; template long argmin(E const &expr) { return argminmax>(expr); } template types::ndarray> argmin(E const &expr, long axis) { return argminmax>(expr, axis); } } PYTHONIC_NS_END #endif pythran-0.10.0+ds2/pythran/pythonic/numpy/argminmax.hpp000066400000000000000000000222651416264035500231440ustar00rootroot00000000000000#ifndef PYTHONIC_NUMPY_ARGMINMAX_HPP #define PYTHONIC_NUMPY_ARGMINMAX_HPP #include "pythonic/utils/functor.hpp" #include "pythonic/types/ndarray.hpp" #include "pythonic/numpy/asarray.hpp" #include "pythonic/builtins/ValueError.hpp" PYTHONIC_NS_BEGIN namespace numpy { namespace details { template P iota(utils::index_sequence) { return {static_cast(Is)...}; } template P iota() { return iota