pax_global_header00006660000000000000000000000064146067072720014525gustar00rootroot0000000000000052 comment=19c63b4d0893cdd8d3d5bb494f19eb96ec335060 mpi4py-3.1.6/000077500000000000000000000000001460670727200127565ustar00rootroot00000000000000mpi4py-3.1.6/.appveyor.yml000066400000000000000000000042501460670727200154250ustar00rootroot00000000000000# https://ci.appveyor.com/project/mpi4py/mpi4py image: Visual Studio 2019 environment: matrix: - PYTHON: "C:\\Python27" APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - PYTHON: "C:\\Python27-x64" APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - PYTHON: "C:\\Python35" APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - PYTHON: "C:\\Python35-x64" APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - PYTHON: "C:\\Python36" - PYTHON: "C:\\Python36-x64" - PYTHON: "C:\\Python37" - PYTHON: "C:\\Python37-x64" - PYTHON: "C:\\Python38" - PYTHON: "C:\\Python38-x64" - PYTHON: "C:\\Python39" - PYTHON: "C:\\Python39-x64" - PYTHON: "C:\\Python310" - PYTHON: "C:\\Python310-x64" - PYTHON: "C:\\Python311" - PYTHON: "C:\\Python311-x64" clone_depth: 1 branches: only: - master - maint - ci/all - ci/appveyor init: - "ECHO Python from %PYTHON%" install: # Python - "%PYTHON%\\python.exe --version" - "%PYTHON%\\python.exe -m pip install --upgrade pip" - "%PYTHON%\\python.exe -m pip --version" - "%PYTHON%\\python.exe -m pip install --upgrade setuptools" - "%PYTHON%\\python.exe -m pip install --upgrade wheel" - "%PYTHON%\\python.exe -m wheel version" # Cython - "%PYTHON%\\python.exe -m pip install -r conf\\requirements-build-cython.txt" - "%PYTHON%\\python.exe -m cython --version" # Microsoft MPI - "powershell .azure\\install-msmpi.ps1" - "SetEnvMPI.cmd" build: off build_script: - "%PYTHON%\\python.exe setup.py build" - "%PYTHON%\\python.exe setup.py --quiet bdist_wheel" test: off test_script: - "%PYTHON%\\Scripts\\pip.exe install --no-cache-dir --no-index --find-links=dist\\ mpi4py" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 1 %PYTHON%\\python.exe %CD%\\test\\runtests.py -v -f --no-builddir" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 1 %PYTHON%\\python.exe %CD%\\demo\\futures\\test_futures.py -v" - "\"%MSMPI_BIN%\\mpiexec.exe\" -n 2 %PYTHON%\\python.exe %CD%\\demo\\futures\\test_futures.py -v" - "%PYTHON%\\Scripts\\pip.exe uninstall --yes mpi4py" artifacts: - path: dist\* #cache: # - C:\Downloads\MSMPI -> .azure\install-msmpi.ps1 # - '%LOCALAPPDATA%\pip\Cache' mpi4py-3.1.6/.azure/000077500000000000000000000000001460670727200141625ustar00rootroot00000000000000mpi4py-3.1.6/.azure/install-mpich.sh000066400000000000000000000002171460670727200172620ustar00rootroot00000000000000#!/bin/bash set -e case `uname` in Linux) set -x; sudo apt install -y mpich libmpich-dev ;; Darwin) set -x; brew install mpich ;; esac mpi4py-3.1.6/.azure/install-msmpi.ps1000066400000000000000000000071731460670727200174100ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com $ErrorActionPreference = "Stop" $MS_DOWNLOAD_URL = "https://download.microsoft.com/download/" $MSMPI_HASH_URL_V0500 = "3/7/6/3764A48C-5C4E-4E4D-91DA-68CED9032EDE/" $MSMPI_HASH_URL_V0600 = "6/4/A/64A7852A-A8C3-476D-908C-30501F761DF3/" $MSMPI_HASH_URL_V0700 = "D/7/B/D7BBA00F-71B7-436B-80BC-4D22F2EE9862/" $MSMPI_HASH_URL_V0710 = "E/8/A/E8A080AF-040D-43FF-97B4-065D4F220301/" $MSMPI_HASH_URL_V0800 = "B/2/E/B2EB83FE-98C2-4156-834A-E1711E6884FB/" $MSMPI_HASH_URL_V0810 = "D/B/B/DBB64BA1-7B51-43DB-8BF1-D1FB45EACF7A/" $MSMPI_HASH_URL_V0900 = "2/E/C/2EC96D7F-687B-4613-80F6-E10F670A2D97/" $MSMPI_HASH_URL_V0901 = 
"4/A/6/4A6AAED8-200C-457C-AB86-37505DE4C90D/" $MSMPI_HASH_URL_V1000 = "A/E/0/AE002626-9D9D-448D-8197-1EA510E297CE/" $MSMPI_HASH_URL_V1011 = "2/9/e/29efe9b1-16d7-4912-a229-6734b0c4e235/" $MSMPI_HASH_URL_V1012 = "a/5/2/a5207ca5-1203-491a-8fb8-906fd68ae623/" $MSMPI_HASH_URL = $MSMPI_HASH_URL_V1012 $MSMPI_BASE_URL = $MS_DOWNLOAD_URL + $MSMPI_HASH_URL $DOWNLOADS = "C:\Downloads\MSMPI" function Download ($url, $filename, $destdir) { if ($destdir) { $item = New-Item $destdir -ItemType directory -Force $destdir = $item.FullName } else { $destdir = $pwd.Path } $filepath = Join-Path $destdir $filename if (Test-Path $filepath) { Write-Host "Reusing" $filename "from" $destdir return $filepath } Write-Host "Downloading" $filename "from" $url $webclient = New-Object System.Net.WebClient foreach($i in 1..3) { try { $webclient.DownloadFile($url, $filepath) Write-Host "File saved at" $filepath return $filepath } Catch [Exception] { Start-Sleep 1 } } Write-Host "Failed to download" $filename "from" $url return $null } function InstallMicrosoftMPISDK ($baseurl, $filename) { Write-Host "Installing Microsoft MPI SDK" $url = $baseurl + $filename $filepath = Download $url $filename $DOWNLOADS Write-Host "Installing" $filename $prog = "msiexec.exe" $args = "/quiet /qn /i $filepath" Write-Host "Executing:" $prog $args Start-Process -FilePath $prog -ArgumentList $args -Wait Write-Host "Microsoft MPI SDK installation complete" } function InstallMicrosoftMPIRuntime ($baseurl, $filename) { Write-Host "Installing Microsoft MPI Runtime" $url = $baseurl + $filename $filepath = Download $url $filename $DOWNLOADS Write-Host "Installing" $filename $prog = $filepath $args = "-unattend" Write-Host "Executing:" $prog $args Start-Process -FilePath $prog -ArgumentList $args -Wait Write-Host "Microsoft MPI Runtime installation complete" } function SaveMicrosoftMPIEnvironment ($filepath) { Write-Host "Saving Microsoft MPI environment variables to" $filepath $envlist = @("MSMPI_BIN", "MSMPI_INC", "MSMPI_LIB32", "MSMPI_LIB64") $stream = [IO.StreamWriter] $filepath foreach ($variable in $envlist) { $value = [Environment]::GetEnvironmentVariable($variable, "Machine") if ($value) { $stream.WriteLine("SET $variable=$value") } if ($value) { Write-Host "$variable=$value" } } $stream.Close() } function InstallMicrosoftMPI () { InstallMicrosoftMPISDK $MSMPI_BASE_URL "msmpisdk.msi" InstallMicrosoftMPIRuntime $MSMPI_BASE_URL "MSMpiSetup.exe" SaveMicrosoftMPIEnvironment "SetEnvMPI.cmd" $MSMPI_BIN = [Environment]::GetEnvironmentVariable("MSMPI_BIN", "Machine") if ($Env:GITHUB_PATH) { echo "$MSMPI_BIN" >> $Env:GITHUB_PATH } Write-Host "##vso[task.prependpath]$MSMPI_BIN"; } function main () { InstallMicrosoftMPI } main mpi4py-3.1.6/.azure/install-msmpi.sh000066400000000000000000000001131460670727200173020ustar00rootroot00000000000000#!/bin/bash _dir=$(dirname ${BASH_SOURCE[0]}) pwsh $_dir/install-msmpi.ps1 mpi4py-3.1.6/.azure/install-openmpi.sh000066400000000000000000000012311460670727200176260ustar00rootroot00000000000000#!/bin/bash set -e case `uname` in Linux) set -x; sudo apt install -y openmpi-bin libopenmpi-dev ;; Darwin) set -x; brew install openmpi ;; esac openmpi_mca_params=$HOME/.openmpi/mca-params.conf mkdir -p $(dirname $openmpi_mca_params) echo plm=isolated >> $openmpi_mca_params echo rmaps_base_oversubscribe=true >> $openmpi_mca_params echo btl_base_warn_component_unused=false >> $openmpi_mca_params echo btl_vader_single_copy_mechanism=none >> $openmpi_mca_params if [[ `uname` == Darwin ]]; then # open-mpi/ompi#7516 echo gds=hash >> 
$openmpi_mca_params # open-mpi/ompi#5798 echo btl_vader_backing_directory=/tmp >> $openmpi_mca_params fi mpi4py-3.1.6/.azure/pipelines.yml000066400000000000000000000064101460670727200166760ustar00rootroot00000000000000trigger: batch: false branches: include: - master - maint - ci/all - ci/azure jobs: - job: Linux pool: vmImage: 'Ubuntu-18.04' strategy: matrix: Python35_MPICH: PYTHON_VERSION: '3.5' MPI: 'mpich' Python35_OpenMPI: PYTHON_VERSION: '3.5' MPI: 'openmpi' Python36_MPICH: PYTHON_VERSION: '3.6' MPI: 'mpich' Python36_OpenMPI: PYTHON_VERSION: '3.6' MPI: 'openmpi' Python37_MPICH: PYTHON_VERSION: '3.7' MPI: 'mpich' Python37_OpenMPI: PYTHON_VERSION: '3.7' MPI: 'openmpi' Python38_MPICH: PYTHON_VERSION: '3.8' MPI: 'mpich' Python38_OpenMPI: PYTHON_VERSION: '3.8' MPI: 'openmpi' Python39_MPICH: PYTHON_VERSION: '3.9' MPI: 'mpich' Python39_OpenMPI: PYTHON_VERSION: '3.9' MPI: 'openmpi' Python310_MPICH: PYTHON_VERSION: '3.10' MPI: 'mpich' Python310_OpenMPI: PYTHON_VERSION: '3.10' MPI: 'openmpi' Python311_MPICH: PYTHON_VERSION: '3.11' MPI: 'mpich' Python311_OpenMPI: PYTHON_VERSION: '3.11' MPI: 'openmpi' steps: - bash: echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null displayName: 'Configure hostname' - template: steps.yml - job: macOS pool: vmImage: 'macOS-11' strategy: matrix: Python35_MPICH: PYTHON_VERSION: '3.5' MPI: 'mpich' Python35_OpenMPI: PYTHON_VERSION: '3.5' MPI: 'openmpi' Python36_MPICH: PYTHON_VERSION: '3.6' MPI: 'mpich' Python36_OpenMPI: PYTHON_VERSION: '3.6' MPI: 'openmpi' Python37_MPICH: PYTHON_VERSION: '3.7' MPI: 'mpich' Python37_OpenMPI: PYTHON_VERSION: '3.7' MPI: 'openmpi' Python38_MPICH: PYTHON_VERSION: '3.8' MPI: 'mpich' Python38_OpenMPI: PYTHON_VERSION: '3.8' MPI: 'openmpi' Python39_MPICH: PYTHON_VERSION: '3.9' MPI: 'mpich' Python39_OpenMPI: PYTHON_VERSION: '3.9' MPI: 'openmpi' Python310_MPICH: PYTHON_VERSION: '3.10' MPI: 'mpich' Python310_OpenMPI: PYTHON_VERSION: '3.10' MPI: 'openmpi' Python311_MPICH: PYTHON_VERSION: '3.11' MPI: 'mpich' Python311_OpenMPI: PYTHON_VERSION: '3.11' MPI: 'openmpi' steps: - bash: echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null displayName: 'Configure hostname' - template: steps.yml - job: Windows pool: vmImage: 'windows-2019' strategy: matrix: Python35_MSMPI: PYTHON_VERSION: '3.5' Python36_MSMPI: PYTHON_VERSION: '3.6' Python37_MSMPI: PYTHON_VERSION: '3.7' Python38_MSMPI: PYTHON_VERSION: '3.8' Python39_MSMPI: PYTHON_VERSION: '3.9' Python310_MSMPI: PYTHON_VERSION: '3.10' Python311_MSMPI: PYTHON_VERSION: '3.11' variables: MPI: 'msmpi' steps: - template: steps.yml - publish: dist artifact: 'Windows-py$(PYTHON_VERSION)' displayName: 'Publish package artifacts' condition: eq( variables['Agent.OS'], 'Windows_NT' ) mpi4py-3.1.6/.azure/steps.yml000066400000000000000000000023361460670727200160470ustar00rootroot00000000000000steps: - checkout: self clean: true fetchDepth: 1 - bash: source .azure/install-$(MPI).sh displayName: 'Install MPI' - task: UsePythonVersion@0 inputs: versionSpec: $(PYTHON_VERSION) architecture: x64 displayName: 'Use Python $(PYTHON_VERSION)' - script: python -m pip install --upgrade setuptools pip wheel displayName: 'Install packaging tools' - script: python -m pip install -r conf/requirements-build-cython.txt displayName: 'Install build dependencies' - script: python -m pip wheel -vvv --wheel-dir=dist . 
displayName: 'Build package' - script: python -m pip install --upgrade numpy cffi pyyaml displayName: 'Install test dependencies' - script: python -m pip install --no-index --find-links=dist mpi4py displayName: 'Install package for testing' - script: mpiexec -n 1 python test/runtests.py -v displayName: 'Test package' - script: mpiexec -n 2 python test/runtests.py -v -f -e spawn displayName: 'Test package' - script: mpiexec -n 1 python demo/futures/test_futures.py -v displayName: 'Test subpackage' - script: mpiexec -n 2 python demo/futures/test_futures.py -v displayName: 'Test subpackage' - script: python -m pip uninstall --yes mpi4py displayName: 'Uninstall package after testing' mpi4py-3.1.6/.circleci/000077500000000000000000000000001460670727200146115ustar00rootroot00000000000000mpi4py-3.1.6/.circleci/config.yml000066400000000000000000000016451460670727200166070ustar00rootroot00000000000000version: 2.1 jobs: test: parameters: os: type: executor py: type: string mpi: type: string executor: << parameters.os >> steps: - checkout - run: .circleci/test-package py=<< parameters.py >> mpi=<< parameters.mpi >> - run: .circleci/upload-coverage executors: linux: docker: - image: condaforge/linux-anvil-comp7 workflows: test-all: jobs: - test: filters: branches: only: - master - maint - ci/all - ci/circle matrix: parameters: os: - linux py: - "3.5" - "3.6" - "3.7" - "3.8" - "3.9" #- "3.10" mpi: - "mpich" - "openmpi" mpi4py-3.1.6/.circleci/test-package000077500000000000000000000005761460670727200171170ustar00rootroot00000000000000#!/bin/bash set -e export ANACONDA=/opt/conda export HYDRA_LAUNCHER=fork export OMPI_MCA_plm=isolated export OMPI_MCA_rmaps_base_oversubscribe=true export OMPI_MCA_btl_base_warn_component_unused=false export OMPI_MCA_btl_vader_single_copy_mechanism=none export OMPI_ALLOW_RUN_AS_ROOT=1 export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 source conf/ci/anaconda.sh test-package coverage=yes $@ mpi4py-3.1.6/.circleci/upload-coverage000077500000000000000000000002001460670727200176040ustar00rootroot00000000000000#!/bin/bash RUN() { echo + $@; $@; } RUN curl -s -o codecov.sh https://codecov.io/bash RUN bash codecov.sh -X gcov -X py -X fix mpi4py-3.1.6/.github/000077500000000000000000000000001460670727200143165ustar00rootroot00000000000000mpi4py-3.1.6/.github/workflows/000077500000000000000000000000001460670727200163535ustar00rootroot00000000000000mpi4py-3.1.6/.github/workflows/ci.yml000066400000000000000000000101761460670727200174760ustar00rootroot00000000000000name: ci on: push: branches: - master - maint - ci/all - ci/github pull_request: branches: - master - maint workflow_dispatch: jobs: build: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: py-version: #- "2.7" - "3.5" - "3.6" - "3.7" - "3.8" - "3.9" - "3.10" - "3.11" - "3.12" #- "pypy-2.7" - "pypy-3.6" - "pypy-3.7" - "pypy-3.8" - "pypy-3.9" py-arch: - x64 mpi: - mpich - openmpi - msmpi os: - ubuntu-20.04 - macos-11 - windows-2019 exclude: - os: ubuntu-20.04 mpi: msmpi - os: macos-11 py-version: pypy-2.7 - os: macos-11 py-version: pypy-3.6 - os: macos-11 py-version: pypy-3.7 - os: macos-11 mpi: msmpi - os: windows-2019 mpi: mpich - os: windows-2019 mpi: openmpi - os: windows-2019 py-version: pypy-2.7 steps: - name: Configure hostname if: runner.os == 'Linux' || runner.os == 'macOS' run: echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null - name: Activate MSVC if: runner.os == 'Windows' && matrix.py-version == '2.7' uses: ilammy/msvc-dev-cmd@v1 with: arch: ${{ matrix.py-arch }} - name: Configure MSVC if: runner.os 
== 'Windows' && matrix.py-version == '2.7' run: | Add-Content $Env:GITHUB_ENV MSSdk=1 Add-Content $Env:GITHUB_ENV DISTUTILS_USE_SDK=1 - name: Checkout uses: actions/checkout@v2 - name: Setup MPI (${{ matrix.mpi }}) uses: mpi4py/setup-mpi@v1 with: mpi: ${{ matrix.mpi }} - name: Use Python ${{ matrix.py-version }} ${{ matrix.py-arch }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.py-version }} architecture: ${{ matrix.py-arch }} - name: Install packaging tools run: python -m pip install --upgrade setuptools pip wheel - name: Install build dependencies run: python -m pip install -r conf/requirements-build-cython.txt - name: Build package run: python -m pip wheel -vvv --wheel-dir=dist . - name: Upload package artifacts uses: actions/upload-artifact@v2 with: name: mpi4py-${{matrix.mpi}}-${{matrix.os}} path: dist - name: Install test dependencies run: python -m pip install --upgrade numpy cffi pyyaml - name: Install package for testing run: python -m pip install --no-index --find-links=dist mpi4py - name: Test package run: mpiexec -n 1 python test/runtests.py -v - name: Test package run: mpiexec -n 2 python test/runtests.py -v -f -e spawn - name: Test subpackage run: mpiexec -n 1 python demo/futures/test_futures.py -v - name: Test subpackage run: mpiexec -n 2 python demo/futures/test_futures.py -v - name: Uninstall package after testing run: python -m pip uninstall --yes mpi4py lint: runs-on: ubuntu-20.04 steps: - name: Checkout uses: actions/checkout@v2 - name: Use Python uses: actions/setup-python@v2 with: python-version: 3 - name: Install package run: python -m pip install . env: MPICFG: nompi - name: Install lint dependencies run: python -m pip install pycodestyle pydocstyle flake8 pylint mypy - name: pycodestyle run: pycodestyle src/mpi4py - name: pydocstyle run: pydocstyle src/mpi4py - name: flake8 run: flake8 src/mpi4py - name: pylint run: pylint mpi4py #- name: mypy (stubtest) # run: stubtest mpi4py # --mypy-config-file=conf/mypy.ini # --allowlist=conf/mypy.stubtest.allow.txt - name: mypy (typecheck) run: mypy -p mpi4py --config-file=conf/mypy.ini mpi4py-3.1.6/.gitignore000066400000000000000000000003601460670727200147450ustar00rootroot00000000000000build docs/*.html docs/*.pdf docs/*.info docs/*.[137] docs/apiref docs/usrman dist MANIFEST *.egg-info .eggs __pycache__ *.pyc *.pyo .tox #src/mpi4py.MPI.c #src/mpi4py/include/mpi4py/mpi4py.MPI.h #src/mpi4py/include/mpi4py/mpi4py.MPI_api.h mpi4py-3.1.6/.mailmap000066400000000000000000000002131460670727200143730ustar00rootroot00000000000000Lisandro Dalcin Lisandro Dalcin mpi4py-3.1.6/.pylintrc000066400000000000000000000003321460670727200146210ustar00rootroot00000000000000[MASTER] disable = locally-disabled,file-ignored,no-else-return,consider-using-f-string,use-dict-literal good-names = i,j,k,_,fn reports = no extension-pkg-allow-list = mpi4py, array, _socket, _struct, mpi4py-3.1.6/.readthedocs.yaml000066400000000000000000000004461460670727200162110ustar00rootroot00000000000000# https://readthedocs.org/projects/mpi4py/builds/ version: 2 sphinx: configuration: docs/source/usrman/conf.py fail_on_warning: true formats: all build: os: ubuntu-22.04 tools: python: "3.12" python: install: - path: . 
- requirements: conf/requirements-docs.txt mpi4py-3.1.6/.travis.yml000066400000000000000000000027571460670727200151020ustar00rootroot00000000000000# https://app.travis-ci.com/mpi4py/mpi4py language: python python: - "2.7" - "3.5" - "3.6" - "3.7" - "3.8" - "3.9" #- "3.10" arch: - arm64 - ppc64le os: linux dist: xenial sudo: required env: global: - HYDRA_LAUNCHER=fork - OMPI_MCA_plm=isolated - OMPI_MCA_rmaps_base_oversubscribe=true matrix: - MPI=mpich - MPI=openmpi branches: only: - master - maint - ci/all - ci/travis git: depth: 3 cache: apt: true addons: apt: update: true homebrew: update: true before_install: - python -m pip install -r conf/requirements-build-cython.txt - python -m pip install numpy - source .azure/install-$MPI.sh - python --version - python -m cython --version - python -c "import numpy;print(numpy.__version__)" - if [[ "$MPI" == "mpich" ]]; then mpichversion; fi - if [[ "$MPI" == "openmpi" ]]; then ompi_info; fi install: - python -m pip -vvv install . before_script: - echo 127.0.0.1 `hostname` | sudo tee -a /etc/hosts > /dev/null - if [[ "$MPI" == "mpich" ]]; then P=2; else P=5; fi script: - mpiexec -n 1 python $PWD/test/runtests.py -v - mpiexec -n $P python $PWD/test/runtests.py -v -f -e spawn - mpiexec -n 1 python $PWD/demo/futures/test_futures.py -v - mpiexec -n $P python $PWD/demo/futures/test_futures.py -v - mpiexec -n 1 python -m mpi4py.futures $PWD/demo/futures/test_futures.py -v - mpiexec -n $P python -m mpi4py.futures $PWD/demo/futures/test_futures.py -v #notifications: # email: false mpi4py-3.1.6/CHANGES.rst000066400000000000000000000415551460670727200145720ustar00rootroot00000000000000======================= CHANGES: MPI for Python ======================= :Author: Lisandro Dalcin :Contact: dalcinl@gmail.com Release 3.1.6 [2024-04-14] ========================== .. warning:: This is the last release supporting Python 2. * Fix various build issues. Release 3.1.5 [2023-10-04] ========================== .. warning:: This is the last release supporting Python 2. * Rebuild C sources with Cython 0.29.36 to support Python 3.12. Release 3.1.4 [2022-11-02] ========================== .. warning:: This is the last release supporting Python 2. * Rebuild C sources with Cython 0.29.32 to support Python 3.11. * Fix contiguity check for DLPack and CAI buffers. * Workaround build failures with setuptools v60. Release 3.1.3 [2021-11-25] ========================== .. warning:: This is the last release supporting Python 2. * Add missing support for `MPI.BOTTOM` to generalized all-to-all collectives. Release 3.1.2 [2021-11-04] ========================== .. warning:: This is the last release supporting Python 2. * `mpi4py.futures`: Add `_max_workers` property to `MPIPoolExecutor`. * `mpi4py.util.dtlib`: Fix computation of alignment for predefined datatypes. * `mpi4py.util.pkl5`: Fix deadlock when using ``ssend()`` + ``mprobe()``. * `mpi4py.util.pkl5`: Add environment variable `MPI4PY_PICKLE_THRESHOLD`. * `mpi4py.rc`: Interpret ``"y"`` and ``"n"`` strings as boolean values. * Fix/add typemap/typestr for `MPI.WCHAR`/`MPI.COUNT` datatypes. * Minor fixes and additions to documentation. * Minor fixes to typing support. * Support for local version identifier (PEP-440). Release 3.1.1 [2021-08-14] ========================== .. warning:: This is the last release supporting Python 2. * Fix typo in Requires-Python package metadata. * Regenerate C sources with Cython 0.29.24. Release 3.1.0 [2021-08-12] ========================== .. warning:: This is the last release supporting Python 2. 
* New features: + `mpi4py.util`: New package collecting miscellaneous utilities. * Enhancements: + Add pickle-based ``Request.waitsome()`` and ``Request.testsome()``. + Add lowercase methods ``Request.get_status()`` and ``Request.cancel()``. + Support for passing Python GPU arrays compliant with the `DLPack`_ data interchange mechanism (`link `_) and the ``__cuda_array_interface__`` (CAI) standard (`link `_) to uppercase methods. This support requires that mpi4py is built against `CUDA-aware MPI `_ implementations. This feature is currently experimental and subject to future changes. + `mpi4py.futures`: Add support for initializers and canceling futures at shutdown. Environment variables names now follow the pattern ``MPI4PY_FUTURES_*``, the previous ``MPI4PY_*`` names are deprecated. + Add type annotations to Cython code. The first line of the docstring of functions and methods displays a signature including type annotations. + Add companion stub files to support type checkers. + Support for weak references. * Miscellaneous: + Add a new mpi4py publication (`link `_) to the citation listing. .. _DLPack: https://github.com/dmlc/dlpack .. _DIM: https://data-apis.org/array-api/latest/design_topics/data_interchange.html .. _CAI: https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html .. _CAM: https://developer.nvidia.com/blog/introduction-cuda-aware-mpi/ .. _DOI: https://doi.org/10.1109/MCSE.2021.3083216 Release 3.0.3 [2019-11-04] ========================== * Regenerate Cython wrappers to support Python 3.8. Release 3.0.2 [2019-06-11] ========================== * Bug fixes: + Fix handling of readonly buffers in support for Python 2 legacy buffer interface. The issue triggers only when using a buffer-like object that is readonly and does not export the new Python 3 buffer interface. + Fix build issues with Open MPI 4.0.x series related to removal of many MPI-1 symbols deprecated in MPI-2 and removed in MPI-3. + Minor documentation fixes. Release 3.0.1 [2019-02-15] ========================== * Bug fixes: + Fix ``Comm.scatter()`` and other collectives corrupting input send list. Add safety measures to prevent related issues in global reduction operations. + Fix error-checking code for counts in ``Op.Reduce_local()``. * Enhancements: + Map size-specific Python/NumPy typecodes to MPI datatypes. + Allow partial specification of target list/tuple arguments in the various ``Win`` RMA methods. + Workaround for removal of ``MPI_{LB|UB}`` in Open MPI 4.0. + Support for Microsoft MPI v10.0. Release 3.0.0 [2017-11-08] ========================== * New features: + `mpi4py.futures`: Execute computations asynchronously using a pool of MPI processes. This package is based on ``concurrent.futures`` from the Python standard library. + `mpi4py.run`: Run Python code and abort execution in case of unhandled exceptions to prevent deadlocks. + `mpi4py.bench`: Run basic MPI benchmarks and tests. * Enhancements: + Lowercase, pickle-based collective communication calls are now thread-safe through the use of fine-grained locking. + The ``MPI`` module now exposes a ``memory`` type which is a lightweight variant of the builtin ``memoryview`` type, but exposes both the legacy Python 2 and the modern Python 3 buffer interface under a Python 2 runtime. + The ``MPI.Comm.Alltoallw()`` method now uses ``count=1`` and ``displ=0`` as defaults, assuming that messages are specified through user-defined datatypes. + The ``Request.Wait[all]()`` methods now return ``True`` to match the interface of ``Request.Test[all]()``. 
+ The ``Win`` class now implements the Python buffer interface. * Backward-incompatible changes: + The ``buf`` argument of the ``MPI.Comm.recv()`` method is deprecated, passing anything but ``None`` emits a warning. + The ``MPI.Win.memory`` property was removed, use the ``MPI.Win.tomemory()`` method instead. + Executing ``python -m mpi4py`` in the command line is now equivalent to ``python -m mpi4py.run``. For the former behavior, use ``python -m mpi4py.bench``. + Python 2.6 and 3.2 are no longer supported. The ``mpi4py.MPI`` module may still build and partially work, but other pure-Python modules under the ``mpi4py`` namespace will not. + Windows: Remove support for legacy MPICH2, Open MPI, and DeinoMPI. Release 2.0.0 [2015-10-18] ========================== * Support for MPI-3 features. + Matched probes and receives. + Nonblocking collectives. + Neighborhood collectives. + New communicator constructors. + Request-based RMA operations. + New RMA communication and synchronisation calls. + New window constructors. + New datatype constructor. + New C++ boolean and floating complex datatypes. * Support for MPI-2 features not included in previous releases. + Generalized All-to-All collective (``Comm.Alltoallw()``) + User-defined data representations (``Register_datarep()``) * New scalable implementation of reduction operations for Python objects. This code is based on binomial tree algorithms using point-to-point communication and duplicated communicator contexts. To disable this feature, use ``mpi4py.rc.fast_reduce = False``. * Backward-incompatible changes: + Python 2.4, 2.5, 3.0 and 3.1 are no longer supported. + Default MPI error handling policies are overriden. After import, mpi4py sets the ``ERRORS_RETURN`` error handler in ``COMM_SELF`` and ``COMM_WORLD``, as well as any new ``Comm``, ``Win``, or ``File`` instance created through mpi4py, thus effectively ignoring the MPI rules about error handler inheritance. This way, MPI errors translate to Python exceptions. To disable this behavior and use the standard MPI error handling rules, use ``mpi4py.rc.errors = 'default'``. + Change signature of all send methods, ``dest`` is a required argument. + Change signature of all receive and probe methods, ``source`` defaults to ``ANY_SOURCE``, ``tag`` defaults to ``ANY_TAG``. + Change signature of send lowercase-spelling methods, ``obj`` arguments are not mandatory. + Change signature of recv lowercase-spelling methods, renamed 'obj' arguments to 'buf'. + Change ``Request.Waitsome()`` and ``Request.Testsome()`` to return ``None`` or ``list``. + Change signature of all lowercase-spelling collectives, ``sendobj`` arguments are now mandatory, ``recvobj`` arguments were removed. + Reduction operations ``MAXLOC`` and ``MINLOC`` are no longer special-cased in lowercase-spelling methods ``Comm.[all]reduce()`` and ``Comm.[ex]scan()``, the input object must be specified as a tuple ``(obj, location)``. + Change signature of name publishing functions. The new signatures are ``Publish_name(service_name, port_name, info=INFO_NULL)`` and ``Unpublish_name(service_name, port_name, info=INFO_NULL)```. + ``Win`` instances now cache Python objects exposing memory by keeping references instead of using MPI attribute caching. + Change signature of ``Win.Lock()``. The new signature is ``Win.Lock(rank, lock_type=LOCK_EXCLUSIVE, assertion=0)``. + Move ``Cartcomm.Map()`` to ``Intracomm.Cart_map()``. + Move ``Graphcomm.Map()`` to ``Intracomm.Graph_map()``. + Remove the ``mpi4py.MPE`` module. 
+ Rename the Cython definition file for use with ``cimport`` statement from ``mpi_c.pxd`` to ``libmpi.pxd``. Release 1.3.1 [2013-08-07] ========================== * Regenerate C wrappers with Cython 0.19.1 to support Python 3.3. * Install ``*.pxd`` files in ``/mpi4py`` to ease the support for Cython's ``cimport`` statement in code requiring to access mpi4py internals. * As a side-effect of using Cython 0.19.1, ancient Python 2.3 is no longer supported. If you really need it, you can install an older Cython and run ``python setup.py build_src --force``. Release 1.3 [2012-01-20] ======================== * Now ``Comm.recv()`` accept a buffer to receive the message. * Add ``Comm.irecv()`` and ``Request.{wait|test}[any|all]()``. * Add ``Intracomm.Spawn_multiple()``. * Better buffer handling for PEP 3118 and legacy buffer interfaces. * Add support for attribute attribute caching on communicators, datatypes and windows. * Install MPI-enabled Python interpreter as ``/mpi4py/bin/python-mpi``. * Windows: Support for building with Open MPI. Release 1.2.2 [2010-09-13] ========================== * Add ``mpi4py.get_config()`` to retrieve information (compiler wrappers, includes, libraries, etc) about the MPI implementation employed to build mpi4py. * Workaround Python libraries with missing GILState-related API calls in case of non-threaded Python builds. * Windows: look for MPICH2, DeinoMPI, Microsoft HPC Pack at their default install locations under %ProgramFiles. * MPE: fix hacks related to old API's, these hacks are broken when MPE is built with a MPI implementations other than MPICH2. * HP-MPI: fix for missing Fortran datatypes, use dlopen() to load the MPI shared library before MPI_Init() * Many distutils-related fixes, cleanup, and enhancements, better logics to find MPI compiler wrappers. * Support for ``pip install mpi4py``. Release 1.2.1 [2010-02-26] ========================== * Fix declaration in Cython include file. This declaration, while valid for Cython, broke the simple-minded parsing used in conf/mpidistutils.py to implement configure-tests for availability of MPI symbols. * Update SWIG support and make it compatible with Python 3. Also generate an warning for SWIG < 1.3.28. * Fix distutils-related issues in Mac OS X. Now ARCHFLAGS environment variable is honored of all Python's ``config/Makefile`` variables. * Fix issues with Open MPI < 1.4.2 releated to error checking and ``MPI_XXX_NULL`` handles. Release 1.2 [2009-12-29] ======================== * Automatic MPI datatype discovery for NumPy arrays and PEP-3118 buffers. Now buffer-like objects can be messaged directly, it is no longer required to explicitly pass a 2/3-list/tuple like ``[data, MPI.DOUBLE]``, or ``[data, count, MPI.DOUBLE]``. Only basic types are supported, i.e., all C/C99-native signed/unsigned integral types and single/double precision real/complex floating types. Many thanks to Eilif Muller for the initial feedback. * Nonblocking send of pickled Python objects. Many thanks to Andreas Kloeckner for the initial patch and enlightening discussion about this enhancement. * ``Request`` instances now hold a reference to the Python object exposing the buffer involved in point-to-point communication or parallel I/O. Many thanks to Andreas Kloeckner for the initial feedback. * Support for logging of user-defined states and events using `MPE `_. Runtime (i.e., without requiring a recompile!) activation of logging of all MPI calls is supported in POSIX platforms implementing ``dlopen()``. 
* Support for all the new features in MPI-2.2 (new C99 and F90 datatypes, distributed graph topology, local reduction operation, and other minor enhancements). * Fix the annoying issues related to Open MPI and Python dynamic loading of extension modules in platforms supporting ``dlopen()``. * Fix SLURM dynamic loading issues on SiCortex. Many thanks to Ian Langmore for providing me shell access. Release 1.1.0 [2009-06-06] ========================== * Fix bug in ``Comm.Iprobe()`` that caused segfaults as Python C-API calls were issued with the GIL released (issue #2). * Add ``Comm.bsend()`` and ``Comm.ssend()`` for buffered and synchronous send semantics when communicating general Python objects. * Now the call ``Info.Get(key)`` return a *single* value (i.e, instead of a 2-tuple); this value is ``None`` if ``key`` is not in the ``Info`` object, or a string otherwise. Previously, the call redundantly returned ``(None, False)`` for missing key-value pairs; ``None`` is enough to signal a missing entry. * Add support for parametrized Fortran datatypes. * Add support for decoding user-defined datatypes. * Add support for user-defined reduction operations on memory buffers. However, at most 16 user-defined reduction operations can be created. Ask the author for more room if you need it. Release 1.0.0 [2009-03-20] ========================== This is the fist release of the all-new, Cython-based, implementation of *MPI for Python*. Unfortunately, this implementation is not backward-compatible with the previous one. The list below summarizes the more important changes that can impact user codes. * Some communication calls had *overloaded* functionality. Now there is a clear distinction between communication of general Python object with *pickle*, and (fast, near C-speed) communication of buffer-like objects (e.g., NumPy arrays). - for communicating general Python objects, you have to use all-lowercase methods, like ``send()``, ``recv()``, ``bcast()``, etc. - for communicating array data, you have to use ``Send()``, ``Recv()``, ``Bcast()``, etc. methods. Buffer arguments to these calls must be explicitly specified by using a 2/3-list/tuple like ``[data, MPI.DOUBLE]``, or ``[data, count, MPI.DOUBLE]`` (the former one uses the byte-size of ``data`` and the extent of the MPI datatype to define the ``count``). * Indexing a communicator with an integer returned a special object associating the communication with a target rank, alleviating you from specifying source/destination/root arguments in point-to-point and collective communications. This functionality is no longer available, expressions like:: MPI.COMM_WORLD[0].Send(...) MPI.COMM_WORLD[0].Recv(...) MPI.COMM_WORLD[0].Bcast(...) have to be replaced by:: MPI.COMM_WORLD.Send(..., dest=0) MPI.COMM_WORLD.Recv(..., source=0) MPI.COMM_WORLD.Bcast(..., root=0) * Automatic MPI initialization (i.e., at import time) requests the maximum level of MPI thread support (i.e., it is done by calling ``MPI_Init_thread()`` and passing ``MPI_THREAD_MULTIPLE``). In case you need to change this behavior, you can tweak the contents of the ``mpi4py.rc`` module. * In order to obtain the values of predefined attributes attached to the world communicator, now you have to use the ``Get_attr()`` method on the ``MPI.COMM_WORLD`` instance:: tag_ub = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) * In the previous implementation, ``MPI.COMM_WORLD`` and ``MPI.COMM_SELF`` were associated to **duplicates** of the (C-level) ``MPI_COMM_WORLD`` and ``MPI_COMM_SELF`` predefined communicator handles. 
  Now this is no longer the case: ``MPI.COMM_WORLD`` and
  ``MPI.COMM_SELF`` proxy the **actual** ``MPI_COMM_WORLD`` and
  ``MPI_COMM_SELF`` handles.

* Convenience aliases ``MPI.WORLD`` and ``MPI.SELF`` were removed.
  Use instead ``MPI.COMM_WORLD`` and ``MPI.COMM_SELF``.

* Convenience constants ``MPI.WORLD_SIZE`` and ``MPI.WORLD_RANK``
  were removed. Use instead ``MPI.COMM_WORLD.Get_size()`` and
  ``MPI.COMM_WORLD.Get_rank()``.
mpi4py-3.1.6/DESCRIPTION.rst000066400000000000000000000127521460670727200153020ustar00rootroot00000000000000MPI for Python
==============

This package provides Python bindings for the *Message Passing
Interface* (MPI_) standard. It is implemented on top of the MPI
specification and exposes an API which grounds on the standard
MPI-2 C++ bindings.

.. _MPI: https://www.mpi-forum.org

Features
--------

This package supports:

* Convenient communication of any *picklable* Python object

  + point-to-point (send & receive)
  + collective (broadcast, scatter & gather, reductions)

* Fast communication of Python objects exposing the *Python buffer
  interface* (NumPy arrays, builtin bytes/string/array objects)

  + point-to-point (blocking/nonblocking/persistent send & receive)
  + collective (broadcast, block/vector scatter & gather, reductions)

* Process groups and communication domains

  + Creation of new intra/inter communicators
  + Cartesian & graph topologies

* Parallel input/output

  + read & write
  + blocking/nonblocking & collective/noncollective
  + individual/shared file pointers & explicit offset

* Dynamic process management

  + spawn & spawn multiple
  + accept/connect
  + name publishing & lookup

* One-sided operations

  + remote memory access (put, get, accumulate)
  + active target synchronization (start/complete & post/wait)
  + passive target synchronization (lock & unlock)

Install
-------

You can install mpi4py from its source distribution using ``pip``::

  $ python -m pip install mpi4py

You can also install the in-development version with::

  $ python -m pip install git+https://github.com/mpi4py/mpi4py

or::

  $ python -m pip install https://github.com/mpi4py/mpi4py/tarball/master

Installing from source requires compilers and a working MPI
implementation. The ``mpicc`` compiler wrapper is looked for on the
executable search path (``PATH`` environment variable). Alternatively,
you can set the ``MPICC`` environment variable to the full path or
command corresponding to the MPI-aware C compiler.

The **conda-forge** community provides ready-to-use binary packages
from an ever-growing collection of software libraries built around the
multi-platform *conda* package manager. Three MPI implementations are
available on conda-forge: Open MPI (Linux and macOS), MPICH (Linux and
macOS), and Microsoft MPI (Windows). You can install mpi4py and your
preferred MPI implementation using ``conda``:

* to use MPICH do::

    $ conda install -c conda-forge mpi4py mpich

* to use Open MPI do::

    $ conda install -c conda-forge mpi4py openmpi

* to use Microsoft MPI do::

    $ conda install -c conda-forge mpi4py msmpi

MPICH and many of its derivatives are ABI-compatible. You can provide
the package specification ``mpich=X.Y.*=external_*`` (where ``X`` and
``Y`` are the major and minor version numbers) to request the conda
package manager to use system-provided MPICH (or derivative)
libraries. The ``openmpi`` package on conda-forge has built-in CUDA
support, but it is disabled by default. To enable it, follow the
instructions outlined during ``conda install``. Additionally, UCX
support is also available once the ``ucx`` package is installed.
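A quick way to check that an installation works is to run a minimal
hello-world script. The snippet below is only an illustrative sketch
(the file name ``hello.py`` is arbitrary); it relies on nothing beyond
the core ``mpi4py.MPI`` module::

  # hello.py -- report the rank, size, and host of each MPI process
  from mpi4py import MPI

  comm = MPI.COMM_WORLD              # communicator spanning all processes
  rank = comm.Get_rank()             # id of this process in the communicator
  size = comm.Get_size()             # total number of processes
  name = MPI.Get_processor_name()    # host running this process

  print("Hello from rank %d of %d on %s" % (rank, size, name))

Launch it with your MPI runner, for example ``mpiexec -n 4 python
hello.py``; every process should print its own rank.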
On **Fedora Linux** systems (as well as **RHEL** and their derivatives using the EPEL software repository), you can install binary packages with the system package manager:: * using ``dnf`` and the ``mpich`` package:: $ sudo dnf install python3-mpi4py-mpich * using ``dnf`` and the ``openmpi`` package:: $ sudo dnf install python3-mpi4py-openmpi Please remember to load the correct MPI module for your chosen MPI implementation * for the ``mpich`` package do:: $ module load mpi/mpich-$(arch) $ python -c "from mpi4py import MPI" * for the ``openmpi`` package do:: $ module load mpi/openmpi-$(arch) $ python -c "from mpi4py import MPI" On **Ubuntu Linux** and **Debian Linux** systems, binary packages are available for installation using the system package manager:: $ sudo apt install python3-mpi4py Note that on Ubuntu/Debian systems, the mpi4py package uses Open MPI. To use MPICH, install the ``libmpich-dev`` and ``python3-dev`` packages (and any other required development tools). Afterwards, install mpi4py from sources using ``pip``. **macOS** users can install mpi4py using the Homebrew package manager:: $ brew install mpi4py Note that the Homebrew mpi4py package uses Open MPI. Alternatively, install the ``mpich`` package and next install mpi4py from sources using ``pip``. **Windows** users can install mpi4py from binary wheels hosted on the Python Package Index (PyPI) using ``pip``:: $ python -m pip install mpi4py Windows wheels require a separate, system-wide installation of the Microsoft MPI runtime. Citations --------- If MPI for Python been significant to a project that leads to an academic publication, please acknowledge that fact by citing the project. * L. Dalcin and Y.-L. L. Fang, *mpi4py: Status Update After 12 Years of Development*, Computing in Science & Engineering, 23(4):47-54, 2021. https://doi.org/10.1109/MCSE.2021.3083216 * L. Dalcin, P. Kler, R. Paz, and A. Cosimo, *Parallel Distributed Computing using Python*, Advances in Water Resources, 34(9):1124-1139, 2011. https://doi.org/10.1016/j.advwatres.2011.04.013 * L. Dalcin, R. Paz, M. Storti, and J. D'Elia, *MPI for Python: performance improvements and MPI-2 extensions*, Journal of Parallel and Distributed Computing, 68(5):655-662, 2008. https://doi.org/10.1016/j.jpdc.2007.09.005 * L. Dalcin, R. Paz, and M. Storti, *MPI for Python*, Journal of Parallel and Distributed Computing, 65(9):1108-1115, 2005. https://doi.org/10.1016/j.jpdc.2005.03.010 mpi4py-3.1.6/LICENSE.rst000066400000000000000000000026231460670727200145750ustar00rootroot00000000000000======================= LICENSE: MPI for Python ======================= :Author: Lisandro Dalcin :Contact: dalcinl@gmail.com Copyright (c) 2024, Lisandro Dalcin. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. mpi4py-3.1.6/MANIFEST.in000066400000000000000000000011451460670727200145150ustar00rootroot00000000000000include setup.py pyproject.toml *.cfg *.rst exclude .* recursive-include demo *.py *.pyx *.i *.h *.c *.cxx *.f90 *.f08 recursive-include demo [M,m]akefile python-config *.sh *.txt *.bat recursive-include conf *.py *.sh *.txt *.h *.cfg *.conf *.ini *.bat recursive-include src *.py py.typed *.pyi *.pyx *.px[di] recursive-include src *.pth *.h *.c *.i recursive-include test *.py include docs/*.html include docs/*.pdf include docs/*.info include docs/*.[137] include docs/*.rst include docs/*.bib graft docs/usrman graft docs/apiref graft docs/source prune docs/source/usrman/reference prune conf/ci mpi4py-3.1.6/README.rst000066400000000000000000000052621460670727200144520ustar00rootroot00000000000000============== MPI for Python ============== .. image:: https://github.com/mpi4py/mpi4py/workflows/ci/badge.svg?branch=master :target: https://github.com/mpi4py/mpi4py/actions/ .. image:: https://readthedocs.org/projects/mpi4py/badge/?version=latest :target: https://mpi4py.readthedocs.io/en/latest/ .. image:: https://dev.azure.com/mpi4py/mpi4py/_apis/build/status/ci?branchName=master :target: https://dev.azure.com/mpi4py/mpi4py/_build .. image:: https://ci.appveyor.com/api/projects/status/whh5xovp217h0f7n?svg=true :target: https://ci.appveyor.com/project/mpi4py/mpi4py .. image:: https://circleci.com/gh/mpi4py/mpi4py.svg?style=shield :target: https://circleci.com/gh/mpi4py/mpi4py .. image:: https://app.travis-ci.com/mpi4py/mpi4py.svg?branch=master :target: https://app.travis-ci.com/mpi4py/mpi4py .. image:: https://codecov.io/gh/mpi4py/mpi4py/branch/master/graph/badge.svg :target: https://codecov.io/gh/mpi4py/mpi4py .. image:: https://scan.coverity.com/projects/mpi4py-mpi4py/badge.svg :target: https://scan.coverity.com/projects/mpi4py-mpi4py Overview -------- This package provides Python bindings for the *Message Passing Interface* (`MPI `_) standard. It is implemented on top of the MPI specification and exposes an API which grounds on the standard MPI-2 C++ bindings. Dependencies ------------ * `Python `_ 2.7, 3.5 or above, or `PyPy `_ 2.0 or above. * An MPI implementation like `MPICH `_ or `Open MPI `_ built with shared/dynamic libraries. * To work with the in-development version, you need to install `Cython `_. Documentation ------------- * Read the Docs: https://mpi4py.readthedocs.io/ * GitHub Pages: https://mpi4py.github.io/ Support ------- * Mailing List: mpi4py@googlegroups.com * Google Groups: https://groups.google.com/g/mpi4py * GitHub Discussions: https://github.com/mpi4py/mpi4py/discussions Testsuite --------- The testsuite is run periodically on * `GitHub Actions `_ * `Read the Docs `_ * `Azure Pipelines `_ * `AppVeyor `_ * `Circle CI `_ * `Travis CI `_ * `Codecov `_ Citation -------- + L. Dalcin and Y.-L. L. Fang, *mpi4py: Status Update After 12 Years of Development*, Computing in Science & Engineering, 23(4):47-54, 2021. 
https://doi.org/10.1109/MCSE.2021.3083216 mpi4py-3.1.6/bitbucket-pipelines.yml000066400000000000000000000010171460670727200174420ustar00rootroot00000000000000pipelines: custom: default: &default-step - step: script: - source conf/ci/anaconda.sh - install-anaconda - test-package python=2.7 MPI=mpich - test-package python=3.7 MPI=mpich - test-package python=3.8 MPI=mpich - test-package python=2.7 MPI=openmpi - test-package python=3.7 MPI=openmpi - test-package python=3.8 MPI=openmpi clone: depth: 3 # branches: # maint: *default-step # master: *default-step mpi4py-3.1.6/conf/000077500000000000000000000000001460670727200137035ustar00rootroot00000000000000mpi4py-3.1.6/conf/CMakeLists.txt000066400000000000000000000140461460670727200164500ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com CMAKE_MINIMUM_REQUIRED(VERSION 2.6) PROJECT(mpi4py) SET(PythonInterp_FIND_VERSION ${PYTHON_VERSION}) SET(PythonLibs_FIND_VERSION ${PYTHON_VERSION_STRING}) FIND_PACKAGE(PythonInterp) FIND_PACKAGE(PythonLibs) FIND_PACKAGE(MPI) SET(mpi4py_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/src") SET(mpi4py_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/mpi4py") FILE(GLOB mpi4py_PYTHON_FILES RELATIVE ${mpi4py_SOURCE_DIR}/mpi4py ${mpi4py_SOURCE_DIR}/mpi4py/*.py ${mpi4py_SOURCE_DIR}/mpi4py/futures/*.py ${mpi4py_SOURCE_DIR}/mpi4py/util/*.py ) FILE(GLOB mpi4py_HEADER_FILES RELATIVE ${mpi4py_SOURCE_DIR}/mpi4py ${mpi4py_SOURCE_DIR}/mpi4py/*.pxd ${mpi4py_SOURCE_DIR}/mpi4py/include/mpi4py/*.[hi] ${mpi4py_SOURCE_DIR}/mpi4py/include/mpi4py/*.px[di] ) FILE(GLOB mpi4py_STUBS_FILES RELATIVE ${mpi4py_SOURCE_DIR}/mpi4py ${mpi4py_SOURCE_DIR}/mpi4py/py.typed ${mpi4py_SOURCE_DIR}/mpi4py/*.pyi ${mpi4py_SOURCE_DIR}/mpi4py/futures/*.pyi ${mpi4py_SOURCE_DIR}/mpi4py/util/*.pyi ) FOREACH(file ${mpi4py_PYTHON_FILES} ${mpi4py_HEADER_FILES} ${mpi4py_STUBS_FILES} ) SET(src "${mpi4py_SOURCE_DIR}/mpi4py/${file}") SET(tgt "${mpi4py_BINARY_DIR}/${file}") ADD_CUSTOM_COMMAND( DEPENDS ${src} OUTPUT ${tgt} COMMAND ${CMAKE_COMMAND} ARGS -E copy ${src} ${tgt} COMMENT "copy: ${file}" ) SET(mpi4py_OUTPUT_FILES ${mpi4py_OUTPUT_FILES} ${tgt}) ENDFOREACH(file) FIND_PROGRAM(MPI_COMPILER_CC NAMES mpicc HINTS "${MPI_BASE_DIR}" PATH_SUFFIXES bin DOC "MPI C compiler wrapper") MARK_AS_ADVANCED(MPI_COMPILER_CC) FIND_PROGRAM(MPI_COMPILER_CXX NAMES mpicxx mpic++ mpiCC HINTS "${MPI_BASE_DIR}" PATH_SUFFIXES bin DOC "MPI C++ compiler wrapper") MARK_AS_ADVANCED(MPI_COMPILER_CXX) find_program(MPI_COMPILER_FC NAMES mpifort mpif90 mpif77 HINTS "${MPI_BASE_DIR}" PATH_SUFFIXES bin DOC "MPI Fortran compiler wrapper") MARK_AS_ADVANCED(MPI_COMPILER_FC) FIND_PROGRAM(MPI_COMPILER_F90 NAMES mpif90 HINTS "${MPI_BASE_DIR}" PATH_SUFFIXES bin DOC "MPI Fortran 90 compiler wrapper") MARK_AS_ADVANCED(MPI_COMPILER_F90) find_program(MPI_COMPILER_F77 NAMES mpif77 HINTS "${MPI_BASE_DIR}" PATH_SUFFIXES bin DOC "MPI Fortran 77 compiler wrapper") MARK_AS_ADVANCED(MPI_COMPILER_F77) FOREACH(file "mpi.cfg") SET(tgt "${mpi4py_BINARY_DIR}/${file}") ADD_CUSTOM_COMMAND( OUTPUT ${tgt} COMMAND ${CMAKE_COMMAND} ARGS -E echo '[mpi]' > "${tgt}" COMMAND ${CMAKE_COMMAND} ARGS -E echo 'mpicc = ${MPI_COMPILER_CC}' >> ${tgt} COMMAND ${CMAKE_COMMAND} ARGS -E echo 'mpicxx = ${MPI_COMPILER_CXX}' >> ${tgt} COMMAND ${CMAKE_COMMAND} ARGS -E echo 'mpifort = ${MPI_COMPILER_FC}' >> ${tgt} COMMAND ${CMAKE_COMMAND} ARGS -E echo 'mpif90 = ${MPI_COMPILER_F90}' >> ${tgt} COMMAND ${CMAKE_COMMAND} ARGS -E echo 'mpif77 = ${MPI_COMPILER_F77}' >> ${tgt} COMMAND ${CMAKE_COMMAND} ARGS -E echo '' >> ${tgt} COMMENT 
"write: ${file}" ) SET(mpi4py_OUTPUT_FILES ${mpi4py_OUTPUT_FILES} ${tgt}) ENDFOREACH(file) ADD_CUSTOM_TARGET(mpi4py ALL DEPENDS ${mpi4py_OUTPUT_FILES}) INCLUDE_DIRECTORIES( ${MPI_INCLUDE_PATH} ${PYTHON_INCLUDE_PATH} "${mpi4py_SOURCE_DIR}" ) # --- mpi4py.MPI --- PYTHON_ADD_MODULE(mpi4py.MPI MODULE "${mpi4py_SOURCE_DIR}/MPI.c") SET_TARGET_PROPERTIES( mpi4py.MPI PROPERTIES OUTPUT_NAME "MPI" PREFIX "" COMPILE_FLAGS "${MPI_COMPILE_FLAGS}" LINK_FLAGS "${MPI_LINK_FLAGS}" LIBRARY_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}" RUNTIME_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}" LINKER_LANGUAGE C ) TARGET_LINK_LIBRARIES(mpi4py.MPI ${PYTHON_LIBRARY}) TARGET_LINK_LIBRARIES(mpi4py.MPI ${MPI_LIBRARIES}) # --- mpi4py.dl --- PYTHON_ADD_MODULE(mpi4py.dl MODULE "${mpi4py_SOURCE_DIR}/dynload.c") SET_TARGET_PROPERTIES( mpi4py.dl PROPERTIES OUTPUT_NAME "dl" PREFIX "" LIBRARY_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}" RUNTIME_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}" LINKER_LANGUAGE C ) TARGET_LINK_LIBRARIES(mpi4py.dl ${PYTHON_LIBRARY}) TARGET_LINK_LIBRARIES(mpi4py.dl ${CMAKE_DL_LIBS}) # --- mpi4py/bin/python-mpi --- ADD_EXECUTABLE(python-mpi "${mpi4py_SOURCE_DIR}/python.c") SET_TARGET_PROPERTIES( python-mpi PROPERTIES COMPILE_FLAGS "${MPI_COMPILE_FLAGS}" LINK_FLAGS "${MPI_LINK_FLAGS}" RUNTIME_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/bin" LINKER_LANGUAGE C ) TARGET_LINK_LIBRARIES(python-mpi ${PYTHON_LIBRARY}) TARGET_LINK_LIBRARIES(python-mpi ${MPI_LIBRARIES}) # --- mpi4py/lib-pmpi/libmpe.so --- ADD_LIBRARY(pmpi-mpe MODULE "${mpi4py_SOURCE_DIR}/lib-pmpi/mpe.c") SET_TARGET_PROPERTIES( pmpi-mpe PROPERTIES OUTPUT_NAME "mpe" LIBRARY_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" RUNTIME_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" LINKER_LANGUAGE C ) TARGET_LINK_LIBRARIES(pmpi-mpe ${MPE_LIBRARIES}) TARGET_LINK_LIBRARIES(pmpi-mpe ${MPI_LIBRARIES}) # --- mpi4py/lib-pmpi/libvt.so --- ADD_LIBRARY(pmpi-vt MODULE "${mpi4py_SOURCE_DIR}/lib-pmpi/vt.c") SET_TARGET_PROPERTIES( pmpi-vt PROPERTIES OUTPUT_NAME "vt" LIBRARY_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" RUNTIME_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" LINKER_LANGUAGE C ) TARGET_LINK_LIBRARIES(pmpi-vt ${VT_LIBRARIES}) TARGET_LINK_LIBRARIES(pmpi-vt ${MPI_LIBRARIES}) # --- mpi4py/lib-pmpi/libvt-mpi.so --- ADD_LIBRARY(pmpi-vt-mpi MODULE "${mpi4py_SOURCE_DIR}/lib-pmpi/vt-mpi.c") SET_TARGET_PROPERTIES( pmpi-vt-mpi PROPERTIES OUTPUT_NAME "vt-mpi" LIBRARY_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" RUNTIME_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" LINKER_LANGUAGE C ) TARGET_LINK_LIBRARIES(pmpi-vt-mpi ${VT_MPI_LIBRARIES}) TARGET_LINK_LIBRARIES(pmpi-vt-mpi ${MPI_LIBRARIES}) # --- mpi4py/lib-pmpi/libvt-hyb.so --- ADD_LIBRARY(pmpi-vt-hyb MODULE "${mpi4py_SOURCE_DIR}/lib-pmpi/vt-hyb.c") SET_TARGET_PROPERTIES( pmpi-vt-hyb PROPERTIES OUTPUT_NAME "vt-hyb" LIBRARY_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" RUNTIME_OUTPUT_DIRECTORY "${mpi4py_BINARY_DIR}/lib-pmpi" LINKER_LANGUAGE C ) TARGET_LINK_LIBRARIES(pmpi-vt-hyb ${VT_HYB_LIBRARIES}) TARGET_LINK_LIBRARIES(pmpi-vt-hyb ${MPI_LIBRARIES}) mpi4py-3.1.6/conf/ci/000077500000000000000000000000001460670727200142765ustar00rootroot00000000000000mpi4py-3.1.6/conf/ci/anaconda.sh000066400000000000000000000033721460670727200164030ustar00rootroot00000000000000#!/bin/bash RUN() { echo + $@; $@; } RUN export ANACONDA=${ANACONDA-/opt/anaconda} RUN export CFLAGS=-O0 install-anaconda() { MINICONDA=Miniconda3-latest-Linux-$(arch).sh RUN curl -s -o ~/$MINICONDA https://repo.anaconda.com/miniconda/$MINICONDA RUN bash ~/$MINICONDA -b -f 
-p $ANACONDA RUN source $ANACONDA/bin/activate root RUN conda config --set show_channel_urls yes } test-package() { unset PY unset MPI unset COVERAGE for arg in $@; do case $arg in python=?*) PY="${arg#*=}";; py=?*) PY="${arg#*=}";; MPI=?*) MPI="${arg#*=}";; mpi=?*) MPI="${arg#*=}";; coverage=?*) COVERAGE="${arg#*=}";; *) break esac done PY=${PY-2.7} MPI=${MPI-mpich} COVERAGE=${COVERAGE-no} RUN source $ANACONDA/bin/activate root RUN rm -rf $ANACONDA/envs/test RUN conda create --quiet --yes -n test -c conda-forge python=$PY $MPI $MPI-mpicc numpy cython coverage RUN conda activate test RUN python setup.py build_src --force RUN python setup.py install RUN python setup.py --quiet clean --all if [[ "$MPI" == "mpich" ]]; then P=2; else P=5; fi export MPIEXEC=${MPIEXEC-mpiexec} RUN $MPIEXEC -n 1 python $PWD/test/runtests.py RUN $MPIEXEC -n $P python $PWD/test/runtests.py -f RUN $MPIEXEC -n 1 python $PWD/demo/futures/test_futures.py RUN $MPIEXEC -n $P python $PWD/demo/futures/test_futures.py -f RUN $MPIEXEC -n 1 python -m mpi4py.futures $PWD/demo/futures/test_futures.py RUN $MPIEXEC -n $P python -m mpi4py.futures $PWD/demo/futures/test_futures.py -f if [[ "$COVERAGE" == "yes" ]]; then RUN ./conf/coverage.sh RUN coverage report RUN coverage xml RUN mv coverage.xml coverage-py$PY-$MPI.xml fi RUN conda deactivate } mpi4py-3.1.6/conf/ci/appveyor-artifacts.py000066400000000000000000000031271460670727200204760ustar00rootroot00000000000000#!/usr/bin/env python # Author: Lisandro Dalcin # Contact: dalcinl@gmail.com import os import argparse import requests APIURL = 'https://ci.appveyor.com/api' ACCOUNT = 'mpi4py/mpi4py' BRANCH = 'maint' parser = argparse.ArgumentParser(prog=os.path.basename(__file__)) parser.add_argument("-q", "--quiet", action="store_false", dest="verbose", default=True) parser.add_argument("-n", "--dry-run", action="store_false", dest="download", default=True) parser.add_argument("-a", "--account", type=str, action="store", dest="account", default=ACCOUNT) parser.add_argument("-b", "--branch", type=str, action="store", dest="branch", default=BRANCH) options = parser.parse_args() ACCOUNT = options.account BRANCH = options.branch branch_url = APIURL + '/projects/' + ACCOUNT + "/branch/" + BRANCH branch = requests.get(branch_url).json() jobs = branch['build']['jobs'] jobids = [job['jobId'] for job in jobs] if options.verbose: print("Downloading AppVeyor artifacts " "account={} branch={}".format(ACCOUNT, BRANCH)) for jobid in jobids: artifacts_url = APIURL + '/buildjobs/' + jobid + '/artifacts' artifacts = requests.get(artifacts_url).json() filenames = [a['fileName'] for a in artifacts] for filename in filenames: download_url = artifacts_url + '/' + filename if options.verbose: print(download_url) if options.download: data = requests.get(download_url).content with open(os.path.basename(filename), "wb") as f: f.write(data) mpi4py-3.1.6/conf/ci/fedoracloud.sh000077500000000000000000000046471460670727200171370ustar00rootroot00000000000000#!/usr/bin/env bash # Test script running on Fedora Jenkins # http://jenkins.fedorainfracloud.org/job/mpi4py/ # Copyright (c) 2015, Thomas Spura. 
source /etc/profile.d/modules.sh MPI=mpich PYTHON=$(command -v python) for arg in "$@"; do case "$arg" in mpich|openmpi) MPI="$arg" ;; python|python2|python3|pypy|pypy3) PYTHON=$(command -v "$arg") ;; *) echo "Unknown argument: $arg" exit 1 ;; esac done PY=$(basename "$PYTHON") echo "Creating virtualenv: venv-$PY-$MPI" rm -rf build venv-$PY-$MPI virtualenv -p "$PYTHON" venv-$PY-$MPI source venv-$PY-$MPI/bin/activate pip install pip --upgrade echo "Installing dependencies" pip install -r conf/requirements-build-cython.txt pip install pydocstyle pylint coverage --upgrade echo "Loading MPI module: $MPI" module purge module load mpi/$MPI-$(uname -m) hash -r echo "Installing package" pip -vvv install . echo "Running pydocstyle" pydocstyle src/mpi4py | tee pydocstyle-$PY-$MPI.out echo "Running pylint" pylint mpi4py | tee pylint-$PY-$MPI.out echo "Running coverage" /usr/bin/env bash ./conf/coverage.sh coverage xml mv coverage.xml coverage-$PY-$MPI.xml echo "Running testsuite" case "$MPI" in mpich) python demo/test-run/test_run.py -v ;; openmpi) #python demo/test-run/test_run.py -v ;; esac set -e case "$MPI" in mpich) mpiexec -n 1 python test/runtests.py -v mpiexec -n 2 python test/runtests.py -v -f -e spawn mpiexec -n 3 python test/runtests.py -v -f -e spawn #mpiexec -n 8 python test/runtests.py -v -f -e spawn mpiexec -n 1 python demo/futures/test_futures.py -v mpiexec -n 2 python -m mpi4py.futures demo/futures/test_futures.py -v -f mpiexec -n 3 python -m mpi4py.futures demo/futures/test_futures.py -v -f ;; openmpi) mpiexec -n 1 python test/runtests.py --no-threads -v -f mpiexec -n 2 python test/runtests.py --no-threads -v -f -e spawn mpiexec -n 3 python test/runtests.py --no-threads -v -f -e spawn #mpiexec -n 8 python test/runtests.py --no-threads -v -f -e spawn mpiexec -n 1 python demo/futures/test_futures.py -v mpiexec -n 2 python -m mpi4py.futures demo/futures/test_futures.py -v -f mpiexec -n 3 python -m mpi4py.futures demo/futures/test_futures.py -v -f ;; esac set +e module purge deactivate mpi4py-3.1.6/conf/coverage-helper.py000066400000000000000000000027451460670727200173350ustar00rootroot00000000000000# --- import mpi4py try: mpi4py.get_include() except: pass try: mpi4py.get_config() except: pass # --- def test_mpi4py_rc(): import mpi4py.rc mpi4py.rc( initialize = True, threads = True, thread_level = 'multiple', finalize = None, fast_reduce = True, recv_mprobe = True, errors = 'exception', ) try: mpi4py.rc(qwerty=False) except TypeError: pass else: raise RuntimeError repr(mpi4py.rc) test_mpi4py_rc() # --- def test_mpi4py_profile(): import mpi4py def mpi4py_profile(*args, **kargs): try: mpi4py.profile(*args, **kargs) except ValueError: pass import warnings warnings.simplefilter('ignore') mpi4py_profile('mpe') mpi4py_profile('mpe', path="/usr/lib") mpi4py_profile('mpe', path=["/usr/lib"]) mpi4py_profile('mpe', logfile="mpi4py") mpi4py_profile('mpe', logfile="mpi4py") mpi4py_profile('vt') mpi4py_profile('vt', path="/usr/lib") mpi4py_profile('vt', path=["/usr/lib"]) mpi4py_profile('vt', logfile="mpi4py") mpi4py_profile('vt', logfile="mpi4py") mpi4py_profile('@querty') mpi4py_profile('c', path=["/usr/lib", "/usr/lib64"]) mpi4py_profile('m', path=["/usr/lib", "/usr/lib64"]) mpi4py_profile('dl', path=["/usr/lib", "/usr/lib64"]) mpi4py_profile('hosts', path=["/etc"]) test_mpi4py_profile() # --- import mpi4py.__main__ import mpi4py.bench import mpi4py.futures import mpi4py.futures.__main__ import mpi4py.futures.server import mpi4py.run 
mpi4py-3.1.6/conf/coverage.sh000077500000000000000000000175551460670727200160520ustar00rootroot00000000000000#!/bin/bash MPIEXEC=${MPIEXEC-mpiexec} PYTHON=${1-${PYTHON-python}} export PYTHONDONTWRITEBYTECODE=1 $PYTHON -m coverage erase $MPIEXEC -n 1 $PYTHON -m coverage run "$(dirname "$0")/coverage-helper.py" > /dev/null || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --help > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --threads helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --no-threads helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --thread-level=single helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench helloworld > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench helloworld > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench helloworld > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench helloworld -q $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench ringtest > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench ringtest -q -l 2 -s 1 $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench ringtest -q -l 2 -s 1 $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench qwerty > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.bench qwerty > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --mpe qwerty > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.bench --vt qwerty > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.run --help > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --version > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --help > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py - < /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c "42" > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -m this > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py "$(dirname "$0")/coverage-helper.py" > /dev/null || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -rc threads=0 --rc=thread_level=single -c "" > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -p mpe -profile mpe -c "" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --profile mpe --profile=mpe -c "" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -vt --vt -mpe --mpe -c "" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -m > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -p > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -bad > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --bad=a > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -rc= > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc=a > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc=a= > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py --rc==a > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c "import sys; sys.exit()" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c "import 
sys; sys.exit(0)" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c "import sys; sys.exit(1)" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c "import sys; sys.exit('error')" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py -c "from mpi4py import MPI; 1/0;" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run test/test_util_pkl5.py -q 2> /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run test/test_util_pkl5.py -q 2> /dev/null $MPIEXEC -n 3 $PYTHON -m coverage run test/test_util_pkl5.py -q 2> /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run test/test_util_dtlib.py -q 2> /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run demo/futures/test_futures.py -q 2> /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run demo/futures/test_futures.py -q 2> /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.run -rc threads=False demo/futures/test_futures.py -q 2> /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.run -rc threads=False demo/futures/test_futures.py -q 2> /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures demo/futures/test_futures.py -q 2> /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures demo/futures/test_futures.py -q ASharedPoolInitTest 2> /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures demo/futures/test_futures.py -q ProcessPoolPickleTest 2> /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -h > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures -h > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -m this > /dev/null $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures -m this > /dev/null $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "42" $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures -c "42" $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures - /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures xy > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -m > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -x > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "1/0" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "raise SystemExit(11)" > /dev/null 2>&1 || true $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures -c "raise SystemExit('')" > /dev/null 2>&1 || true if [ $(command -v mpichversion) ]; then testdir=demo/futures $MPIEXEC -n 1 $PYTHON -m coverage run -m mpi4py.futures.server --xyz > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures.server --bind localhost & mpi4pyserver=$!; sleep 1; $MPIEXEC -n 1 $PYTHON -m coverage run $testdir/test_service.py --host localhost wait $mpi4pyserver $MPIEXEC -n 2 $PYTHON -m coverage run -m mpi4py.futures.server --port 31414 --info "a=x,b=y" & mpi4pyserver=$!; sleep 1; $MPIEXEC -n 1 $PYTHON -m coverage run $testdir/test_service.py --port 31414 --info "a=x,b=y" wait $mpi4pyserver fi if [ $(command -v mpichversion) ] && [ $(command -v hydra_nameserver) ]; then testdir=demo/futures hydra_nameserver & nameserver=$!; sleep 1; $MPIEXEC -nameserver localhost -n 2 $PYTHON -m coverage run -m mpi4py.futures.server & mpi4pyserver=$!; sleep 1; $MPIEXEC -nameserver localhost -n 1 $PYTHON -m coverage run $testdir/test_service.py wait $mpi4pyserver kill -TERM $nameserver wait $nameserver 
2>/dev/null || true fi $PYTHON -m coverage combine mpi4py-3.1.6/conf/cyautodoc.py000066400000000000000000000215161460670727200162540ustar00rootroot00000000000000from __future__ import absolute_import from __future__ import print_function from Cython.Compiler import Options from Cython.Compiler import PyrexTypes from Cython.Compiler.Visitor import CythonTransform from Cython.Compiler.StringEncoding import EncodedString from Cython.Compiler.AutoDocTransforms import ExpressionWriter from Cython.Compiler.AutoDocTransforms import AnnotationWriter class EmbedSignature(CythonTransform): def __init__(self, context): super(EmbedSignature, self).__init__(context) self.class_name = None self.class_node = None def _fmt_expr(self, node): writer = ExpressionWriter() result = writer.write(node) # print(type(node).__name__, '-->', result) return result def _fmt_annotation(self, node): writer = AnnotationWriter() result = writer.write(node) # print(type(node).__name__, '-->', result) return result def _fmt_arg(self, arg): annotation = None if arg.is_self_arg: doc = arg.name # clinic: '$self' elif arg.is_type_arg: doc = arg.name # clinic: '$type' else: doc = arg.name if arg.type is PyrexTypes.py_object_type: annotation = None # XXX use 'Any' ? else: annotation = arg.type.declaration_code('', for_display=1) #if arg.default and arg.default.is_none: # annotation = 'Optional[%s]' % annotation if arg.annotation: annotation = self._fmt_annotation(arg.annotation) if annotation: doc = doc + (': %s' % annotation) if arg.default: default = self._fmt_expr(arg.default) doc = doc + (' = %s' % default) elif arg.default: default = self._fmt_expr(arg.default) doc = doc + ('=%s' % default) return doc def _fmt_star_arg(self, arg): arg_doc = arg.name if arg.annotation: annotation = self._fmt_annotation(arg.annotation) arg_doc = arg_doc + (': %s' % annotation) return arg_doc def _fmt_arglist(self, args, npoargs=0, npargs=0, pargs=None, nkargs=0, kargs=None, hide_self=False): arglist = [] for arg in args: if not hide_self or not arg.entry.is_self_arg: arg_doc = self._fmt_arg(arg) arglist.append(arg_doc) if pargs: arg_doc = self._fmt_star_arg(pargs) arglist.insert(npargs + npoargs, '*%s' % arg_doc) elif nkargs: arglist.insert(npargs + npoargs, '*') if npoargs: arglist.insert(npoargs, '/') if kargs: arg_doc = self._fmt_star_arg(kargs) arglist.append('**%s' % arg_doc) return arglist def _fmt_ret_type(self, ret): if ret is PyrexTypes.py_object_type: return None else: return ret.declaration_code("", for_display=1) def _fmt_signature(self, cls_name, func_name, args, npoargs=0, npargs=0, pargs=None, nkargs=0, kargs=None, return_expr=None, return_type=None, hide_self=False): arglist = self._fmt_arglist(args, npoargs, npargs, pargs, nkargs, kargs, hide_self=hide_self) arglist_doc = ', '.join(arglist) func_doc = '%s(%s)' % (func_name, arglist_doc) if cls_name: func_doc = '%s.%s' % (cls_name, func_doc) ret_doc = None if return_expr: ret_doc = self._fmt_annotation(return_expr) elif return_type: ret_doc = self._fmt_ret_type(return_type) if ret_doc: docfmt = '%s -> %s' # clinic: '%s -> (%s)' func_doc = docfmt % (func_doc, ret_doc) return func_doc def _embed_signature(self, signature, node_doc): if node_doc: docfmt = "%s\n%s" # clinic: "%s\n--\n\n%s return docfmt % (signature, node_doc) else: return signature def __call__(self, node): if not Options.docstrings: return node else: return super(EmbedSignature, self).__call__(node) def visit_ClassDefNode(self, node): oldname = self.class_name oldclass = self.class_node self.class_node = node try: 
# PyClassDefNode self.class_name = node.name except AttributeError: # CClassDefNode self.class_name = node.class_name self.visitchildren(node) self.class_name = oldname self.class_node = oldclass return node def visit_LambdaNode(self, node): # lambda expressions so not have signature or inner functions return node def visit_DefNode(self, node): if not self.current_directives['embedsignature']: return node is_constructor = False hide_self = False if node.entry.is_special: is_constructor = self.class_node and node.name == '__init__' if not is_constructor: return node class_name, func_name = None, self.class_name hide_self = True else: class_name, func_name = self.class_name, node.name npoargs = getattr(node, 'num_posonly_args', 0) nkargs = getattr(node, 'num_kwonly_args', 0) npargs = len(node.args) - nkargs - npoargs signature = self._fmt_signature( class_name, func_name, node.args, npoargs, npargs, node.star_arg, nkargs, node.starstar_arg, return_expr=node.return_type_annotation, return_type=None, hide_self=hide_self) if signature: if is_constructor: doc_holder = self.class_node.entry.type.scope else: doc_holder = node.entry if doc_holder.doc is not None: old_doc = doc_holder.doc elif not is_constructor and getattr(node, 'py_func', None) is not None: old_doc = node.py_func.entry.doc else: old_doc = None new_doc = self._embed_signature(signature, old_doc) doc_holder.doc = EncodedString(new_doc) if not is_constructor and getattr(node, 'py_func', None) is not None: node.py_func.entry.doc = EncodedString(new_doc) return node def visit_CFuncDefNode(self, node): if not self.current_directives['embedsignature']: return node if not node.overridable: # not cpdef FOO(...): return node signature = self._fmt_signature( self.class_name, node.declarator.base.name, node.declarator.args, return_type=node.return_type) if signature: if node.entry.doc is not None: old_doc = node.entry.doc elif getattr(node, 'py_func', None) is not None: old_doc = node.py_func.entry.doc else: old_doc = None new_doc = self._embed_signature(signature, old_doc) node.entry.doc = EncodedString(new_doc) py_func = getattr(node, 'py_func', None) if py_func is not None: py_func.entry.doc = EncodedString(new_doc) return node def visit_PropertyNode(self, node): if not self.current_directives['embedsignature']: return node entry = node.entry body = node.body prop_name = entry.name type_name = None if entry.visibility == 'public': # property synthesised from a cdef public attribute type_name = entry.type.declaration_code("", for_display=1) if not entry.type.is_pyobject: type_name = "'%s'" % type_name elif entry.type.is_extension_type: type_name = entry.type.module_name + '.' 
+ type_name if type_name is None: for stat in body.stats: if stat.name != '__get__': continue cls_name = self.class_name if cls_name: prop_name = '%s.%s' % (cls_name, prop_name) ret_annotation = stat.return_type_annotation if ret_annotation: type_name = self._fmt_annotation(ret_annotation) if type_name is not None: signature = '%s: %s' % (prop_name, type_name) new_doc = self._embed_signature(signature, entry.doc) entry.doc = EncodedString(new_doc) return node # Monkeypatch EmbedSignature transform from Cython.Compiler import AutoDocTransforms AutoDocTransforms.EmbedSignature = EmbedSignature mpi4py-3.1.6/conf/cythonize.bat000066400000000000000000000002131460670727200164030ustar00rootroot00000000000000@echo off python -m cython --3str --cleanup 3 -w src %* mpi4py.MPI.pyx -o mpi4py.MPI.c move src\mpi4py.MPI*.h src\mpi4py\include\mpi4py mpi4py-3.1.6/conf/cythonize.py000066400000000000000000000044111460670727200162710ustar00rootroot00000000000000#!/usr/bin/env python import sys, os def cythonize(source, output=None, includes=(), destdir_c=None, destdir_h=None, wdir=None): import cyautodoc from Cython.Compiler.Main import \ CompilationOptions, default_options, \ compile, \ PyrexError from Cython.Compiler import Options cwd = os.getcwd() try: if output is None: name, _ = os.path.splitext(source) output = name + '.c' else: name, _ = os.path.splitext(output) outputs_c = [output] outputs_h = [name + '.h', name + '_api.h'] # change working directory if wdir: os.chdir(wdir) # run Cython on source options = CompilationOptions(default_options) if Options.directive_types['language_level'] is str: options.language_level = '3str' options.output_file = outputs_c[0] options.include_path = list(includes) Options.generate_cleanup_code = 3 any_failures = 0 try: result = compile(source, options) if result.num_errors > 0: any_failures = 1 except (EnvironmentError, PyrexError): e = sys.exc_info()[1] sys.stderr.write(str(e) + '\n') any_failures = 1 if any_failures: for output in outputs_c + outputs_h: try: os.remove(output) except OSError: pass return 1 # move ouputs for destdir, outputs in ( (destdir_c, outputs_c), (destdir_h, outputs_h)): if destdir is None: continue for output in outputs: dest = os.path.join( destdir, os.path.basename(output)) try: os.remove(dest) except OSError: pass os.rename(output, dest) # return 0 # finally: os.chdir(cwd) if __name__ == "__main__": sys.exit( cythonize('mpi4py/MPI.pyx', 'mpi4py.MPI.c', destdir_h=os.path.join('mpi4py', 'include', 'mpi4py'), wdir='src') ) mpi4py-3.1.6/conf/cythonize.sh000077500000000000000000000002131460670727200162520ustar00rootroot00000000000000#!/bin/sh python -m cython --3str --cleanup 3 -w src $@ mpi4py/MPI.pyx -o mpi4py.MPI.c && \ mv src/mpi4py.MPI*.h src/mpi4py/include/mpi4py mpi4py-3.1.6/conf/docutils.conf000066400000000000000000000001771460670727200164050ustar00rootroot00000000000000[general] input-encoding: UTF-8:strict output-encoding: UTF-8:strict [html5 writer] compact-lists: no compact-field-lists: no mpi4py-3.1.6/conf/epydoc.cfg000066400000000000000000000105451460670727200156540ustar00rootroot00000000000000[epydoc] # Epydoc section marker (required by ConfigParser) # The list of objects to document. Objects can be named using # dotted names, module filenames, or package directory names. # Alases for this option include "objects" and "values". modules: mpi4py # The type of output that should be generated. Should be one # of: html, text, latex, dvi, ps, pdf. #output: html # The path to the output directory. May be relative or absolute. 
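# This configuration is consumed by conf/epydocify.py, which appends
# "--config=<path to this file>" to the epydoc command line before
# calling epydoc.cli.cli() (see epydocify.py further below).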
#target: docs/html/ # An integer indicating how verbose epydoc should be. The default # value is 0; negative values will supress warnings and errors; # positive values will give more verbose output. verbosity: 0 # A boolean value indicating that Epydoc should show a tracaback # in case of unexpected error. By default don't show tracebacks #debug: 0 # If True, don't try to use colors or cursor control when doing # textual output. The default False assumes a rich text prompt #simple-term: 0 ### Generation options # The default markup language for docstrings, for modules that do # not define __docformat__. Defaults to epytext. docformat: reStructuredText # Whether or not parsing should be used to examine objects. parse: yes # Whether or not introspection should be used to examine objects. introspect: yes # Don't examine in any way the modules whose dotted name match this # regular expression pattern. exclude: mpi4py\.(?!MPI) # Don't perform introspection on the modules whose dotted name match this # regular expression pattern. #exclude-introspect # Don't perform parsing on the modules whose dotted name match this # regular expression pattern. #exclude-parse # The format for showing inheritance objects. # It should be one of: 'grouped', 'listed', 'included'. inheritance: listed # Whether or not to inclue private variables. (Even if included, # private variables will be hidden by default.) private: yes # Whether or not to list each module's imports. imports: no # Whether or not to include syntax highlighted source code in # the output (HTML only). sourcecode: yes # Whether or not to include a a page with Epydoc log, containing # effective option at the time of generation and the reported logs. include-log: no ### Output options # The documented project's name. name: MPI for Python # The documented project's URL. url: https://github.com/mpi4py/mpi4py # The CSS stylesheet for HTML output. Can be the name of a builtin # stylesheet, or the name of a file. css: white # HTML code for the project link in the navigation bar. If left # unspecified, the project link will be generated based on the # project's name and URL. #link: My Cool Project # The "top" page for the documentation. Can be a URL, the name # of a module or class, or one of the special names "trees.html", # "indices.html", or "help.html" #top: os.path # An alternative help file. The named file should contain the # body of an HTML file; navigation bars will be added to it. #help: my_helpfile.html # Whether or not to include a frames-based table of contents. frames: yes # Whether each class should be listed in its own section when # generating LaTeX or PDF output. separate-classes: no ### API linking options # Define a new API document. A new interpreted text role # will be created #external-api: epydoc # Use the records in this file to resolve objects in the API named NAME. #external-api-file: epydoc:api-objects.txt # Use this URL prefix to configure the string returned for external API. #external-api-root: epydoc:http://epydoc.sourceforge.net/api ### Graph options # The list of graph types that should be automatically included # in the output. Graphs are generated using the Graphviz "dot" # executable. Graph types include: "classtree", "callgraph", # "umlclasstree". Use "all" to include all graph types graph: classtree # The path to the Graphviz "dot" executable, used to generate # graphs. #dotpath: /usr/local/bin/dot # The name of one or more pstat files (generated by the profile # or hotshot module). 
These are used to generate call graphs. #pstat: profile.out # Specify the font used to generate Graphviz graphs. # (e.g., helvetica or times). graph-font: Helvetica # Specify the font size used to generate Graphviz graphs. graph-font-size: 10 ### Return value options # The condition upon which Epydoc should exit with a non-zero # exit status. Possible values are error, warning, docstring_warning #fail-on: error mpi4py-3.1.6/conf/epydocify.py000077500000000000000000000065221460670727200162600ustar00rootroot00000000000000#!/usr/bin/env python # -------------------------------------------------------------------- from mpi4py import MPI try: from signal import signal, SIGPIPE, SIG_IGN signal(SIGPIPE, SIG_IGN) except ImportError: pass # -------------------------------------------------------------------- try: from docutils.nodes import NodeVisitor NodeVisitor.unknown_visit = lambda self, node: None NodeVisitor.unknown_departure = lambda self, node: None except ImportError: pass try: # epydoc 3.0.1 + docutils 0.6 from docutils.nodes import Text try: from collections import UserString except ImportError: from UserString import UserString if not isinstance(Text, UserString): def Text_get_data(s): try: return s._data except AttributeError: return s.astext() def Text_set_data(s, d): s.astext = lambda: d s._data = d Text.data = property(Text_get_data, Text_set_data) except ImportError: pass # -------------------------------------------------------------------- from epydoc.docwriter import dotgraph import re dotgraph._DOT_VERSION_RE = \ re.compile(r'dot (?:- Graphviz )version ([\d\.]+)') try: dotgraph.DotGraph.DEFAULT_HTML_IMAGE_FORMAT dotgraph.DotGraph.DEFAULT_HTML_IMAGE_FORMAT = 'png' except AttributeError: DotGraph_to_html = dotgraph.DotGraph.to_html DotGraph_run_dot = dotgraph.DotGraph._run_dot def to_html(self, image_file, image_url, center=True): if image_file[-4:] == '.gif': image_file = image_file[:-4] + '.png' if image_url[-4:] == '.gif': image_url = image_url[:-4] + '.png' return DotGraph_to_html(self, image_file, image_url) def _run_dot(self, *options): if '-Tgif' in options: opts = list(options) for i, o in enumerate(opts): if o == '-Tgif': opts[i] = '-Tpng' options = type(options)(opts) return DotGraph_run_dot(self, *options) dotgraph.DotGraph.to_html = to_html dotgraph.DotGraph._run_dot = _run_dot # -------------------------------------------------------------------- from epydoc import docstringparser as dp dp.STANDARD_FIELDS += [ dp.DocstringField(['credits'], 'Credits', multivalue=0, varnames=['__credits__']), ] from epydoc import docintrospecter as di di.UNDOCUMENTED_MODULE_VARS += ('__package__',) # -------------------------------------------------------------------- import re _SIGNATURE_RE = re.compile( # Class name (for builtin methods) r'^\s*((?P\w+)\.)?' 
+ # The function name r'(?P\w+)' + # The parameters r'\(((?P(?:self|cls|mcs)),?)?(?P.*?)\)' + # The return value (optional) r'(\s*(->)\s*(?P.*))?'+ # The end marker r'\s*(\n|\s+(--|<=+>)\s+|$|\.\s+|\.\n)') from epydoc import docstringparser as dsp dsp._SIGNATURE_RE = _SIGNATURE_RE # -------------------------------------------------------------------- import sys, os import epydoc.cli def epydocify(): dirname = os.path.dirname(__file__) config = os.path.join(dirname, 'epydoc.cfg') sys.argv.append('--config=' + config) epydoc.cli.cli() if __name__ == '__main__': epydocify() # -------------------------------------------------------------------- mpi4py-3.1.6/conf/mpiconfig.py000066400000000000000000000450431460670727200162360ustar00rootroot00000000000000import sys, os, platform from distutils.util import split_quoted from distutils.spawn import find_executable from distutils import log as dulog try: from collections import OrderedDict except ImportError: OrderedDict = dict try: from configparser import ConfigParser from configparser import Error as ConfigParserError except ImportError: from ConfigParser import ConfigParser from ConfigParser import Error as ConfigParserError class Config(object): def __init__(self, logger=None): self.log = logger or dulog self.section = None self.filename = None self.compiler_info = OrderedDict(( ('mpicc' , None), ('mpicxx' , None), ('mpifort', None), ('mpif90' , None), ('mpif77' , None), ('mpild' , None), )) self.library_info = OrderedDict(( ('define_macros' , []), ('undef_macros' , []), ('include_dirs' , []), ('libraries' , []), ('library_dirs' , []), ('runtime_library_dirs' , []), ('extra_compile_args' , []), ('extra_link_args' , []), ('extra_objects' , []), )) def __bool__(self): for v in self.compiler_info.values(): if v: return True for v in self.library_info.values(): if v: return True return False __nonzero__ = __bool__ def get(self, k, d=None): if k in self.compiler_info: return self.compiler_info[k] if k in self.library_info: return self.library_info[k] return d def info(self, log=None): if log is None: log = self.log mpicc = self.compiler_info.get('mpicc') mpicxx = self.compiler_info.get('mpicxx') mpifort = self.compiler_info.get('mpifort') mpif90 = self.compiler_info.get('mpif90') mpif77 = self.compiler_info.get('mpif77') mpild = self.compiler_info.get('mpild') if mpicc: log.info("MPI C compiler: %s", mpicc) if mpicxx: log.info("MPI C++ compiler: %s", mpicxx) if mpifort: log.info("MPI F compiler: %s", mpifort) if mpif90: log.info("MPI F90 compiler: %s", mpif90) if mpif77: log.info("MPI F77 compiler: %s", mpif77) if mpild: log.info("MPI linker: %s", mpild) def update(self, config, **more): if hasattr(config, 'keys'): config = config.items() for option, value in config: if option in self.compiler_info: self.compiler_info[option] = value if option in self.library_info: self.library_info[option] = value if more: self.update(more) def setup(self, options, environ=None): if environ is None: environ = os.environ self.setup_library_info(options, environ) self.setup_compiler_info(options, environ) def setup_library_info(self, options, environ): filename = section = None mpiopt = getattr(options, 'mpi', None) mpiopt = environ.get('MPICFG', mpiopt) if mpiopt: if ',' in mpiopt: section, filename = mpiopt.split(',', 1) elif ':' in mpiopt: filename, section = mpiopt.split(':', 1) elif os.path.isfile(mpiopt): filename = mpiopt else: section = mpiopt if not filename: filename = "mpi.cfg" if not section: section = "mpi" mach = platform.machine() arch = 
platform.architecture()[0] plat = sys.platform osnm = os.name if 'linux' == plat[:5]: plat = 'linux' elif 'sunos' == plat[:5]: plat = 'solaris' elif 'win' == plat[:3]: plat = 'windows' suffixes = [] suffixes.append(plat+'-'+mach) suffixes.append(plat+'-'+arch) suffixes.append(plat) suffixes.append(osnm+'-'+mach) suffixes.append(osnm+'-'+arch) suffixes.append(osnm) suffixes.append(mach) suffixes.append(arch) sections = [section+"-"+s for s in suffixes] sections += [section] self.load(filename, sections) if not self: if os.name == 'posix': self._setup_posix() if sys.platform == 'win32': self._setup_windows() def _setup_posix(self): pass def _setup_windows(self): if self._setup_windows_intelmpi(): return if self._setup_windows_msmpi(): return def _setup_windows_intelmpi(self): from os.path import join, isdir, isfile I_MPI_ROOT = os.environ.get('I_MPI_ROOT') if not I_MPI_ROOT: return None if not isdir(I_MPI_ROOT): return None arch = platform.architecture()[0][:2] archdir = {'32':'ia32', '64':'intel64'}[arch] mpi_dir = join(I_MPI_ROOT, archdir) if not isdir(mpi_dir): mpi_dir = I_MPI_ROOT IMPI_INC = join(mpi_dir, 'include') IMPI_LIB = join(mpi_dir, 'lib') I_MPI_LIBRARY_KIND = os.environ.get('I_MPI_LIBRARY_KIND') library_kind = os.getenv('library_kind') kind = I_MPI_LIBRARY_KIND or library_kind or 'release' if isfile(join(IMPI_LIB, kind, 'impi.lib')): IMPI_LIB = join(IMPI_LIB, kind) ok = ( IMPI_INC and isfile(join(IMPI_INC, 'mpi.h')) and IMPI_LIB and isfile(join(IMPI_LIB, 'impi.lib')) ) if not ok: return False IMPI_INC = os.path.normpath(IMPI_INC) IMPI_LIB = os.path.normpath(IMPI_LIB) self.library_info.update( include_dirs=[IMPI_INC], library_dirs=[IMPI_LIB], libraries=['impi']) self.section = 'impi' self.filename = [os.path.dirname(IMPI_INC)] return True def _setup_windows_msmpi(self): # Microsoft MPI (v7, v6, v5, v4) def msmpi_ver(): try: try: import winreg except ImportError: import _winreg as winreg HKLM = winreg.HKEY_LOCAL_MACHINE subkey = r"SOFTWARE\Microsoft\MPI" with winreg.OpenKey(HKLM, subkey) as key: for i in range(winreg.QueryInfoKey(key)[1]): name, value, type = winreg.EnumValue(key, i) if name != "Version": continue major, minor = value.split('.')[:2] return (int(major), int(minor)) except Exception: # noqa: S110 pass MSMPI_VER = os.environ.get('MSMPI_VER') if MSMPI_VER: try: major, minor = MSMPI_VER.split('.')[:2] return (int(major), int(minor)) except Exception: raise RuntimeError( "invalid environment: MSMPI_VER="+MSMPI_VER ) return None def setup_msmpi(MSMPI_INC, MSMPI_LIB): from os.path import join, isfile ok = ( MSMPI_INC and isfile(join(MSMPI_INC, 'mpi.h')) and MSMPI_LIB and isfile(join(MSMPI_LIB, 'msmpi.lib')) ) if not ok: return False version = msmpi_ver() if version is not None: major, minor = version MSMPI_VER = hex((major<<8)|(minor&0xFF)) self.library_info.update( define_macros=[('MSMPI_VER', MSMPI_VER)], ) MSMPI_INC = os.path.normpath(MSMPI_INC) MSMPI_LIB = os.path.normpath(MSMPI_LIB) self.library_info.update( include_dirs=[MSMPI_INC], library_dirs=[MSMPI_LIB], libraries=['msmpi'], ) self.section = 'msmpi' self.filename = [os.path.dirname(MSMPI_INC)] return True arch = platform.architecture()[0][:2] # Look for Microsoft MPI in the environment MSMPI_INC = os.environ.get('MSMPI_INC') MSMPI_LIB = os.environ.get('MSMPI_LIB'+arch) MSMPI_LIB = MSMPI_LIB or os.environ.get('MSMPI_LIB') if setup_msmpi(MSMPI_INC, MSMPI_LIB): return True # Look for Microsoft MPI v7/v6/v5 in default install path for ProgramFiles in ('ProgramFiles', 'ProgramFiles(x86)'): ProgramFiles = 
os.environ.get(ProgramFiles, '') archdir = {'32':'x86', '64':'x64'}[arch] MSMPI_DIR = os.path.join(ProgramFiles, 'Microsoft SDKs', 'MPI') MSMPI_INC = os.path.join(MSMPI_DIR, 'Include') MSMPI_LIB = os.path.join(MSMPI_DIR, 'Lib', archdir) if setup_msmpi(MSMPI_INC, MSMPI_LIB): return True # Look for Microsoft HPC Pack 2012 R2 in default install path for ProgramFiles in ('ProgramFiles', 'ProgramFiles(x86)'): ProgramFiles = os.environ.get(ProgramFiles, '') archdir = {'32':'i386', '64':'amd64'}[arch] MSMPI_DIR = os.path.join(ProgramFiles, 'Microsoft MPI') MSMPI_INC = os.path.join(MSMPI_DIR, 'Inc') MSMPI_LIB = os.path.join(MSMPI_DIR, 'Lib', archdir) if setup_msmpi(MSMPI_INC, MSMPI_LIB): return True # Microsoft MPI (legacy) and others ProgramFiles = os.environ.get('ProgramFiles', '') CCP_HOME = os.environ.get('CCP_HOME', '') for (name, prefix, suffix) in ( ('msmpi', CCP_HOME, ''), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012 R2'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2012 SDK'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008 R2'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008'), ('msmpi', ProgramFiles, 'Microsoft HPC Pack 2008 SDK'), ): mpi_dir = os.path.join(prefix, suffix) if not mpi_dir or not os.path.isdir(mpi_dir): continue define_macros = [] include_dir = os.path.join(mpi_dir, 'include') library = 'mpi' library_dir = os.path.join(mpi_dir, 'lib') if name == 'msmpi': include_dir = os.path.join(mpi_dir, 'inc') library = 'msmpi' arch = platform.architecture()[0] if arch == '32bit': library_dir = os.path.join(library_dir, 'i386') if arch == '64bit': library_dir = os.path.join(library_dir, 'amd64') if not os.path.isdir(include_dir): include_dir = os.path.join(mpi_dir, 'include') self.library_info.update( define_macros=define_macros, include_dirs=[include_dir], libraries=[library], library_dirs=[library_dir], ) self.section = name self.filename = [mpi_dir] return True return None def setup_compiler_info(self, options, environ): def find_exe(cmd, path=None): if not cmd: return None parts = split_quoted(cmd) exe, args = parts[0], parts[1:] if not os.path.isabs(exe) and path: exe = os.path.basename(exe) exe = find_executable(exe, path) if not exe: return None return ' '.join([exe]+args) COMPILERS = ( ('mpicc', ['mpicc', 'mpcc_r']), ('mpicxx', ['mpicxx', 'mpic++', 'mpiCC', 'mpCC_r']), ('mpifort', ['mpifort', 'mpif90', 'mpif77', 'mpfort_r']), ('mpif90', ['mpif90', 'mpf90_r']), ('mpif77', ['mpif77', 'mpf77_r']), ('mpild', []), ) # compiler_info = {} PATH = environ.get('PATH', '') for name, _ in COMPILERS: cmd = (environ.get(name.upper()) or getattr(options, name, None) or self.compiler_info.get(name) or None) if cmd: exe = find_exe(cmd, path=PATH) if exe: path = os.path.dirname(exe) PATH = path + os.path.pathsep + PATH compiler_info[name] = exe else: self.log.error("error: '%s' not found", cmd) # if not self and not compiler_info: for name, candidates in COMPILERS: for cmd in candidates: cmd = find_exe(cmd) if cmd: compiler_info[name] = cmd break # self.compiler_info.update(compiler_info) def load(self, filename="mpi.cfg", section='mpi'): if isinstance(filename, str): filenames = filename.split(os.path.pathsep) else: filenames = list(filename) if isinstance(section, str): sections = section.split(',') else: sections = list(section) # try: parser = ConfigParser(dict_type=OrderedDict) except TypeError: parser = ConfigParser() try: read_ok = parser.read(filenames) except ConfigParserError: self.log.error( "error: parsing configuration 
file/s '%s'", os.path.pathsep.join(filenames)) return None for section in sections: if parser.has_section(section): break section = None if not section: self.log.error( "error: section/s '%s' not found in file/s '%s'", ','.join(sections), os.path.pathsep.join(filenames)) return None parser_items = list(parser.items(section, vars=None)) # compiler_info = type(self.compiler_info)() for option, value in parser_items: if option in self.compiler_info: compiler_info[option] = value # pathsep = os.path.pathsep expanduser = os.path.expanduser expandvars = os.path.expandvars library_info = type(self.library_info)() for k, v in parser_items: if k in ('define_macros', 'undef_macros', ): macros = [e.strip() for e in v.split(',')] if k == 'define_macros': for i, m in enumerate(macros): try: # -DFOO=bar idx = m.index('=') macro = (m[:idx], m[idx+1:] or None) except ValueError: # -DFOO macro = (m, None) macros[i] = macro library_info[k] = macros elif k in ('include_dirs', 'library_dirs', 'runtime_dirs', 'runtime_library_dirs', ): if k == 'runtime_dirs': k = 'runtime_library_dirs' pathlist = [p.strip() for p in v.split(pathsep)] library_info[k] = [expanduser(expandvars(p)) for p in pathlist if p] elif k == 'libraries': library_info[k] = [e.strip() for e in split_quoted(v)] elif k in ('extra_compile_args', 'extra_link_args', ): library_info[k] = split_quoted(v) elif k == 'extra_objects': library_info[k] = [expanduser(expandvars(e)) for e in split_quoted(v)] elif hasattr(self, k): library_info[k] = v.strip() else: pass # self.section = section self.filename = read_ok self.compiler_info.update(compiler_info) self.library_info.update(library_info) return compiler_info, library_info, section, read_ok def dump(self, filename=None, section='mpi'): # prepare configuration values compiler_info = self.compiler_info.copy() library_info = self.library_info.copy() for k in library_info: if k in ('define_macros', 'undef_macros', ): macros = library_info[k] if k == 'define_macros': for i, (m, v) in enumerate(macros): if v is None: macros[i] = m else: macros[i] = '%s=%s' % (m, v) library_info[k] = ','.join(macros) elif k in ('include_dirs', 'library_dirs', 'runtime_library_dirs', ): library_info[k] = os.path.pathsep.join(library_info[k]) elif isinstance(library_info[k], list): library_info[k] = ' '.join(library_info[k]) # fill configuration parser try: parser = ConfigParser(dict_type=OrderedDict) except TypeError: parser = ConfigParser() parser.add_section(section) for option, value in compiler_info.items(): if not value: continue parser.set(section, option, value) for option, value in library_info.items(): if not value: continue parser.set(section, option, value) # save configuration file if filename is None: parser.write(sys.stdout) elif hasattr(filename, 'write'): parser.write(filename) elif isinstance(filename, str): with open(filename, 'w') as f: parser.write(f) return parser if __name__ == '__main__': import optparse parser = optparse.OptionParser() parser.add_option("--mpi", type="string") parser.add_option("--mpicc", type="string") parser.add_option("--mpicxx", type="string") parser.add_option("--mpifort", type="string") parser.add_option("--mpif90", type="string") parser.add_option("--mpif77", type="string") parser.add_option("--mpild", type="string") (opts, args) = parser.parse_args() log = dulog.Log(dulog.INFO) cfg = Config(log) cfg.setup(opts) cfg.dump() mpi4py-3.1.6/conf/mpidistutils.py000066400000000000000000001733721460670727200170240ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: 
dalcinl@gmail.com """ Support for building mpi4py with distutils/setuptools. """ # ----------------------------------------------------------------------------- import sys, os, shlex, platform from distutils import sysconfig from distutils.util import convert_path from distutils.util import split_quoted from distutils.file_util import copy_file from distutils import log # Fix missing variables PyPy's distutils.sysconfig if hasattr(sys, 'pypy_version_info'): config_vars = sysconfig.get_config_vars() for name in ('prefix', 'exec_prefix'): if name not in config_vars: config_vars[name] = os.path.normpath(getattr(sys, name)) if sys.platform == 'darwin' and 'LDSHARED' in config_vars: ldshared = shlex.split(config_vars['LDSHARED']) while '-shared' in ldshared: ldshared[ldshared.index('-shared')] = '-bundle' if '-undefined' not in ldshared: ldshared.extend('-undefined dynamic_lookup'.split()) config_vars['LDSHARED'] = ' '.join(ldshared) # Workaround distutils.cygwinccompiler.get_versions() # failing when the compiler path contains spaces from distutils import cygwinccompiler as cygcc if hasattr(cygcc, 'get_versions'): cygcc_get_versions = cygcc.get_versions def get_versions(): import distutils.spawn find_executable_orig = distutils.spawn.find_executable def find_executable(exe): exe = find_executable_orig(exe) if exe and ' ' in exe: exe = '"' + exe + '"' return exe distutils.spawn.find_executable = find_executable versions = cygcc_get_versions() distutils.spawn.find_executable = find_executable_orig return versions cygcc.get_versions = get_versions # Workaround distutils.ccompiler.CCompiler._fix_lib_args from distutils.ccompiler import CCompiler cc_fix_compile_args_orig = getattr(CCompiler, '_fix_compile_args', None) cc_fix_lib_args_orig = getattr(CCompiler, '_fix_lib_args', None) def cc_fix_compile_args(self, out_dir, macros, inc_dirs): macros = macros or [] inc_dirs = inc_dirs or [] return cc_fix_compile_args_orig(self, out_dir, macros, inc_dirs) def cc_fix_lib_args(self, libs, lib_dirs, rt_lib_dirs): libs = libs or [] lib_dirs = lib_dirs or [] rt_lib_dirs = rt_lib_dirs or [] return cc_fix_lib_args_orig(self, libs, lib_dirs, rt_lib_dirs) CCompiler._fix_compile_args = cc_fix_compile_args CCompiler._fix_lib_args = cc_fix_lib_args # Normalize linker flags for runtime library dirs from distutils.unixccompiler import UnixCCompiler rpath_option_orig = UnixCCompiler.runtime_library_dir_option def rpath_option(compiler, dir): option = rpath_option_orig(compiler, dir) if not isinstance(option, str): return option if sys.platform.startswith('linux'): if option.startswith('-R'): option = option.replace('-R', '-Wl,-rpath,', 1) elif option.startswith('-Wl,-R,'): option = option.replace('-Wl,-R,', '-Wl,-rpath,', 1) return option UnixCCompiler.runtime_library_dir_option = rpath_option def _fix_env(cmd, i): while os.path.basename(cmd[i]) == 'env': i = i + 1 while '=' in cmd[i]: i = i + 1 return i def _fix_xcrun(cmd, i): if os.path.basename(cmd[i]) == 'xcrun': del cmd[i] while True: if cmd[i] == '-sdk': del cmd[i:i+2] continue if cmd[i] == '-log': del cmd[i] continue break return i def fix_compiler_cmd(cc, mpicc): if not mpicc: return i = 0 i = _fix_env(cc, i) i = _fix_xcrun(cc, i) while os.path.basename(cc[i]) == 'ccache': i = i + 1 cc[i:i+1] = split_quoted(mpicc) def fix_linker_cmd(ld, mpild): if not mpild: return i = 0 if (sys.platform.startswith('aix') and os.path.basename(ld[i]) == 'ld_so_aix'): i = 1 i = _fix_env(ld, i) i = _fix_xcrun(ld, i) while os.path.basename(ld[i]) == 'ccache': del ld[i] ld[i:i+1] = 
split_quoted(mpild) def customize_compiler( compiler, lang=None, mpicc=None, mpicxx=None, mpild=None, ): sysconfig.customize_compiler(compiler) if compiler.compiler_type == 'unix': ld = compiler.linker_exe for envvar in ('LDFLAGS', 'CFLAGS', 'CPPFLAGS'): if envvar in os.environ: ld += split_quoted(os.environ[envvar]) if os.environ.get('SOURCE_DATE_EPOCH') is not None: # Linker tweaks for reproducible build if sys.platform == 'darwin': os.environ['ZERO_AR_DATE'] = 'YES' if compiler.compiler_type == 'msvc': if not compiler.initialized: compiler.initialize() for flags in compiler._ldflags.values(): flags.append('/BREPRO') if compiler.compiler_type == 'unix': # Compiler command overriding if mpicc: fix_compiler_cmd(compiler.compiler, mpicc) if lang in ('c', None): fix_compiler_cmd(compiler.compiler_so, mpicc) if mpicxx: fix_compiler_cmd(compiler.compiler_cxx, mpicxx) if lang == 'c++': fix_compiler_cmd(compiler.compiler_so, mpicxx) if mpild: for ld in [compiler.linker_so, compiler.linker_exe]: fix_linker_cmd(ld, mpild) if compiler.compiler_type == 'cygwin': compiler.set_executables( preprocessor = 'gcc -mcygwin -E', ) if compiler.compiler_type == 'mingw32': compiler.set_executables( preprocessor = 'gcc -mno-cygwin -E', ) if compiler.compiler_type in ('unix', 'cygwin', 'mingw32'): badcxxflags = [ '-Wimplicit', '-Wstrict-prototypes'] for flag in badcxxflags: while flag in compiler.compiler_cxx: compiler.compiler_cxx.remove(flag) if lang == 'c++': while flag in compiler.compiler_so: compiler.compiler_so.remove(flag) if compiler.compiler_type == 'mingw32': # Remove msvcrXX.dll del compiler.dll_libraries[:] # https://bugs.python.org/issue12641 if compiler.gcc_version >= '4.4': for attr in ( 'preprocessor', 'compiler', 'compiler_cxx', 'compiler_so', 'linker_so', 'linker_exe', ): try: getattr(compiler, attr).remove('-mno-cygwin') except: pass # Add required define and compiler flags for AMD64 if platform.architecture()[0] == '64bit': for attr in ( 'preprocessor', 'compiler', 'compiler_cxx', 'compiler_so', 'linker_so', 'linker_exe', ): getattr(compiler, attr).insert(1, '-DMS_WIN64') getattr(compiler, attr).insert(1, '-m64') if compiler.compiler_type == 'msvc': if not compiler.initialized: compiler.initialize() compiler.ldflags_shared.append('/MANIFEST') compiler.ldflags_shared_debug.append('/MANIFEST') compile_options = (compiler.compile_options, compiler.compile_options_debug) from distutils.msvc9compiler import VERSION if VERSION < 10.0: for options in compile_options: options.append('/D_USE_DECLSPECS_FOR_SAL=0') options.append('/D_USE_ATTRIBUTES_FOR_SAL=0') options.append('/DMSMPI_NO_SAL') if VERSION <= 10.0: topdir = os.path.dirname(os.path.dirname(__file__)) srcdir = os.path.abspath(os.path.join(topdir, 'src')) header = os.path.join(srcdir, 'msvcfix.h') if os.path.exists(header): for options in compile_options: options.append('/FI%s' % header) # ----------------------------------------------------------------------------- from mpiconfig import Config def configuration(command_obj, verbose=True): config = Config() config.setup(command_obj) if verbose: if config.section and config.filename: log.info("MPI configuration: [%s] from '%s'", config.section, ','.join(config.filename)) config.info(log) return config def configure_compiler(compiler, config, lang=None): # mpicc = config.get('mpicc') mpicxx = config.get('mpicxx') mpild = config.get('mpild') if not mpild and (mpicc or mpicxx): if lang == 'c': mpild = mpicc if lang == 'c++': mpild = mpicxx if not mpild: mpild = mpicc or mpicxx # 
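    # What follows applies the resolved configuration to the distutils
    # compiler object: first the MPI wrapper commands selected above, then
    # any macros, include/library directories, libraries, runtime library
    # directories and extra objects from the MPI configuration, and finally
    # the extra compile/link flags for Unix-like compiler types.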
customize_compiler( compiler, lang, mpicc=mpicc, mpicxx=mpicxx, mpild=mpild, ) # for k, v in config.get('define_macros', []): compiler.define_macro(k, v) for v in config.get('undef_macros', []): compiler.undefine_macro(v) for v in config.get('include_dirs', []): compiler.add_include_dir(v) for v in config.get('libraries', []): compiler.add_library(v) for v in config.get('library_dirs', []): compiler.add_library_dir(v) for v in config.get('runtime_library_dirs', []): compiler.add_runtime_library_dir(v) for v in config.get('extra_objects', []): compiler.add_link_object(v) if compiler.compiler_type in ( 'unix', 'intel', 'cygwin', 'mingw32', ): cc_args = config.get('extra_compile_args', []) ld_args = config.get('extra_link_args', []) compiler.compiler += cc_args compiler.compiler_so += cc_args compiler.compiler_cxx += cc_args compiler.linker_so += ld_args compiler.linker_exe += ld_args return compiler # ----------------------------------------------------------------------------- try: from mpiscanner import Scanner except ImportError: class Scanner(object): def parse_file(self, *args): raise NotImplementedError( "You forgot to grab 'mpiscanner.py'") class ConfigureMPI(object): SRCDIR = 'src' SOURCES = [os.path.join('mpi4py', 'libmpi.pxd')] DESTDIR = os.path.join('src', 'lib-mpi') CONFIG_H = os.path.join('config', 'config.h') MISSING_H = 'missing.h' CONFIGTEST_H = """\ /* _configtest.h */ #if !defined(MPIAPI) # define MPIAPI #endif """ def __init__(self, config_cmd): self.scanner = Scanner() for filename in self.SOURCES: fullname = os.path.join(self.SRCDIR, filename) self.scanner.parse_file(fullname) self.config_cmd = config_cmd def run(self): results = [] with open('_configtest.h', 'w') as f: f.write(self.CONFIGTEST_H) for node in self.scanner: name = node.name testcode = node.config() confcode = node.missing(guard=False) log.info("checking for '%s' ..." 
% name) ok = self.run_test(testcode) if not ok: log.info("**** failed check for '%s'" % name) with open('_configtest.h', 'a') as f: f.write(confcode) results.append((name, ok)) try: os.remove('_configtest.h') except OSError: pass return results def gen_test(self, code): body = ['#include "_configtest.h"', 'int main(int argc, char **argv) {', '\n'.join([' ' + line for line in code.split('\n')]), ' (void)argc; (void)argv;', ' return 0;', '}'] body = '\n'.join(body) + '\n' return body def run_test(self, code, lang='c'): level = log.set_threshold(log.WARN) log.set_threshold(level) if not self.config_cmd.noisy: level = log.set_threshold(log.WARN) try: body = self.gen_test(code) headers = ['stdlib.h', 'mpi.h'] ok = self.config_cmd.try_link(body, headers=headers, lang=lang) return ok finally: log.set_threshold(level) def dump(self, results): destdir = self.DESTDIR config_h = os.path.join(destdir, self.CONFIG_H) missing_h = os.path.join(destdir, self.MISSING_H) log.info("writing '%s'", config_h) self.scanner.dump_config_h(config_h, results) log.info("writing '%s'", missing_h) self.scanner.dump_missing_h(missing_h, None) # ----------------------------------------------------------------------------- cmd_mpi_opts = [ ('mpild=', None, "MPI linker command, " "overridden by environment variable 'MPILD' " "(defaults to 'mpicc' or 'mpicxx' if any is available)"), ('mpif77=', None, "MPI F77 compiler command, " "overridden by environment variable 'MPIF77' " "(defaults to 'mpif77' if available)"), ('mpif90=', None, "MPI F90 compiler command, " "overridden by environment variable 'MPIF90' " "(defaults to 'mpif90' if available)"), ('mpifort=', None, "MPI Fortran compiler command, " "overridden by environment variable 'MPIFORT' " "(defaults to 'mpifort' if available)"), ('mpicxx=', None, "MPI C++ compiler command, " "overridden by environment variable 'MPICXX' " "(defaults to 'mpicxx', 'mpiCC', or 'mpic++' if any is available)"), ('mpicc=', None, "MPI C compiler command, " "overridden by environment variables 'MPICC' " "(defaults to 'mpicc' if available)"), ('mpi=', None, "specify a configuration section, " "and an optional list of configuration files " + "(e.g. --mpi=section,file1" + os.path.pathsep + "file2), " + "to look for MPI includes/libraries, " "overridden by environment variable 'MPICFG' " "(defaults to section 'mpi' in configuration file 'mpi.cfg')"), ('configure', None, "exhaustive test for checking missing MPI constants/types/functions"), ] def cmd_get_mpi_options(cmd_opts): optlist = [] for (option, _, _) in cmd_opts: if option[-1] == '=': option = option[:-1] option = option.replace('-','_') optlist.append(option) return optlist def cmd_initialize_mpi_options(cmd): mpiopts = cmd_get_mpi_options(cmd_mpi_opts) for op in mpiopts: setattr(cmd, op, None) def cmd_set_undefined_mpi_options(cmd, basecmd): mpiopts = cmd_get_mpi_options(cmd_mpi_opts) optlist = tuple(zip(mpiopts, mpiopts)) cmd.set_undefined_options(basecmd, *optlist) # ----------------------------------------------------------------------------- try: import setuptools except ImportError: setuptools = None def import_command(cmd): try: from importlib import import_module except ImportError: import_module = lambda n: __import__(n, fromlist=[None]) try: if not setuptools: raise ImportError return import_module('setuptools.command.' + cmd) except ImportError: return import_module('distutils.command.' 
+ cmd) if setuptools: from setuptools import Distribution as cls_Distribution from setuptools import Extension as cls_Extension from setuptools import Command else: from distutils.core import Distribution as cls_Distribution from distutils.core import Extension as cls_Extension from distutils.core import Command cmd_config = import_command('config') cmd_build = import_command('build') cmd_install = import_command('install') cmd_sdist = import_command('sdist') cmd_clean = import_command('clean') cmd_build_py = import_command('build_py') cmd_build_clib = import_command('build_clib') cmd_build_ext = import_command('build_ext') cmd_install_lib = import_command('install_lib') cmd_install_data = import_command('install_data') from distutils.errors import DistutilsError from distutils.errors import DistutilsSetupError from distutils.errors import DistutilsPlatformError from distutils.errors import DistutilsOptionError from distutils.errors import CCompilerError # ----------------------------------------------------------------------------- # Distribution class supporting a 'executables' keyword class Distribution(cls_Distribution): def __init__ (self, attrs=None): # support for pkg data self.package_data = {} # PEP 314 self.provides = None self.requires = None self.obsoletes = None # supports 'executables' keyword self.executables = None cls_Distribution.__init__(self, attrs) def has_executables(self): return self.executables and len(self.executables) > 0 def is_pure (self): return (cls_Distribution.is_pure(self) and not self.has_executables()) # Extension class class Extension(cls_Extension): def __init__ (self, **kw): optional = kw.pop('optional', None) configure = kw.pop('configure', None) cls_Extension.__init__(self, **kw) self.optional = optional self.configure = configure # Library class class Library(Extension): def __init__ (self, **kw): kind = kw.pop('kind', "static") package = kw.pop('package', None) dest_dir = kw.pop('dest_dir', None) Extension.__init__(self, **kw) self.kind = kind self.package = package self.dest_dir = dest_dir # Executable class class Executable(Extension): def __init__ (self, **kw): package = kw.pop('package', None) dest_dir = kw.pop('dest_dir', None) Extension.__init__(self, **kw) self.package = package self.dest_dir = dest_dir # setup function def setup(**attrs): if setuptools: from setuptools import setup as fcn_setup else: from distutils.core import setup as fcn_setup if 'distclass' not in attrs: attrs['distclass'] = Distribution if 'cmdclass' not in attrs: attrs['cmdclass'] = {} cmdclass = attrs['cmdclass'] for cmd in ( config, build, install, clean, test, sdist, build_py, build_clib, build_src, build_ext, build_exe, install_lib, install_data, install_exe, ): if cmd.__name__ not in cmdclass: cmdclass[cmd.__name__] = cmd return fcn_setup(**attrs) # ----------------------------------------------------------------------------- # A minimalistic MPI program :-) ConfigTest = """\ int main(int argc, char **argv) { int ierr; (void)argc; (void)argv; ierr = MPI_Init(&argc, &argv); if (ierr) return -1; ierr = MPI_Finalize(); if (ierr) return -1; return 0; } """ class config(cmd_config.config): user_options = cmd_config.config.user_options + cmd_mpi_opts def initialize_options(self): cmd_config.config.initialize_options(self) cmd_initialize_mpi_options(self) self.noisy = 0 def finalize_options(self): cmd_config.config.finalize_options(self) if not self.noisy: self.dump_source = 0 def _clean(self, *a, **kw): if sys.platform.startswith('win'): for fn in 
('_configtest.exe.manifest', ): if os.path.exists(fn): self.temp_files.append(fn) cmd_config.config._clean(self, *a, **kw) def check_header (self, header, headers=None, include_dirs=None): if headers is None: headers = [] log.info("checking for header '%s' ..." % header) body = "int main(int n, char**v) { (void)n; (void)v; return 0; }" ok = self.try_compile(body, list(headers) + [header], include_dirs) log.info(ok and 'success!' or 'failure.') return ok def check_macro (self, macro, headers=None, include_dirs=None): log.info("checking for macro '%s' ..." % macro) body = ("#ifndef %s\n" "#error macro '%s' not defined\n" "#endif\n") % (macro, macro) body += "int main(int n, char**v) { (void)n; (void)v; return 0; }" ok = self.try_compile(body, headers, include_dirs) return ok def check_library (self, library, library_dirs=None, headers=None, include_dirs=None, other_libraries=[], lang="c"): if sys.platform == "darwin": self.compiler.linker_exe.append('-flat_namespace') self.compiler.linker_exe.append('-undefined') self.compiler.linker_exe.append('suppress') log.info("checking for library '%s' ..." % library) body = "int main(int n, char**v) { (void)n; (void)v; return 0; }" ok = self.try_link(body, headers, include_dirs, [library]+other_libraries, library_dirs, lang=lang) if sys.platform == "darwin": self.compiler.linker_exe.remove('-flat_namespace') self.compiler.linker_exe.remove('-undefined') self.compiler.linker_exe.remove('suppress') return ok def check_function (self, function, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=0, call=0, lang="c"): log.info("checking for function '%s' ..." % function) body = [] if decl: if call: proto = "int %s (void);" else: proto = "int %s;" if lang == "c": proto = "\n".join([ "#ifdef __cplusplus", "extern \"C\"", "#endif", proto]) body.append(proto % function) body.append( "int main (int n, char**v) {") if call: body.append(" (void)%s();" % function) else: body.append(" %s;" % function) body.append( " (void)n; (void)v;") body.append( " return 0;") body.append( "}") body = "\n".join(body) + "\n" ok = self.try_link(body, headers, include_dirs, libraries, library_dirs, lang=lang) return ok def check_symbol (self, symbol, type="int", headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=0, lang="c"): log.info("checking for symbol '%s' ..." % symbol) body = [] if decl: body.append("%s %s;" % (type, symbol)) body.append("int main (int n, char**v) {") body.append(" %s s; s = %s; (void)s;" % (type, symbol)) body.append(" (void)n; (void)v;") body.append(" return 0;") body.append("}") body = "\n".join(body) + "\n" ok = self.try_link(body, headers, include_dirs, libraries, library_dirs, lang=lang) return ok def check_function_call (self, function, args='', headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c"): log.info("checking for function '%s' ..." 
% function) body = [] body.append("int main (int n, char**v) {") body.append(" (void)%s(%s);" % (function, args)) body.append(" (void)n; (void)v;") body.append(" return 0;") body.append("}") body = "\n".join(body) + "\n" ok = self.try_link(body, headers, include_dirs, libraries, library_dirs, lang=lang) return ok check_hdr = check_header check_lib = check_library check_func = check_function check_sym = check_symbol def run (self): # config = configuration(self, verbose=True) # test MPI C compiler self.compiler = getattr( self.compiler, 'compiler_type', self.compiler) self._check_compiler() configure_compiler(self.compiler, config, lang='c') self.try_link(ConfigTest, headers=['mpi.h'], lang='c') # test MPI C++ compiler self.compiler = getattr( self.compiler, 'compiler_type', self.compiler) self._check_compiler() configure_compiler(self.compiler, config, lang='c++') self.try_link(ConfigTest, headers=['mpi.h'], lang='c++') class build(cmd_build.build): user_options = cmd_build.build.user_options + cmd_mpi_opts boolean_options = cmd_build.build.boolean_options user_options += [( 'inplace', 'i', "ignore build-lib and put compiled extensions into the source " "directory alongside your pure Python modules", )] boolean_options += ['inplace'] def initialize_options(self): cmd_build.build.initialize_options(self) cmd_initialize_mpi_options(self) self.inplace = None def finalize_options(self): cmd_build.build.finalize_options(self) config_cmd = self.get_finalized_command('config') if isinstance(config_cmd, config): cmd_set_undefined_mpi_options(self, 'config') if self.inplace is None: self.inplace = False def has_executables (self): return self.distribution.has_executables() sub_commands = \ [('build_src', lambda *args: True)] + \ cmd_build.build.sub_commands + \ [('build_exe', has_executables)] # XXX disable build_exe subcommand !!! del sub_commands[-1] class build_src(Command): description = "build C sources from Cython files" user_options = [ ('force', 'f', "forcibly build everything (ignore file timestamps)"), ] boolean_options = ['force'] def initialize_options(self): self.force = False def finalize_options(self): self.set_undefined_options('build', ('force', 'force'), ) def run(self): pass if sys.version_info[0] >= 3: build_py = cmd_build_py.build_py else: def get_fixers_3to2(): import re import lib3to2 try: loader = lib3to2.__loader__ files = loader._files.keys() except AttributeError: from lib3to2.build import refactor return refactor.get_fixers_from_package('lib3to2.fixes') fixers = [] pattern = re.compile(r'lib3to2/fixes/(fix_.*)\.py$') for f in files: m = pattern.match(f) if m: fixer = 'lib3to2.fixes.' + m.groups()[0] fixers.append(fixer) return fixers def get_fixers_extra(): from lib2to3 import fixer_base from lib2to3.fixer_util import Name class FixModSpecAttr(fixer_base.BaseFix): PATTERN = """ power< name='__spec__' trailer< '.' 
attr=('parent') > > """ MAP = { 'parent': '__package__', } def transform(self, node, results): name = results["name"] attr = results["attr"] newval = self.MAP[attr.value] node.replace(Name(newval, prefix=name.prefix)) import sys import lib3to2.fixes modname = lib3to2.fixes.__name__ + '.fix_mod_spec_attr' module = type(sys)(modname) module.FixModSpecAttr = FixModSpecAttr setattr(lib3to2.fixes, modname, module) sys.modules[modname] = module return [modname] def run_3to2(files, fixer_names=None, options=None, explicit=None, unwanted=None): from lib3to2.fixes import fix_imports from lib3to2.build import DistutilsRefactoringTool if not files: return if fixer_names is None: fixer_names = get_fixers_3to2() for fixer in get_fixers_extra(): fixer_names.append(fixer) if unwanted is not None: fixer_names = list(fixer_names) for fixer in unwanted: if fixer in fixer_names: fixer_names.remove(fixer) r = DistutilsRefactoringTool( fixer_names, options=options, explicit=explicit) try: fix_imports.MAPPING['queue'] = 'queue' r.refactor(files, write=True) finally: fix_imports.MAPPING['queue'] = 'Queue' class Mixin3to2: fixer_names = None options = None explicit = [ 'lib3to2.fixes.fix_printfunction', ] unwanted = [ 'lib3to2.fixes.fix_str', 'lib3to2.fixes.fix_with', 'lib3to2.fixes.fix_print', 'lib3to2.fixes.fix_except', 'lib3to2.fixes.fix_absimport', ] def run_3to2(self, files): return run_3to2(files, self.fixer_names, self.options, self.explicit, self.unwanted) class build_py(cmd_build_py.build_py, Mixin3to2): def run(self): self.updated_files = [] # build_py code if self.py_modules: self.build_modules() if self.packages: self.build_packages() self.build_package_data() # 3to2 self.run_3to2(self.updated_files) # build_py code self.byte_compile(self.get_outputs(include_bytecode=0)) def build_module(self, module, module_file, package): super_build_module = cmd_build_py.build_py.build_module ret = super_build_module(self, module, module_file, package) if ret[1]: self.updated_files.append(ret[0]) return ret # Command class to build libraries class build_clib(cmd_build_clib.build_clib): user_options = [ ('build-clib-a=', 's', "directory to build C/C++ static libraries to"), ('build-clib-so=', 's', "directory to build C/C++ shared libraries to"), ] user_options += cmd_build_clib.build_clib.user_options + cmd_mpi_opts def initialize_options (self): self.libraries = None self.libraries_a = [] self.libraries_so = [] self.library_dirs = None self.rpath = None self.link_objects = None self.build_lib = None self.build_clib_a = None self.build_clib_so = None cmd_build_clib.build_clib.initialize_options(self) cmd_initialize_mpi_options(self) def finalize_options (self): cmd_build_clib.build_clib.finalize_options(self) build_cmd = self.get_finalized_command('build') if isinstance(build_cmd, build): cmd_set_undefined_mpi_options(self, 'build') # self.set_undefined_options('build', ('build_lib', 'build_lib'), ('build_lib', 'build_clib_a'), ('build_lib', 'build_clib_so')) # if self.libraries: libraries = self.libraries[:] self.libraries = [] self.check_library_list (libraries) for i, lib in enumerate(libraries): if isinstance(lib, Library): if lib.kind == "static": self.libraries_a.append(lib) else: self.libraries_so.append(lib) else: self.libraries.append(lib) def check_library_list (self, libraries): ListType, TupleType = type([]), type(()) if not isinstance(libraries, ListType): raise DistutilsSetupError( "'libraries' option must be a list of " "Library instances or 2-tuples") for lib in libraries: # if isinstance(lib, 
Library): lib_name = lib.name build_info = lib.__dict__ elif isinstance(lib, TupleType) and len(lib) == 2: lib_name, build_info = lib else: raise DistutilsSetupError( "each element of 'libraries' option must be an " "Library instance or 2-tuple") # if not isinstance(lib_name, str): raise DistutilsSetupError( "first element of each tuple in 'libraries' " "must be a string (the library name)") if '/' in lib_name or (os.sep != '/' and os.sep in lib_name): raise DistutilsSetupError( "bad library name '%s': " "may not contain directory separators" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( "second element of each tuple in 'libraries' " "must be a dictionary (build info)") lib_type = build_info.get('kind', 'static') if lib_type not in ('static', 'shared', 'dylib'): raise DistutilsSetupError( "in 'kind' option (library '%s'), " "'kind' must be one of " " \"static\", \"shared\", \"dylib\"" % lib_name) sources = build_info.get('sources') if (sources is None or type(sources) not in (ListType, TupleType)): raise DistutilsSetupError( "in 'libraries' option (library '%s'), " "'sources' must be present and must be " "a list of source filenames" % lib_name) depends = build_info.get('depends') if (depends is not None and type(depends) not in (ListType, TupleType)): raise DistutilsSetupError( "in 'libraries' option (library '%s'), " "'depends' must be a list " "of source filenames" % lib_name) def run (self): cmd_build_clib.build_clib.run(self) if (not self.libraries_a and not self.libraries_so): return # from distutils.ccompiler import new_compiler self.compiler = new_compiler(compiler=self.compiler, dry_run=self.dry_run, force=self.force) # if self.define is not None: for (name, value) in self.define: self.compiler.define_macro(name, value) if self.undef is not None: for macro in self.undef: self.compiler.undefine_macro(macro) if self.include_dirs is not None: self.compiler.set_include_dirs(self.include_dirs) if self.library_dirs is not None: self.compiler.set_library_dirs(self.library_dirs) if self.rpath is not None: self.compiler.set_runtime_library_dirs(self.rpath) if self.link_objects is not None: self.compiler.set_link_objects(self.link_objects) # config = configuration(self, verbose=True) configure_compiler(self.compiler, config) if self.compiler.compiler_type == "unix": try: del self.compiler.shared_lib_extension except: pass # self.build_libraries(self.libraries) self.build_libraries(self.libraries_a) self.build_libraries(self.libraries_so) def build_libraries (self, libraries): for lib in libraries: # old-style if not isinstance(lib, Library): cmd_build_clib.build_clib.build_libraries(self, [lib]) continue # new-style try: self.build_library(lib) except (DistutilsError, CCompilerError): if not lib.optional: raise e = sys.exc_info()[1] self.warn('%s' % e) self.warn('building optional library "%s" failed' % lib.name) def config_library (self, lib): if lib.configure: config_cmd = self.get_finalized_command('config') config_cmd.compiler = self.compiler # fix compiler return lib.configure(lib, config_cmd) def build_library(self, lib): from distutils.dep_util import newer_group sources = [convert_path(p) for p in lib.sources] depends = [convert_path(p) for p in lib.depends] depends = sources + depends if lib.kind == "static": build_dir = self.build_clib_a else: build_dir = self.build_clib_so lib_fullpath = self.get_lib_fullpath(lib, build_dir) if not (self.force or newer_group(depends, lib_fullpath, 'newer')): log.debug("skipping '%s' %s library (up-to-date)", lib.name, 
                      lib.kind)
            return
        ok = self.config_library(lib)
        log.info("building '%s' %s library", lib.name, lib.kind)
        # First, compile the source code to object files in the library
        # directory. (This should probably change to putting object
        # files in a temporary build directory.)
        macros = lib.define_macros[:]
        for undef in lib.undef_macros:
            macros.append((undef,))
        objects = self.compiler.compile(
            sources,
            depends=lib.depends,
            output_dir=self.build_temp,
            macros=macros,
            include_dirs=lib.include_dirs,
            extra_preargs=None,
            extra_postargs=lib.extra_compile_args,
            debug=self.debug,
        )
        if lib.kind == "static":
            # Now "link" the object files together
            # into a static library.
            self.compiler.create_static_lib(
                objects,
                lib.name,
                output_dir=os.path.dirname(lib_fullpath),
                debug=self.debug,
            )
        else:
            extra_objects = lib.extra_objects[:]
            export_symbols = lib.export_symbols[:]
            extra_link_args = lib.extra_link_args[:]
            extra_preargs = None
            objects.extend(extra_objects)
            if (self.compiler.compiler_type == 'msvc' and
                export_symbols is not None):
                output_dir = os.path.dirname(lib_fullpath)
                implib_filename = self.compiler.library_filename(lib.name)
                implib_file = os.path.join(output_dir, lib_fullpath)
                extra_link_args.append ('/IMPLIB:' + implib_file)
            # Detect target language, if not provided
            src_language = self.compiler.detect_language(sources)
            language = (lib.language or src_language)
            # Now "link" the object files together
            # into a shared library.
            if sys.platform == 'darwin':
                linker_so = self.compiler.linker_so[:]
                while '-bundle' in self.compiler.linker_so:
                    pos = self.compiler.linker_so.index('-bundle')
                    self.compiler.linker_so[pos] = '-shared'
                install_name = os.path.basename(lib_fullpath)
                extra_preargs = ['-install_name', install_name]
            if sys.platform.startswith('linux'):
                extra_preargs = ['-Wl,--no-as-needed']
            self.compiler.link(
                self.compiler.SHARED_LIBRARY,
                objects, lib_fullpath,
                #
                libraries=lib.libraries,
                library_dirs=lib.library_dirs,
                runtime_library_dirs=lib.runtime_library_dirs,
                export_symbols=export_symbols,
                extra_preargs=extra_preargs,
                extra_postargs=extra_link_args,
                debug=self.debug,
                target_lang=language,
            )
            if sys.platform == 'darwin':
                self.compiler.linker_so = linker_so
        return

    def get_lib_fullpath (self, lib, build_dir):
        package_dir = (lib.package or '').split('.')
        dest_dir = convert_path(lib.dest_dir or '')
        output_dir = os.path.join(build_dir, *package_dir+[dest_dir])
        lib_type = lib.kind
        if sys.platform != 'darwin':
            if lib_type == 'dylib':
                lib_type = 'shared'
        lib_fullpath = self.compiler.library_filename(
            lib.name, lib_type=lib_type, output_dir=output_dir)
        return lib_fullpath

    def get_source_files (self):
        filenames = cmd_build_clib.build_clib.get_source_files(self)
        self.check_library_list(self.libraries)
        self.check_library_list(self.libraries_a)
        self.check_library_list(self.libraries_so)
        for (lib_name, build_info) in self.libraries:
            filenames.extend(build_info.get('sources', []))
        for lib in self.libraries_so + self.libraries_a:
            filenames.extend(lib.sources)
        return filenames

    def get_outputs (self):
        outputs = []
        for lib in self.libraries_a:
            lib_fullpath = self.get_lib_fullpath(lib, self.build_clib_a)
            outputs.append(lib_fullpath)
        for lib in self.libraries_so:
            lib_fullpath = self.get_lib_fullpath(lib, self.build_clib_so)
            outputs.append(lib_fullpath)
        return outputs


# Command class to build extension modules

class build_ext(cmd_build_ext.build_ext):

    user_options = cmd_build_ext.build_ext.user_options + cmd_mpi_opts

    def initialize_options(self):
        cmd_build_ext.build_ext.initialize_options(self)
        cmd_initialize_mpi_options(self)
        self.inplace = None

    def finalize_options(self):
        self.set_undefined_options('build',
                                   ('inplace', 'inplace'))
        cmd_build_ext.build_ext.finalize_options(self)
        build_cmd = self.get_finalized_command('build')
        if isinstance(build_cmd, build):
            cmd_set_undefined_mpi_options(self, 'build')
        #
        if ((sys.platform.startswith('linux') or
             sys.platform.startswith('gnu') or
             sys.platform.startswith('sunos')) and
            sysconfig.get_config_var('Py_ENABLE_SHARED')):
            # Remove /lib[64]/pythonX.Y/config
            libdir = os.path.dirname(sysconfig.get_makefile_filename())
            if libdir in self.library_dirs:
                self.library_dirs.remove(libdir)
            # Add /lib[64]
            libdir = sysconfig.get_config_var("LIBDIR")
            if libdir not in self.library_dirs:
                self.library_dirs.append(libdir)
            if libdir not in self.rpath:
                self.rpath.append(libdir)
            # Special-case
            if sys.exec_prefix == '/usr':
                self.library_dirs.remove(libdir)
                self.rpath.remove(libdir)

    def run (self):
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            if build_clib.libraries:
                build_clib.run()
        cmd_build_ext.build_ext.run(self)

    def build_extensions(self):
        from copy import deepcopy
        # First, sanity-check the 'extensions' list
        self.check_extensions_list(self.extensions)
        # customize compiler
        self.compiler_sys = deepcopy(self.compiler)
        customize_compiler(self.compiler_sys)
        # parse configuration file and configure compiler
        self.compiler_mpi = self.compiler
        self.config = configuration(self, verbose=True)
        configure_compiler(self.compiler, self.config)
        # extra configuration, check for all MPI symbols
        if self.configure:
            log.info('testing for missing MPI symbols')
            config_cmd = self.get_finalized_command('config')
            config_cmd.compiler = self.compiler  # fix compiler
            configure = ConfigureMPI(config_cmd)
            results = configure.run()
            configure.dump(results)
            #
            macro = 'HAVE_CONFIG_H'
            log.info("defining preprocessor macro '%s'" % macro)
            self.compiler.define_macro(macro, 1)
        # build extensions
        for ext in self.extensions:
            try:
                self.build_extension(ext)
            except (DistutilsError, CCompilerError):
                if not ext.optional:
                    raise
                e = sys.exc_info()[1]
                self.warn('%s' % e)
                exe = isinstance(ext, Executable)
                knd = 'executable' if exe else 'extension'
                self.warn('building optional %s "%s" failed' % (knd, ext.name))

    def config_extension (self, ext):
        configure = getattr(ext, 'configure', None)
        if configure:
            config_cmd = self.get_finalized_command('config')
            config_cmd.compiler = self.compiler  # fix compiler
            configure(ext, config_cmd)

    def build_extension (self, ext):
        from distutils.dep_util import newer_group
        fullname = self.get_ext_fullname(ext.name)
        filename = os.path.join(
            self.build_lib, self.get_ext_filename(fullname))
        depends = ext.sources + ext.depends
        if not (self.force or newer_group(depends, filename, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        #
        # XXX -- this is a Vile HACK!
        self.compiler = self.compiler_mpi
        if ext.name == 'mpi4py.dl':
            self.compiler = self.compiler_sys
        #
        self.config_extension(ext)
        cmd_build_ext.build_ext.build_extension(self, ext)
        #
        # XXX -- this is a Vile HACK!
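        # The hack below writes the effective MPI configuration to an
        # 'mpi.cfg' file placed next to the built 'mpi4py.MPI' extension
        # module and, on Windows, copies 'src/mpi.pth' into the build tree.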
if ext.name == 'mpi4py.MPI': dest_dir = os.path.dirname(filename) self.mkpath(dest_dir) mpi_cfg = os.path.join(dest_dir, 'mpi.cfg') log.info("writing %s" % mpi_cfg) if not self.dry_run: self.config.dump(filename=mpi_cfg) # if ext.name == 'mpi4py.MPI' and sys.platform == 'win32': confdir = os.path.dirname(__file__) topdir = os.path.dirname(confdir) srcdir = os.path.join(topdir, 'src') pthfile = 'mpi.pth' source = os.path.join(srcdir, pthfile) target = os.path.join(self.build_lib, pthfile) if os.path.exists(source): log.info("writing %s", target) copy_file( source, target, verbose=False, dry_run=self.dry_run, ) def copy_extensions_to_source(self): build_py = self.get_finalized_command('build_py') cmd_build_ext.build_ext.copy_extensions_to_source(self) for ext in self.extensions: if ext.name == 'mpi4py.MPI': fullname = self.get_ext_fullname(ext.name) filename = self.get_ext_filename(fullname) dirname = os.path.dirname(filename) dest_dir = os.path.join(self.build_lib, dirname) regular_file = os.path.join(dest_dir, 'mpi.cfg') package = fullname.rpartition('.')[0] package_dir = build_py.get_package_dir(package) inplace_file = os.path.join(package_dir, 'mpi.cfg') self.copy_file(regular_file, inplace_file, level=self.verbose) def get_outputs(self): outputs = cmd_build_ext.build_ext.get_outputs(self) for ext in self.extensions: # XXX -- this is a Vile HACK! if ext.name == 'mpi4py.MPI': fullname = self.get_ext_fullname(ext.name) filename = self.get_ext_filename(fullname) dirname = os.path.dirname(filename) dest_dir = os.path.join(self.build_lib, dirname) output_file = os.path.join(dest_dir, 'mpi.cfg') outputs.append(output_file) if ext.name == 'mpi4py.MPI' and sys.platform == 'win32': pthfile = 'mpi.pth' output_file = os.path.join(self.build_lib, pthfile) outputs.append(output_file) return outputs # Command class to build executables class build_exe(build_ext): description = "build binary executable components" user_options = [ ('build-exe=', None, "build directory for executable components"), ] + build_ext.user_options def initialize_options (self): build_ext.initialize_options(self) self.build_base = None self.build_exe = None self.inplace = None def finalize_options (self): build_ext.finalize_options(self) self.configure = None self.set_undefined_options('build', ('build_base','build_base'), ('build_lib', 'build_exe')) self.set_undefined_options('build_ext', ('inplace', 'inplace')) self.executables = self.distribution.executables # XXX This is a hack self.extensions = self.distribution.executables self.get_ext_filename = self.get_exe_filename self.check_extensions_list = self.check_executables_list self.build_extension = self.build_executable self.copy_extensions_to_source = self.copy_executables_to_source self.build_lib = self.build_exe def get_exe_filename(self, exe_name): exe_ext = sysconfig.get_config_var('EXE') or '' return exe_name + exe_ext def check_executables_list (self, executables): ListType, TupleType = type([]), type(()) if type(executables) is not ListType: raise DistutilsSetupError( "'executables' option must be a list of Executable instances") for exe in executables: if not isinstance(exe, Executable): raise DistutilsSetupError( "'executables' items must be Executable instances") if (exe.sources is None or type(exe.sources) not in (ListType, TupleType)): raise DistutilsSetupError( ("in 'executables' option (executable '%s'), " + "'sources' must be present and must be " + "a list of source filenames") % exe.name) def get_exe_fullpath(self, exe, build_dir=None): build_dir = 
build_dir or self.build_exe package_dir = (exe.package or '').split('.') dest_dir = convert_path(exe.dest_dir or '') output_dir = os.path.join(build_dir, *package_dir+[dest_dir]) exe_filename = self.get_exe_filename(exe.name) return os.path.join(output_dir, exe_filename) def config_executable (self, exe): build_ext.config_extension(self, exe) def build_executable (self, exe): from distutils.dep_util import newer_group sources = list(exe.sources) depends = list(exe.depends) exe_fullpath = self.get_exe_fullpath(exe) depends = sources + depends if not (self.force or newer_group(depends, exe_fullpath, 'newer')): log.debug("skipping '%s' executable (up-to-date)", exe.name) return self.config_executable(exe) log.info("building '%s' executable", exe.name) # Next, compile the source code to object files. # XXX not honouring 'define_macros' or 'undef_macros' -- the # CCompiler API needs to change to accommodate this, and I # want to do one thing at a time! macros = exe.define_macros[:] for undef in exe.undef_macros: macros.append((undef,)) # Two possible sources for extra compiler arguments: # - 'extra_compile_args' in Extension object # - CFLAGS environment variable (not particularly # elegant, but people seem to expect it and I # guess it's useful) # The environment variable should take precedence, and # any sensible compiler will give precedence to later # command line args. Hence we combine them in order: extra_args = exe.extra_compile_args[:] objects = self.compiler.compile( sources, output_dir=self.build_temp, macros=macros, include_dirs=exe.include_dirs, debug=self.debug, extra_postargs=extra_args, depends=exe.depends) self._built_objects = objects[:] # Now link the object files together into a "shared object" -- # of course, first we have to figure out all the other things # that go into the mix. 
if exe.extra_objects: objects.extend(exe.extra_objects) extra_args = exe.extra_link_args[:] # Get special linker flags for building a executable with # bundled Python library, also fix location of needed # python.exp file on AIX ldshflag = sysconfig.get_config_var('LINKFORSHARED') or '' ldshflag = ldshflag.replace('-Xlinker ', '-Wl,') if sys.platform == 'darwin': # fix wrong framework paths fwkprefix = sysconfig.get_config_var('PYTHONFRAMEWORKPREFIX') fwkdir = sysconfig.get_config_var('PYTHONFRAMEWORKDIR') if fwkprefix and fwkdir and fwkdir != 'no-framework': for flag in split_quoted(ldshflag): if flag.startswith(fwkdir): fwkpath = os.path.join(fwkprefix, flag) ldshflag = ldshflag.replace(flag, fwkpath) if sys.platform.startswith('aix'): python_lib = sysconfig.get_python_lib(standard_lib=1) python_exp = os.path.join(python_lib, 'config', 'python.exp') ldshflag = ldshflag.replace('Modules/python.exp', python_exp) # Detect target language, if not provided language = exe.language or self.compiler.detect_language(sources) self.compiler.link( self.compiler.EXECUTABLE, objects, exe_fullpath, output_dir=None, libraries=self.get_libraries(exe), library_dirs=exe.library_dirs, runtime_library_dirs=exe.runtime_library_dirs, extra_preargs=split_quoted(ldshflag), extra_postargs=extra_args, debug=self.debug, target_lang=language) def copy_executables_to_source(self): build_py = self.get_finalized_command('build_py') root_dir = build_py.get_package_dir('') for exe in self.executables: src = self.get_exe_fullpath(exe) dest = self.get_exe_fullpath(exe, root_dir) self.mkpath(os.path.dirname(dest)) copy_file( src, dest, verbose=self.verbose, dry_run=self.dry_run ) def get_outputs (self): outputs = [] for exe in self.executables: outputs.append(self.get_exe_fullpath(exe)) return outputs class install(cmd_install.install): def run(self): cmd_install.install.run(self) def has_lib (self): return (cmd_install.install.has_lib(self) and self.has_exe()) def has_exe (self): return self.distribution.has_executables() sub_commands = \ cmd_install.install.sub_commands[:] + \ [('install_exe', has_exe)] # XXX disable install_exe subcommand !!! 
del sub_commands[-1] class install_lib(cmd_install_lib.install_lib): def get_outputs(self): outputs = cmd_install_lib.install_lib.get_outputs(self) for (build_cmd, build_dir) in (('build_clib', 'build_lib'), ('build_exe', 'build_exe')): outs = self._mutate_outputs(1, build_cmd, build_dir, self.install_dir) build_cmd = self.get_finalized_command(build_cmd) build_files = build_cmd.get_outputs() for out in outs: if os.path.exists(out): outputs.append(out) return outputs class install_data(cmd_install_data.install_data): def finalize_options (self): self.set_undefined_options('install', ('install_lib', 'install_dir'), ('root', 'root'), ('force', 'force'), ) class install_exe(cmd_install_lib.install_lib): description = "install binary executable components" user_options = [ ('install-dir=', 'd', "directory to install to"), ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('skip-build', None, "skip the build steps"), ] boolean_options = ['force', 'skip-build'] negative_opt = { } def initialize_options (self): self.install_dir = None self.build_dir = None self.force = 0 self.skip_build = None def finalize_options (self): self.set_undefined_options('build_exe', ('build_exe', 'build_dir')) self.set_undefined_options('install', ('force', 'force'), ('skip_build', 'skip_build'), ('install_scripts', 'install_dir')) def run(self): self.build() self.install() def build (self): if not self.skip_build: if self.distribution.has_executables(): self.run_command('build_exe') def install (self): self.outfiles = [] if self.distribution.has_executables(): build_exe = self.get_finalized_command('build_exe') for exe in build_exe.executables: exe_fullpath = build_exe.get_exe_fullpath(exe) exe_filename = os.path.basename(exe_fullpath) if exe_filename.startswith("python-") and os.name == 'posix': x, y = sys.version_info[:2] install_name = exe_filename.replace( "python-", "python%s.%s-" % sys.version_info[:2]) link = None else: install_name = exe_filename link = None source = exe_fullpath target = os.path.join(self.install_dir, install_name) self.mkpath(self.install_dir) out, done = self.copy_file(source, target, link=link) self.outfiles.append(out) def get_outputs (self): return self.outfiles def get_inputs (self): inputs = [] if self.distribution.has_executables(): build_exe = self.get_finalized_command('build_exe') inputs.extend(build_exe.get_outputs()) return inputs class test(Command): description = "run the test suite" user_options = [ ('args=', 'a', "options"), ] def initialize_options(self): self.args = None def finalize_options(self): if self.args: self.args = split_quoted(self.args) else: self.args = [] def run(self): pass class sdist(cmd_sdist.sdist): def run (self): build_src = self.get_finalized_command('build_src') build_src.run() cmd_sdist.sdist.run(self) class clean(cmd_clean.clean): description = "clean up temporary files from 'build' command" user_options = \ cmd_clean.clean.user_options[:2] + [ ('build-exe=', None, "build directory for executable components " "(default: 'build_exe.build-exe')"), ] + cmd_clean.clean.user_options[2:] def initialize_options(self): cmd_clean.clean.initialize_options(self) self.build_exe = None def finalize_options(self): cmd_clean.clean.finalize_options(self) self.set_undefined_options('build_exe', ('build_exe', 'build_exe')) def run(self): from distutils.dir_util import remove_tree # remove the build/temp. 
directory # (unless it's already gone) if os.path.exists(self.build_temp): remove_tree(self.build_temp, dry_run=self.dry_run) else: log.debug("'%s' does not exist -- can't clean it", self.build_temp) if self.all: # remove build directories for directory in ( self.build_lib, self.build_exe, self.build_scripts, self.bdist_base, ): if os.path.exists(directory): remove_tree(directory, dry_run=self.dry_run) else: log.debug("'%s' does not exist -- can't clean it", directory) # just for the heck of it, try to remove the base build directory: # we might have emptied it right now, but if not we don't care if not self.dry_run: try: os.rmdir(self.build_base) log.info("removing '%s'", self.build_base) except OSError: pass if self.all: # remove the .egg_info directory try: egg_info = self.get_finalized_command('egg_info').egg_info if os.path.exists(egg_info): remove_tree(egg_info, dry_run=self.dry_run) else: log.debug("'%s' does not exist -- can't clean it", egg_info) except DistutilsError: pass # ----------------------------------------------------------------------------- if setuptools: try: from setuptools.command import egg_info as mod_egg_info _FileList = mod_egg_info.FileList class FileList(_FileList): def process_template_line(self, line): level = log.set_threshold(log.ERROR) try: _FileList.process_template_line(self, line) finally: log.set_threshold(level) mod_egg_info.FileList = FileList except: pass # ----------------------------------------------------------------------------- try: import msilib if not hasattr(msilib, 'Win64'): if hasattr(msilib, 'AMD64'): msilib.Win64 = msilib.AMD64 Directory_make_short = msilib.Directory.make_short def make_short(self, file): parts = file.split('.') if len(parts) > 1: file = '_'.join(parts[:-1])+'.'+parts[-1] return Directory_make_short(self, file) msilib.Directory.make_short = make_short except: pass # ----------------------------------------------------------------------------- mpi4py-3.1.6/conf/mpiregexes.py000066400000000000000000000053301460670727200164260ustar00rootroot00000000000000import re def anyof(*args): return r'(?:%s)' % '|'.join(args) def join(*args): tokens = [] for tok in args: if isinstance(tok, (list, tuple)): tok = '(%s)' % r'\s*'.join(tok) tokens.append(tok) return r'\s*'.join(tokens) lparen = r'\(' rparen = r'\)' colon = r'\:' asterisk = r'\*' ws = r'\s*' sol = r'^' eol = r'$' opt = r'?' enum = join('enum', colon) typedef = 'ctypedef' pointer = asterisk struct = join(typedef, 'struct') basic_type = r'(?:void|int|char\s*\*{1,3})' integral_type = r'MPI_(?:Aint|Offset|Count|Fint)' struct_type = r'MPI_(?:Status|F08_status)' opaque_type = r'MPI_(?:Datatype|Request|Message|Op|Info|Group|Errhandler|Comm|Win|File)' any_mpi_type = r'(?:%s|%s|%s)' % (struct_type, integral_type, opaque_type) upper_name = r'MPI_[A-Z0-9_]+' camel_name = r'MPI_[A-Z][a-z0-9_]+' usrfun_name = camel_name + r'_(?:function|fn)' arg_list = r'.*' ret_type = r'void|int|double|MPI_Aint' canyint = anyof(r'int', r'long(?:\s+long)?') canyptr = join(r'\w+', pointer+'?') annotation = r'\#\:\=' fallback_value = r'\(?[A-Za-z0-9_\+\-\(\)\*]+\)?' fallback = r'(?:%s)?' 
% join (annotation, [fallback_value]) INTEGRAL_TYPE = join( typedef, [canyint], [integral_type], fallback, eol) STRUCT_TYPE = join( struct, [struct_type], colon+opt, fallback, eol) OPAQUE_TYPE = join( typedef, canyptr, [opaque_type], eol) FUNCTION_TYPE = join( typedef, [ret_type], [camel_name], lparen, [arg_list], rparen, fallback, eol) ENUM_VALUE = join(sol, enum, [upper_name], fallback, eol) HANDLE_VALUE = join(sol, [opaque_type], [upper_name], fallback, eol) BASIC_PTRVAL = join(sol, [basic_type, pointer], [upper_name], fallback, eol) INTEGRAL_PTRVAL = join(sol, [integral_type, pointer], [upper_name], fallback, eol) STRUCT_PTRVAL = join(sol, [struct_type, pointer], [upper_name], fallback, eol) FUNCT_PTRVAL = join(sol, [usrfun_name, pointer], [upper_name], fallback, eol) FUNCTION_PROTO = join(sol, [ret_type], [camel_name], lparen, [arg_list], rparen, fallback, eol) fint_type = r'MPI_Fint' fmpi_type = opaque_type.replace('Datatype', 'Type') c2f_name = fmpi_type+'_c2f' f2c_name = fmpi_type+'_f2c' FUNCTION_C2F = join(sol, [fint_type], [c2f_name], lparen, [opaque_type], rparen, fallback, eol) FUNCTION_F2C = join(sol, [opaque_type], [f2c_name], lparen, [fint_type], rparen, fallback, eol) IGNORE = anyof(join(sol, r'cdef.*', eol), join(sol, struct, r'_mpi_\w+_t', eol), join(sol, 'int', r'MPI_(?:SOURCE|TAG|ERROR)', eol), join(sol, r'#.*', eol), join(sol, eol)) # compile the RE's glb = globals() all = [key for key in dict(glb) if key.isupper()] for key in all: glb[key] = re.compile(glb[key]) mpi4py-3.1.6/conf/mpiscanner.py000066400000000000000000000262341460670727200164230ustar00rootroot00000000000000# Very, very naive RE-based way for collecting declarations inside # 'cdef extern from *' Cython blocks in in source files, and next # generate compatibility headers for MPI-2 partially implemented or # built, or MPI-1 implementations, perhaps providing a subset of MPI-2 from textwrap import dedent from warnings import warn import mpiregexes as Re class Node(object): REGEX = None def match(self, line): m = self.REGEX.search(line) if m: return m.groups() match = classmethod(match) HEADER = None CONFIG = None MISSING = None MISSING_HEAD = """\ #ifndef PyMPI_HAVE_%(name)s #undef %(cname)s """ MISSING_TAIL = """ #endif """ def init(self, name, **kargs): assert name is not None self.name = name self.__dict__.update(kargs) def header(self): line = dedent(self.HEADER) % vars(self) line = line.replace('\n', '') line = line.replace(' ', ' ') return line + '\n' def config(self): return dedent(self.CONFIG) % vars(self) def missing(self, guard=True): if guard: head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) else: head = '#undef %(cname)s\n' tail = '\n\n' body = dedent(self.MISSING) return (head+body+tail) % vars(self) class NodeType(Node): CONFIG = """\ %(ctype)s v; %(ctype)s* p; (void)v; (void)p;""" def __init__(self, ctype): self.init(name=ctype, cname=ctype, ctype=ctype,) class NodeStructType(NodeType): HEADER = """\ typedef struct {%(cfields)s ...; } %(ctype)s;""" MISSING = """\ typedef struct PyMPI_%(ctype)s { %(cfields)s } PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, ctype, cfields): super(NodeStructType, self).__init__(ctype) self.cfields = '\n'.join([' %s %s;' % field for field in cfields]) class NodeFuncType(NodeType): HEADER = """\ typedef %(crett)s (%(cname)s)(%(cargs)s);""" MISSING = """\ typedef %(crett)s (MPIAPI PyMPI_%(cname)s)(%(cargs)s); #define %(cname)s PyMPI_%(cname)s""" def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, 
cname=cname, ctype=cname+'*',) self.crett = crett self.cargs = cargs or 'void' if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class NodeValue(Node): HEADER = """\ const %(ctype)s %(cname)s;""" CONFIG = """\ %(ctype)s v; v = %(cname)s; (void)v;""" MISSING = '#define %(cname)s (%(calias)s)' def __init__(self, ctype, cname, calias): self.init(name=cname, cname=cname, ctype=ctype, calias=calias) if ctype.endswith('*'): ctype = ctype + ' const' self.HEADER = ctype + ' %(cname)s;' class NodePtrVal(NodeValue): MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' def ctypefix(ct): ct = ct.strip() ct = ct.replace('[][3]',' (*)[3]') ct = ct.replace('[]','*') return ct class NodeFuncProto(Node): HEADER = """\ %(crett)s %(cname)s(%(cargs)s);""" CONFIG = """\ %(crett)s v; v = %(cname)s(%(cargscall)s); (void)v;""" MISSING = ' '. join(['#define %(cname)s(%(cargsnamed)s)', 'PyMPI_UNAVAILABLE("%(name)s"%(comma)s%(cargsnamed)s)']) def __init__(self, crett, cname, cargs, calias=None): self.init(name=cname, cname=cname) self.crett = crett self.cargs = cargs or 'void' if cargs == 'void': cargs = '' if cargs: cargs = cargs.split(',') if cargs[-1].strip() == '...': del cargs[-1] else: cargs = [] self.cargstype = cargs nargs = len(cargs) if nargs: self.comma = ',' else: self.comma = '' cargscall = ['(%s)0' % ctypefix(a) for a in cargs] self.cargscall = ','.join(cargscall) cargsnamed = ['a%d' % (a+1) for a in range(nargs)] self.cargsnamed = ','.join(cargsnamed) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class IntegralType(NodeType): REGEX = Re.INTEGRAL_TYPE HEADER = """\ typedef %(cbase)s... %(ctype)s;""" MISSING = """\ typedef %(ctdef)s PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" def __init__(self, cbase, ctype, calias=None): super(IntegralType, self).__init__(ctype) self.cbase = cbase if calias is not None: self.ctdef = calias else: self.ctdef = cbase class StructType(NodeStructType): REGEX = Re.STRUCT_TYPE def __init__(self, ctype, calias=None): cfields = [] if ctype == 'MPI_Status': cnames = ['MPI_SOURCE', 'MPI_TAG', 'MPI_ERROR'] cfields = list(zip(['int']*3, cnames)) super(StructType, self).__init__(ctype, cfields) if calias is not None: self.MISSING = '#define %(cname)s %(calias)s' self.calias = calias class OpaqueType(NodeType): REGEX = Re.OPAQUE_TYPE HEADER = """\ typedef struct{...;} %(ctype)s;""" MISSING = """\ typedef void *PyMPI_%(ctype)s; #define %(ctype)s PyMPI_%(ctype)s""" class FunctionType(NodeFuncType): REGEX = Re.FUNCTION_TYPE class EnumValue(NodeValue): REGEX = Re.ENUM_VALUE def __init__(self, cname, calias): self.init(name=cname, cname=cname, ctype='int', calias=calias) class HandleValue(NodeValue): REGEX = Re.HANDLE_VALUE MISSING = '#define %(cname)s ((%(ctype)s)%(calias)s)' class BasicPtrVal(NodePtrVal): REGEX = Re.BASIC_PTRVAL class IntegralPtrVal(NodePtrVal): REGEX = Re.INTEGRAL_PTRVAL class StructPtrVal(NodePtrVal): REGEX = Re.STRUCT_PTRVAL class FunctionPtrVal(NodePtrVal): REGEX = Re.FUNCT_PTRVAL class FunctionProto(NodeFuncProto): REGEX = Re.FUNCTION_PROTO class FunctionC2F(NodeFuncProto): REGEX = Re.FUNCTION_C2F MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '((%(crett)s)0)']) class FunctionF2C(NodeFuncProto): REGEX = Re.FUNCTION_F2C MISSING = ' '.join(['#define %(cname)s(%(cargsnamed)s)', '%(cretv)s']) def __init__(self, *a, **k): NodeFuncProto.__init__(self, *a, **k) self.cretv = self.crett.upper() + '_NULL' class Scanner(object): NODE_TYPES = [ IntegralType, StructType, 
OpaqueType, HandleValue, EnumValue, BasicPtrVal, IntegralPtrVal, StructPtrVal, FunctionType, FunctionPtrVal, FunctionProto, FunctionC2F, FunctionF2C, ] def __init__(self): self.nodes = [] self.nodemap = {} def parse_file(self, filename): with open(filename) as f: self.parse_lines(f) def parse_lines(self, lines): for line in lines: self.parse_line(line) def parse_line(self, line): if Re.IGNORE.match(line): return nodemap = self.nodemap nodelist = self.nodes for nodetype in self.NODE_TYPES: args = nodetype.match(line) if args: node = nodetype(*args) assert node.name not in nodemap, node.name nodemap[node.name] = len(nodelist) nodelist.append(node) break if not args: warn('unmatched line:\n%s' % line) def __iter__(self): return iter(self.nodes) def __getitem__(self, name): return self.nodes[self.nodemap[name]] def dump_header_h(self, fileobj): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_header_h(f) return for node in self: fileobj.write(node.header()) CONFIG_HEAD = """\ #ifndef PyMPI_CONFIG_H #define PyMPI_CONFIG_H """ CONFIG_MACRO = 'PyMPI_HAVE_%s' CONFIG_TAIL = """\ #endif /* !PyMPI_CONFIG_H */ """ def dump_config_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_config_h(f, suite) return head = dedent(self.CONFIG_HEAD) macro = dedent(self.CONFIG_MACRO) tail = dedent(self.CONFIG_TAIL) fileobj.write(head) if suite is None: for node in self: line = '#undef %s\n' % ((macro % node.name)) fileobj.write(line) else: for name, result in suite: assert name in self.nodemap if result: line = '#define %s 1\n' % ((macro % name)) else: line = '#undef %s\n' % ((macro % name)) fileobj.write(line) fileobj.write(tail) MISSING_HEAD = """\ #ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # if !defined(__cplusplus) || (__GNUC__>3||(__GNUC__==3&&__GNUC_MINOR__>=4)) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif # elif defined(__INTEL_COMPILER) || defined(__ICC) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif #define PyMPI_ERR_UNAVAILABLE (-1431655766) /*0xaaaaaaaa*/ static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) 
{ (void)name; return PyMPI_ERR_UNAVAILABLE; } """ MISSING_TAIL = """\ #endif /* !PyMPI_MISSING_H */ """ def dump_missing_h(self, fileobj, suite): if isinstance(fileobj, str): with open(fileobj, 'w') as f: self.dump_missing_h(f, suite) return head = dedent(self.MISSING_HEAD) tail = dedent(self.MISSING_TAIL) # fileobj.write(head) if suite is None: for node in self: fileobj.write(node.missing()) else: for name, result in suite: node = self[name] if not result: fileobj.write(node.missing()) fileobj.write(tail) # ----------------------------------------- if __name__ == '__main__': import sys, os sources = [os.path.join('src', 'mpi4py', 'libmpi.pxd')] log = lambda msg: sys.stderr.write(msg + '\n') scanner = Scanner() for filename in sources: log('parsing file %s' % filename) scanner.parse_file(filename) log('processed %d definitions' % len(scanner.nodes)) config_h = os.path.join('src', 'lib-mpi', 'config', 'config.h') log('writing file %s' % config_h) scanner.dump_config_h(config_h, None) missing_h = os.path.join('src', 'lib-mpi', 'missing.h') log('writing file %s' % missing_h) scanner.dump_missing_h(missing_h, None) #libmpi_h = os.path.join('.', 'libmpi.h') #log('writing file %s' % libmpi_h) #scanner.dump_header_h(libmpi_h) # ----------------------------------------- mpi4py-3.1.6/conf/mpistubgen.py000066400000000000000000000336071460670727200164430ustar00rootroot00000000000000from textwrap import dedent import inspect def is_function(obj): return ( inspect.isbuiltin(obj) or type(obj) is type(ord) ) def is_method(obj): return ( inspect.ismethoddescriptor(obj) or type(obj) in ( type(str.index), type(str.__add__), type(str.__new__), ) ) def is_classmethod(obj): return ( inspect.isbuiltin(obj) or type(obj).__name__ in ( 'classmethod', 'classmethod_descriptor', ) ) def is_staticmethod(obj): return ( type(obj).__name__ in ( 'staticmethod', ) ) def is_datadescr(obj): return inspect.isdatadescriptor(obj) and not hasattr(obj, 'fget') def is_property(obj): return inspect.isdatadescriptor(obj) and hasattr(obj, 'fget') def is_class(obj): return inspect.isclass(obj) or type(obj) is type(int) class Lines(list): INDENT = " "*4 level = 0 @property def add(self): return self @add.setter def add(self, lines): if lines is None: return if isinstance(lines, str): lines = dedent(lines).strip().split("\n") indent = self.INDENT * self.level for line in lines: self.append(indent + line) def signature(obj): doc = obj.__doc__ sig = doc.split('\n', 1)[0].split('.', 1)[-1] return sig def stubgen_constant(constant): name, value = constant return f"{name}: Final[{type(value).__name__}] = ..." def stubgen_function(function): sig = signature(function) return f"def {sig}: ..." def stubgen_method(method): sig = signature(method) return f"def {sig}: ..." def stubgen_datadescr(datadescr): sig = signature(datadescr) return f"{sig}" def stubgen_property(prop, name=None): sig = signature(prop.fget) pname = name or prop.fget.__name__ ptype = sig.rsplit('->', 1)[-1].strip() return f"{pname}: {ptype}" def stubgen_constructor(cls, name='__init__'): init = (name == '__init__') argname = cls.__name__.lower() argtype = cls.__name__ initarg = f"{argname}: Optional[{argtype}] = None" selfarg = 'self' if init else 'cls' rettype = 'None' if init else argtype arglist = f"{selfarg}, {initarg}" sig = f"{name}({arglist}) -> {rettype}" return f"def {sig}: ..." 
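# Illustrative sketch (not part of the original conf/mpistubgen.py): the
# stubgen_* helpers above rely on signature(), which takes the first line of
# an object's docstring and strips the leading "Class." qualifier.  The class
# and method names below are hypothetical and only show the expected docstring
# convention; this helper is never called by the generator itself.
def _example_signature_usage():
    class _Fake:
        """Comm.Get_rank(self) -> int

        Return the rank of the calling process in the communicator.
        """
    # signature(_Fake) evaluates to "Get_rank(self) -> int", so
    # stubgen_method(_Fake) yields "def Get_rank(self) -> int: ...".
    return stubgen_method(_Fake)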
def stubgen_class(cls, done=None): skip = { '__doc__', '__module__', '__weakref__', '__pyx_vtable__', '__lt__', '__le__', '__ge__', '__gt__', } special = { '__len__' : "__len__(self) -> int", '__bool__': "__bool__(self) -> bool", '__hash__': "__hash__(self) -> int", '__int__': "__int__(self) -> int", '__index__': "__int__(self) -> int", '__str__': "__str__(self) -> str", '__repr__': "__repr__(self) -> str", '__eq__': "__eq__(self, other: object) -> bool", '__ne__': "__ne__(self, other: object) -> bool", } override = OVERRIDE.get(cls.__name__, {}) done = set() if done is None else done lines = Lines() base = cls.__base__ if base is object: lines.add = f"class {cls.__name__}:" else: lines.add = f"class {cls.__name__}({base.__name__}):" lines.level += 1 for name in ('__new__', '__init__'): if name in override: done.add(name) lines.add = override[name] elif name in cls.__dict__: done.add(name) lines.add = stubgen_constructor(cls, name) if '__hash__' in cls.__dict__: if cls.__hash__ is None: done.add('__hash__') dct = cls.__dict__ keys = list(dct.keys()) for name in keys: if name in done: continue if name in skip: continue if name in override: done.add(name) lines.add = override[name] continue if name in special: done.add(name) sig = special[name] lines.add = f"def {sig}: ..." continue attr = getattr(cls, name) if is_method(attr): done.add(name) if name == attr.__name__: obj = dct[name] if is_classmethod(obj): lines.add = f"@classmethod" elif is_staticmethod(obj): lines.add = f"@staticmethod" lines.add = stubgen_method(attr) else: lines.add = f"{name} = {attr.__name__}" continue if is_datadescr(attr): done.add(name) lines.add = stubgen_datadescr(attr) continue if is_property(attr): done.add(name) lines.add = stubgen_property(attr, name) continue leftovers = [name for name in keys if name not in done and name not in skip] assert not leftovers, f"leftovers: {leftovers}" lines.level -= 1 return lines def stubgen_module(module, done=None): skip = { '__doc__', '__name__', '__loader__', '__spec__', '__file__', '__package__', '__builtins__', '__pyx_capi__', } done = set() if done is None else done lines = Lines() keys = list(module.__dict__.keys()) keys.sort(key=lambda name: name.startswith("_")) constants = [ (name, getattr(module, name)) for name in keys if ( name not in done and name not in skip and isinstance(getattr(module, name), int) ) ] for attr in constants: name, value = attr done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = stubgen_constant((name, value)) if constants: lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) if is_class(value): done.add(name) lines.add = stubgen_class(value) lines.add = "" instances = [ (k, getattr(module, k)) for k in keys if ( k not in done and k not in skip and type(getattr(module, k)) is value ) ] for attrname, attrvalue in instances: done.add(attrname) lines.add = stubgen_constant((attrname, attrvalue)) if instances: lines.add = "" continue if is_function(value): done.add(name) if name == value.__name__: lines.add = stubgen_function(value) else: lines.add = f"{name} = {value.__name__}" continue lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = stubgen_constant((name, value)) leftovers = [name for name in keys if name not in done and name not in skip] assert not leftovers, f"leftovers: {leftovers}" return lines IMPORTS = """ from __future__ 
import annotations import sys from threading import Lock from typing import overload from typing import ( Any, Union, Literal, Optional, NoReturn, Final, ) if sys.version_info >= (3, 9): from collections.abc import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) from builtins import ( tuple as Tuple, list as List, dict as Dict, ) else: from typing import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) from typing import ( Tuple as Tuple, List as List, Dict as Dict, ) """ OVERRIDE = { 'BOTTOM': """ class _Bottom(int): ... Bottom = _Bottom BOTTOM: Final[Bottom] = ... """, 'IN_PLACE': """ class _InPlace(int): ... InPlace = _InPlace IN_PLACE: Final[InPlace] = ... """, 'Exception': { '__init__': "def __init__(self, ierr: int = SUCCESS) -> None: ...", "__lt__": "def __lt__(self, other: int) -> bool: ...", "__le__": "def __le__(self, other: int) -> bool: ...", "__gt__": "def __gt__(self, other: int) -> bool: ...", "__ge__": "def __ge__(self, other: int) -> bool: ...", }, 'Info': { '__iter__': "def __iter__(self) -> Iterator[str]: ...", '__getitem__': "def __getitem__(self, item: str) -> str: ...", '__setitem__': "def __setitem__(self, item: str, value: str) -> None: ...", '__delitem__': "def __delitem__(self, item: str) -> None: ...", '__contains__': "def __contains__(self, value: str) -> bool: ...", }, 'Op': { '__call__': "def __call__(self, x: Any, y: Any) -> Any: ...", }, 'memory': { '__new__': """ @overload def __new__(cls) -> memory: ... @overload def __new__(cls, buf: Buffer, /) -> memory: ... """, '__getitem__': """ @overload def __getitem__(self, item: int) -> int: ... @overload def __getitem__(self, item: slice) -> memory: ... """, '__setitem__': """ @overload def __setitem__(self, item: int, value: int) -> None: ... @overload def __setitem__(self, item: slice, value: Buffer) -> None: ... """, '__delitem__': None, }, 'Pickle': { '__new__': None, '__init__': """ @overload def __init__(self, dumps: Callable[[Any, int], bytes], loads: Callable[[Buffer], Any], protocol: Optional[int] = None, ) -> None: ... @overload def __init__(self, dumps: Optional[Callable[[Any], bytes]] = None, loads: Optional[Callable[[Buffer], Any]] = None, ) -> None: ... """, }, '_typedict': "_typedict: Final[Dict[str, Datatype]] = ...", '_typedict_c': "_typedict_c: Final[Dict[str, Datatype]] = ...", '_typedict_f': "_typedict_f: Final[Dict[str, Datatype]] = ...", '_keyval_registry': None, } OVERRIDE.update({ subtype: { '__new__': """ def __new__(cls, {}: Optional[{}] = None) -> {}: ... 
""".format(basetype.lower(), basetype, subtype) } for basetype, subtype in ( ('Comm', 'Comm'), ('Comm', 'Intracomm'), ('Comm', 'Topocomm'), ('Comm', 'Cartcomm'), ('Comm', 'Graphcomm'), ('Comm', 'Distgraphcomm'), ('Comm', 'Intercomm'), ('Request', 'Request'), ('Request', 'Prequest'), ('Request', 'Grequest'), ) }) bufspec = """ Buffer = Any # TODO Count = int Displ = int TypeSpec = Union[Datatype, str] BufSpec = Union[ Buffer, Tuple[Buffer, Count], # (buffer, count) Tuple[Buffer, TypeSpec], # (buffer, datatype) Tuple[Buffer, Count, TypeSpec], # (buffer, count, datatype) Tuple[Bottom, Count, Datatype], # (BOTTOM, count, datatype) List, # (buffer, count, datatype) ] BufSpecB = Union[ Buffer, Tuple[Buffer, TypeSpec], # (buffer, datatype) Tuple[Buffer, Count, TypeSpec], # (buffer, count, datatype) List, # (buffer, count, datatype) ] BufSpecV = Union[ Buffer, Tuple[Buffer, Sequence[Count]], # (buffer, counts) Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]]], # (buffer, (counts, displs)) Tuple[Buffer, TypeSpec], # (buffer, datatype) Tuple[Buffer, Sequence[Count], TypeSpec], # (buffer, counts, datatype) Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], TypeSpec], # (buffer, (counts, displs), datatype) Tuple[Buffer, Sequence[Count], Sequence[Displ], TypeSpec], # (buffer, counts, displs, datatype) Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Datatype], # (BOTTOM, (counts, displs), datatype) Tuple[Bottom, Sequence[Count], Sequence[Displ], Datatype], # (BOTTOM, counts, displs, datatype) List, # (buffer, counts, displs, datatypes) ] BufSpecW = Union[ Tuple[Buffer, Sequence[Datatype]], # (buffer, datatypes) Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], # (buffer, (counts, displs), datatypes) Tuple[Buffer, Sequence[Count], Sequence[Displ], Sequence[Datatype]], # (buffer, counts, displs, datatypes) Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], # (BOTTOM, (counts, displs), datatypes) Tuple[Bottom, Sequence[Count], Sequence[Displ], Sequence[Datatype]], # (BOTTOM, counts, displs, datatypes) List, # (buffer, counts, displs, datatypes) ] TargetSpec = Union[ Displ, # displ Tuple[()], # () Tuple[Displ], # (displ,) Tuple[Displ, Count], # (displ, count) Tuple[Displ, Count, Datatype], # (displ, count, datatype) List, # (displ, count, datatype) ] """ def stubgen_mpi4py_MPI(done=None): from mpi4py import MPI lines = Lines() lines.add = IMPORTS lines.add = "" lines.add = stubgen_module(MPI) lines.add = "" lines.add = bufspec return lines def main(): import sys, os output = os.path.join('src', 'mpi4py', 'MPI.pyi') with open(output, 'w') as f: for line in stubgen_mpi4py_MPI(): print(line, file=f) if __name__ == '__main__': main() mpi4py-3.1.6/conf/mpiuni/000077500000000000000000000000001460670727200152045ustar00rootroot00000000000000mpi4py-3.1.6/conf/mpiuni/mpi.h000066400000000000000000000006431460670727200161450ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef PyMPI_MPIUNI_H #define PyMPI_MPIUNI_H #include #undef PETSC_HAVE_HIP #undef PETSC_HAVE_CUDA #undef PETSC_HAVE_FORTRAN #include #define PETSCSYS_H #define PETSCIMPL_H #include #include #include #include <../src/sys/mpiuni/mpi.c> #include <../src/sys/mpiuni/mpitime.c> #endif mpi4py-3.1.6/conf/mypy.ini000066400000000000000000000000651460670727200154030ustar00rootroot00000000000000[mypy] [mypy-numpy.*] ignore_missing_imports = True 
mpi4py-3.1.6/conf/mypy.stubtest.allow.txt000066400000000000000000000000271460670727200204320ustar00rootroot00000000000000mpi4py.futures._base.* mpi4py-3.1.6/conf/nompi/000077500000000000000000000000001460670727200150255ustar00rootroot00000000000000mpi4py-3.1.6/conf/nompi/mpi.h000066400000000000000000000007011460670727200157610ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef PyMPI_NOMPI_H #define PyMPI_NOMPI_H #define MPI_Init(a,b) ((void)a,(void)b,0) #define MPI_Finalize() (0) #define MPI_Initialized(a) ((*(a)=1),0) #define MPI_Finalized(a) ((*(a)=1),0) #define MPI_COMM_WORLD ((void*)0) #define MPI_Comm_size(a,b) ((void)(a),(*(b)=1),0) #define MPI_Comm_rank(a,b) ((void)(a),(*(b)=0),0) #define MPI_Abort(a,b) ((void)(a),(void)(b),0) #endif mpi4py-3.1.6/conf/requirements-build-cython.txt000066400000000000000000000000171460670727200215640ustar00rootroot00000000000000cython < 3.0.0 mpi4py-3.1.6/conf/requirements-docs.txt000066400000000000000000000001051460670727200201110ustar00rootroot00000000000000sphinx == 7.2.6 sphinx-copybutton == 0.5.2 sphinx-rtd-theme == 1.3.0 mpi4py-3.1.6/demo/000077500000000000000000000000001460670727200137025ustar00rootroot00000000000000mpi4py-3.1.6/demo/README.txt000066400000000000000000000003451460670727200154020ustar00rootroot00000000000000Issuing at the command line:: $ mpiexec -n 5 python helloworld.py will launch a five-process run of the Python interpreter and execute the test script ``helloworld.py``, a parallelized version of the *Hello World!* program. mpi4py-3.1.6/demo/compute-pi/000077500000000000000000000000001460670727200157645ustar00rootroot00000000000000mpi4py-3.1.6/demo/compute-pi/README.txt000066400000000000000000000000631460670727200174610ustar00rootroot00000000000000Different approaches for computing PI in parallel. mpi4py-3.1.6/demo/compute-pi/cpi-cco.py000066400000000000000000000023651460670727200176610ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel PI computation using Collective Communication Operations (CCO) within Python objects exposing memory buffers (requires NumPy). usage:: $ mpiexec -n python cpi-buf.py """ from mpi4py import MPI from math import pi as PI from numpy import array def get_n(): prompt = "Enter the number of intervals: (0 quits) " try: n = int(input(prompt)) if n < 0: n = 0 except: n = 0 return n def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n s = 0.0 for i in range(myrank + 1, n + 1, nprocs): x = h * (i - 0.5) s += 4.0 / (1.0 + x**2) return s * h def prn_pi(pi, PI): message = "pi is approximately %.16f, error is %.16f" print (message % (pi, abs(pi - PI))) comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array(0, dtype=int) pi = array(0, dtype=float) mypi = array(0, dtype=float) while True: if myrank == 0: _n = get_n() n.fill(_n) comm.Bcast([n, MPI.INT], root=0) if n == 0: break _mypi = comp_pi(n, myrank, nprocs) mypi.fill(_mypi) comm.Reduce([mypi, MPI.DOUBLE], [pi, MPI.DOUBLE], op=MPI.SUM, root=0) if myrank == 0: prn_pi(pi, PI) mpi4py-3.1.6/demo/compute-pi/cpi-dpm.py000066400000000000000000000107071460670727200176740ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel PI computation using Dynamic Process Management (DPM) within Python objects exposing memory buffers (requires NumPy). 
usage: + parent/child model:: $ mpiexec -n 1 python cpi-dpm.py [nchilds] + client/server model:: $ [xterm -e] mpiexec -n python cpi-dpm.py server [-v] & $ [xterm -e] mpiexec -n 1 python cpi-dpm.py client [-v] """ import sys from mpi4py import MPI import numpy as N def get_n(): prompt = "Enter the number of intervals: (0 quits) " try: n = int(input(prompt)) if n < 0: n = 0 except: n = 0 return n def view(pi, np=None, wt=None): from math import pi as PI prn = sys.stdout.write if pi is not None: prn("computed pi is: %.16f\n" % pi) prn("absolute error: %.16f\n" % abs(pi - PI)) if np is not None: prn("computing units: %d processes\n" % np) if wt is not None: prn("wall clock time: %g seconds\n" % wt) sys.stdout.flush() def comp_pi(n, comm, root=0): nprocs = comm.Get_size() myrank = comm.Get_rank() n = N.array(n, 'i') comm.Bcast([n, MPI.INT], root=root) if n == 0: return 0.0 h = 1.0 / n; s = 0.0; for i in range(myrank, n, nprocs): x = h * (i + 0.5); s += 4.0 / (1.0 + x**2); mypi = s * h mypi = N.array(mypi, 'd') pi = N.array(0, 'd') comm.Reduce([mypi, MPI.DOUBLE], [pi, MPI.DOUBLE], root=root, op=MPI.SUM) return pi def master(icomm): n = get_n() wt = MPI.Wtime() n = N.array(n, 'i') icomm.Send([n, MPI.INT], dest=0) pi = N.array(0, 'd') icomm.Recv([pi, MPI.DOUBLE], source=0) wt = MPI.Wtime() - wt if n == 0: return np = icomm.Get_remote_size() view(pi, np, wt) def worker(icomm): myrank = icomm.Get_rank() if myrank == 0: source = dest = 0 else: source = dest = MPI.PROC_NULL n = N.array(0, 'i') icomm.Recv([n, MPI.INT], source=source) pi = comp_pi(n, comm=MPI.COMM_WORLD, root=0) pi = N.array(pi, 'd') icomm.Send([pi, MPI.DOUBLE], dest=dest) # Parent/Child def main_parent(nprocs=1): assert nprocs > 0 assert MPI.COMM_WORLD.Get_size() == 1 icomm = MPI.COMM_WORLD.Spawn(command=sys.executable, args=[__file__, 'child'], maxprocs=nprocs) master(icomm) icomm.Disconnect() def main_child(): icomm = MPI.Comm.Get_parent() assert icomm != MPI.COMM_NULL worker(icomm) icomm.Disconnect() # Client/Server def main_server(COMM): nprocs = COMM.Get_size() myrank = COMM.Get_rank() service, port, info = None, None, MPI.INFO_NULL if myrank == 0: port = MPI.Open_port(info) log(COMM, "open port '%s'", port) service = 'cpi' MPI.Publish_name(service, port, info) log(COMM, "service '%s' published.", service) else: port = '' log(COMM, "waiting for client connection ...") icomm = COMM.Accept(port, info, root=0) log(COMM, "client connection accepted.") worker(icomm) log(COMM, "disconnecting from client ...") icomm.Disconnect() log(COMM, "client disconnected.") if myrank == 0: MPI.Unpublish_name(service, port, info) log(COMM, "service '%s' unpublished", port) MPI.Close_port(port) log(COMM, "closed port '%s' ", port) def main_client(COMM): assert COMM.Get_size() == 1 service, info = 'cpi', MPI.INFO_NULL port = MPI.Lookup_name(service, info) log(COMM, "service '%s' found in port '%s'.", service, port) log(COMM, "connecting to server ...") icomm = COMM.Connect(port, info, root=0) log(COMM, "server connected.") master(icomm) log(COMM, "disconnecting from server ...") icomm.Disconnect() log(COMM, "server disconnected.") def main(): assert len(sys.argv) <= 2 if 'server' in sys.argv: main_server(MPI.COMM_WORLD) elif 'client' in sys.argv: main_client(MPI.COMM_WORLD) elif 'child' in sys.argv: main_child() else: try: nchilds = int(sys.argv[1]) except: nchilds = 2 main_parent(nchilds) VERBOSE = False def log(COMM, fmt, *args): if not VERBOSE: return if COMM.rank != 0: return sys.stdout.write(fmt % args) sys.stdout.write('\n') sys.stdout.flush() if 
__name__ == '__main__': if '-v' in sys.argv: VERBOSE = True sys.argv.remove('-v') main() mpi4py-3.1.6/demo/compute-pi/cpi-rma.py000066400000000000000000000031041460670727200176640ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel PI computation using Remote Memory Access (RMA) within Python objects exposing memory buffers (requires NumPy). usage:: $ mpiexec -n python cpi-rma.py """ from mpi4py import MPI from math import pi as PI from numpy import array def get_n(): prompt = "Enter the number of intervals: (0 quits) " try: n = int(input(prompt)); if n < 0: n = 0 except: n = 0 return n def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n; s = 0.0; for i in range(myrank + 1, n + 1, nprocs): x = h * (i - 0.5); s += 4.0 / (1.0 + x**2); return s * h def prn_pi(pi, PI): message = "pi is approximately %.16f, error is %.16f" print (message % (pi, abs(pi - PI))) nprocs = MPI.COMM_WORLD.Get_size() myrank = MPI.COMM_WORLD.Get_rank() n = array(0, dtype=int) pi = array(0, dtype=float) mypi = array(0, dtype=float) if myrank == 0: win_n = MPI.Win.Create(n, comm=MPI.COMM_WORLD) win_pi = MPI.Win.Create(pi, comm=MPI.COMM_WORLD) else: win_n = MPI.Win.Create(None, comm=MPI.COMM_WORLD) win_pi = MPI.Win.Create(None, comm=MPI.COMM_WORLD) while True: if myrank == 0: _n = get_n() n.fill(_n) pi.fill(0.0) win_n.Fence() if myrank != 0: win_n.Get([n, MPI.INT], 0) win_n.Fence() if n == 0: break _mypi = comp_pi(n, myrank, nprocs) mypi.fill(_mypi) win_pi.Fence() win_pi.Accumulate([mypi, MPI.DOUBLE], 0, op=MPI.SUM) win_pi.Fence() if myrank == 0: prn_pi(pi, PI) win_n.Free() win_pi.Free() mpi4py-3.1.6/demo/compute-pi/makefile000066400000000000000000000004151460670727200174640ustar00rootroot00000000000000MPIEXEC = mpiexec NP_FLAG = -n NP = 3 PYTHON = python$(py) .PHONY: test test: echo 100 | ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} cpi-cco.py echo 100 | ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} cpi-rma.py echo 100 | ${MPIEXEC} ${NP_FLAG} 1 ${PYTHON} cpi-dpm.py ${NP} mpi4py-3.1.6/demo/cuda-aware-mpi/000077500000000000000000000000001460670727200164765ustar00rootroot00000000000000mpi4py-3.1.6/demo/cuda-aware-mpi/use_cupy.py000066400000000000000000000024031460670727200207030ustar00rootroot00000000000000# Demonstrate how to work with Python GPU arrays using CUDA-aware MPI. # We choose the CuPy library for simplicity, but any CUDA array which # has the __cuda_array_interface__ attribute defined will work. 
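# (These examples assume a CUDA-aware MPI build, so that GPU device
# buffers can be passed directly to MPI calls.)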
# # Run this script using the following command: # mpiexec -n 2 python use_cupy.py from mpi4py import MPI import cupy comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() # Allreduce sendbuf = cupy.arange(10, dtype='i') recvbuf = cupy.empty_like(sendbuf) # always make sure the GPU buffer is ready before any MPI operation cupy.cuda.get_current_stream().synchronize() comm.Allreduce(sendbuf, recvbuf) assert cupy.allclose(recvbuf, sendbuf*size) # Bcast if rank == 0: buf = cupy.arange(100, dtype=cupy.complex64) else: buf = cupy.empty(100, dtype=cupy.complex64) cupy.cuda.get_current_stream().synchronize() comm.Bcast(buf) assert cupy.allclose(buf, cupy.arange(100, dtype=cupy.complex64)) # Send-Recv if rank == 0: buf = cupy.arange(20, dtype=cupy.float64) cupy.cuda.get_current_stream().synchronize() comm.Send(buf, dest=1, tag=88) else: buf = cupy.empty(20, dtype=cupy.float64) cupy.cuda.get_current_stream().synchronize() comm.Recv(buf, source=0, tag=88) assert cupy.allclose(buf, cupy.arange(20, dtype=cupy.float64)) mpi4py-3.1.6/demo/cuda-aware-mpi/use_numba.py000066400000000000000000000020611460670727200210250ustar00rootroot00000000000000# Demonstrate how to work with Python GPU arrays using CUDA-aware MPI. # A GPU array is allocated and manipulated through Numba, which is # compliant with the __cuda_array_interface__ standard. # # Run this script using the following command: # mpiexec -n 2 python use_cupy.py from mpi4py import MPI from numba import cuda import numpy @cuda.jit() def add_const(arr, value): x = cuda.grid(1) if x < arr.size: arr[x] += value comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() # Send-Recv if rank == 0: buf = cuda.device_array((20,), dtype='f') buf[:] = range(20) block = 32 grid = (buf.size + block - 1)//block add_const[grid, block](buf, 100) # always make sure the GPU buffer is ready before any MPI operation cuda.default_stream().synchronize() comm.Send(buf, dest=1, tag=77) else: buf = cuda.device_array((20,), dtype='f') cuda.default_stream().synchronize() comm.Recv(buf, source=0, tag=77) buf = buf.copy_to_host() assert numpy.allclose(buf, 100+numpy.arange(20, dtype='f')) mpi4py-3.1.6/demo/cython/000077500000000000000000000000001460670727200152065ustar00rootroot00000000000000mpi4py-3.1.6/demo/cython/helloworld.pyx000066400000000000000000000025461460670727200201320ustar00rootroot00000000000000cdef extern from "mpi-compat.h": pass # --------- # Python-level module import # (file: mpi4py/MPI.so) from mpi4py import MPI # Python-level objects and code size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() pname = MPI.Get_processor_name() hwmess = "Hello, World! I am process %d of %d on %s." 
print (hwmess % (rank, size, pname)) # --------- # Cython-level cimport # this make available mpi4py's Python extension types # (file: mpi4py/include/mpi4py/MPI.pxd) from mpi4py cimport MPI from mpi4py.MPI cimport Intracomm as IntracommType # C-level cdef, typed, Python objects cdef MPI.Comm WORLD = MPI.COMM_WORLD cdef IntracommType SELF = MPI.COMM_SELF # --------- # Cython-level cimport with PXD file # this make available the native MPI C API # with namespace-protection (stuff accessed as mpi.XXX) # (file: mpi4py/include/mpi4py/libmpi.pxd) from mpi4py cimport libmpi as mpi cdef mpi.MPI_Comm world1 = WORLD.ob_mpi cdef int ierr1=0 cdef int size1 = 0 ierr1 = mpi.MPI_Comm_size(mpi.MPI_COMM_WORLD, &size1) cdef int rank1 = 0 ierr1 = mpi.MPI_Comm_rank(mpi.MPI_COMM_WORLD, &rank1) cdef int rlen1=0 cdef char pname1[mpi.MPI_MAX_PROCESSOR_NAME] ierr1 = mpi.MPI_Get_processor_name(pname1, &rlen1) pname1[rlen1] = 0 # just in case ;-) hwmess = "Hello, World! I am process %d of %d on %s." print (hwmess % (rank1, size1, pname1)) # --------- mpi4py-3.1.6/demo/cython/makefile000066400000000000000000000011211460670727200167010ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) PYTHON_CONFIG = ${PYTHON} ../python-config CYTHON = cython .PHONY: src src: helloworld.c helloworld.c: helloworld.pyx ${CYTHON} $< MPICC = mpicc CFLAGS = -fPIC ${shell ${PYTHON_CONFIG} --includes} LDFLAGS = -shared ${shell ${PYTHON_CONFIG} --libs} SO = ${shell ${PYTHON_CONFIG} --extension-suffix} .PHONY: build build: helloworld${SO} helloworld${SO}: helloworld.c ${MPICC} ${CFLAGS} -o $@ $< ${LDFLAGS} .PHONY: test test: build ${PYTHON} -c 'import helloworld' .PHONY: clean clean: ${RM} helloworld.c helloworld${SO} mpi4py-3.1.6/demo/cython/mpi-compat.h000066400000000000000000000004401460670727200174230ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI_COMPAT_H #define MPI_COMPAT_H #include #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #endif/*MPI_COMPAT_H*/ mpi4py-3.1.6/demo/embedding/000077500000000000000000000000001460670727200156205ustar00rootroot00000000000000mpi4py-3.1.6/demo/embedding/helloworld.c000066400000000000000000000020021460670727200201310ustar00rootroot00000000000000/* * You can use safely use mpi4py between multiple * Py_Initialize()/Py_Finalize() calls ... * but do not blame me for the memory leaks ;-) * */ #include #include const char helloworld[] = \ "from mpi4py import MPI \n" "hwmess = 'Hello, World! I am process %d of %d on %s.' 
\n" "myrank = MPI.COMM_WORLD.Get_rank() \n" "nprocs = MPI.COMM_WORLD.Get_size() \n" "procnm = MPI.Get_processor_name() \n" "print (hwmess % (myrank, nprocs, procnm)) \n" ""; int main(int argc, char *argv[]) { int i,n=5; MPI_Init(&argc, &argv); for (i=0; i= (2, 6) else {} return urlopen(url, **kwargs).read() def download_urls_sequential(urls, timeout=60): url_to_content = {} for url in urls: try: url_to_content[url] = load_url(url, timeout=timeout) except: pass return url_to_content def download_urls_with_executor(executor, urls, timeout=60): if executor is None: return {} try: url_to_content = {} future_to_url = dict((executor.submit(load_url, url, timeout), url) for url in urls) for future in as_completed(future_to_url): try: url_to_content[future_to_url[future]] = future.result() except: pass return url_to_content finally: executor.shutdown() def main(): for meth, fn in [('sequential', functools.partial(download_urls_sequential, URLS)), ('threads', functools.partial(download_urls_with_executor, ThreadPoolExecutor(10), URLS)), ('processes', functools.partial(download_urls_with_executor, ProcessPoolExecutor(10), URLS)), ('mpi4py', functools.partial(download_urls_with_executor, MPIPoolExecutor(10), URLS))]: sys.stdout.write('%s: ' % meth.ljust(11)) sys.stdout.flush() start = time.time() url_map = fn() elapsed = time.time() - start sys.stdout.write('%5.2f seconds (%2d of %d downloaded)\n' % (elapsed, len(url_map), len(URLS))) sys.stdout.flush() if __name__ == '__main__': main() mpi4py-3.1.6/demo/futures/perf_primes.py000066400000000000000000000036371460670727200202750ustar00rootroot00000000000000""" Compare the speed of primes sequentially vs. using futures. """ import sys import time import math try: from concurrent.futures import ThreadPoolExecutor except ImportError: ThreadPoolExecutor = None try: from concurrent.futures import ProcessPoolExecutor except ImportError: ProcessPoolExecutor = None from mpi4py.futures import MPIPoolExecutor PRIMES = [ 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 117450548693743, 993960000099397, ] def is_prime(n): if n % 2 == 0: return False sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def sequential(): return list(map(is_prime, PRIMES)) def with_thread_pool_executor(): if not ThreadPoolExecutor: return None with ThreadPoolExecutor(4) as executor: return list(executor.map(is_prime, PRIMES)) def with_process_pool_executor(): if not ProcessPoolExecutor: return None with ProcessPoolExecutor(4) as executor: return list(executor.map(is_prime, PRIMES)) def with_mpi_pool_executor(): with MPIPoolExecutor(4) as executor: return list(executor.map(is_prime, PRIMES)) def main(): for name, fn in [('sequential', sequential), ('threads', with_thread_pool_executor), ('processes', with_process_pool_executor), ('mpi4py', with_mpi_pool_executor)]: sys.stdout.write('%s: ' % name.ljust(11)) sys.stdout.flush() start = time.time() result = fn() if result is None: sys.stdout.write(' not available\n') elif result != [True] * len(PRIMES): sys.stdout.write(' failed\n') else: sys.stdout.write('%5.2f seconds\n' % (time.time() - start)) sys.stdout.flush() if __name__ == '__main__': main() mpi4py-3.1.6/demo/futures/run_crawl.py000066400000000000000000000015611460670727200177500ustar00rootroot00000000000000from __future__ import print_function from __future__ import division try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen from mpi4py.futures 
import MPIPoolExecutor URLS = [ 'http://www.google.com/', 'http://www.apple.com/', 'http://www.ibm.com/', 'http://www.slashdot.org/', 'http://www.python.org/', 'http://www.bing.com/', 'http://www.facebook.com/', 'http://www.yahoo.com/', 'http://www.youtube.com/', 'http://www.blogger.com/', ] def load_url(url): return url, urlopen(url).read() def test_crawl(): with MPIPoolExecutor(10) as executor: for url, content in executor.map(load_url, URLS, timeout=10, unordered=True): print('%-25s: %6.2f KiB' % (url, len(content)/(1 << 10))) if __name__ == '__main__': test_crawl() mpi4py-3.1.6/demo/futures/run_julia.py000066400000000000000000000024521460670727200177440ustar00rootroot00000000000000from __future__ import print_function from __future__ import division import sys import time from mpi4py.futures import MPICommExecutor x0 = -2.0 x1 = +2.0 y0 = -1.5 y1 = +1.5 w = 1600 h = 1200 dx = (x1 - x0) / w dy = (y1 - y0) / h def julia(x, y): c = complex(0, 0.65) z = complex(x, y) n = 255 while abs(z) < 3 and n > 1: z = z**2 + c n -= 1 return n def julia_line(k): line = bytearray(w) y = y1 - k * dy for j in range(w): x = x0 + j * dx line[j] = julia(x, y) return line def plot(image): import warnings warnings.simplefilter('ignore', UserWarning) try: from matplotlib import pyplot as plt except ImportError: return plt.figure() plt.imshow(image, aspect='equal', cmap='cubehelix') plt.axis('off') try: plt.draw() plt.pause(2) except: pass def test_julia(): with MPICommExecutor() as executor: if executor is None: return # worker process tic = time.time() image = list(executor.map(julia_line, range(h), chunksize=10)) toc = time.time() print("%s Set %dx%d in %.2f seconds." % ('Julia', w, h, toc-tic)) if len(sys.argv) > 1 and sys.argv[1] == '-plot': plot(image) if __name__ == '__main__': test_julia() mpi4py-3.1.6/demo/futures/run_mandelbrot.py000066400000000000000000000025221460670727200207650ustar00rootroot00000000000000from __future__ import print_function from __future__ import division import sys import time from mpi4py.futures import MPICommExecutor x0 = -2.0 x1 = +1.0 y0 = -1.0 y1 = +1.0 w = 750 h = 500 dx = (x1 - x0) / w dy = (y1 - y0) / h def mandelbrot(x, y, maxit=255): c = complex(x, y) z = complex(0, 0) n = 255 while abs(z) < 2 and n > 1: z = z**2 + c n -= 1 return n def mandelbrot_line(k): line = bytearray(w) y = y1 - k * dy for j in range(w): x = x0 + j * dx line[j] = mandelbrot(x, y) return line def plot(image): import warnings warnings.simplefilter('ignore', UserWarning) try: from matplotlib import pyplot as plt except ImportError: return plt.figure() plt.imshow(image, aspect='equal', cmap='spectral') plt.axis('off') try: plt.draw() plt.pause(2) except: pass def test_mandelbrot(): with MPICommExecutor() as executor: if executor is None: return # worker process tic = time.time() image = list(executor.map(mandelbrot_line, range(h), chunksize=10)) toc = time.time() print("%s Set %dx%d in %.2f seconds." 
% ('Mandelbrot', w, h, toc-tic)) if len(sys.argv) > 1 and sys.argv[1] == '-plot': plot(image) if __name__ == '__main__': test_mandelbrot() mpi4py-3.1.6/demo/futures/run_primes.py000066400000000000000000000012661460670727200201410ustar00rootroot00000000000000from __future__ import print_function import math from mpi4py.futures import MPIPoolExecutor PRIMES = [ 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 117450548693743, 993960000099397, ] def is_prime(n): if n % 2 == 0: return False sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def test_primes(): with MPIPoolExecutor(4) as executor: for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)): print('%d is prime: %s' % (number, prime)) if __name__ == '__main__': test_primes() mpi4py-3.1.6/demo/futures/test_futures.py000066400000000000000000001323311460670727200205100ustar00rootroot00000000000000import os import sys import time import functools import unittest from mpi4py import MPI from mpi4py import futures try: from concurrent.futures._base import ( PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED) except ImportError: from mpi4py.futures._base import ( PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED) SHARED_POOL = futures._lib.SharedPool is not None WORLD_SIZE = MPI.COMM_WORLD.Get_size() def create_future(state=PENDING, exception=None, result=None): f = futures.Future() f._state = state f._exception = exception f._result = result return f PENDING_FUTURE = create_future(state=PENDING) RUNNING_FUTURE = create_future(state=RUNNING) CANCELLED_FUTURE = create_future(state=CANCELLED) CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED) EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError()) SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42) def mul(x, y): return x * y def sleep_and_raise(t): time.sleep(t) raise Exception('this is an exception') def check_global_var(x): return global_var == x def check_run_name(name): return __name__ == name class ExecutorMixin: worker_count = 2 def setUp(self): self.t1 = time.time() try: self.executor = self.executor_type(max_workers=self.worker_count) except NotImplementedError: e = sys.exc_info()[1] self.skipTest(str(e)) self._prime_executor() def tearDown(self): self.executor.shutdown(wait=True) dt = time.time() - self.t1 self.assertLess(dt, 60, 'synchronization issue: test lasted too long') def _prime_executor(self): # Make sure that the executor is ready to do work before running the # tests. This should reduce the probability of timeouts in the tests. 
futures = [self.executor.submit(time.sleep, 0) for _ in range(self.worker_count)] for f in futures: f.result() class ProcessPoolMixin(ExecutorMixin): executor_type = futures.MPIPoolExecutor if 'coverage' in sys.modules: executor_type = functools.partial( executor_type, python_args='-m coverage run'.split(), ) @unittest.skipIf(not SHARED_POOL, 'not-shared-pool') class ASharedPoolInitTest(unittest.TestCase): executor_type = futures.MPIPoolExecutor @unittest.skipIf(WORLD_SIZE == 1, 'world-size-1') def test_initializer_1(self): executor = self.executor_type( initializer=sleep_and_raise, initargs=(0.2,), ) executor.submit(time.sleep, 0).cancel() future = executor.submit(time.sleep, 0) with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0).result() with self.assertRaises(futures.BrokenExecutor): future.result() with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0) @unittest.skipIf(WORLD_SIZE == 1, 'world-size-1') def test_initializer_2(self): executor = self.executor_type( initializer=time.sleep, initargs=(0,), ) executor.bootup() with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0).result() with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0) @unittest.skipIf(WORLD_SIZE == 1, 'world-size-1') def test_initializer_3(self): executor = self.executor_type() executor.submit(time.sleep, 0).result() executor.shutdown() class ProcessPoolInitTest(ProcessPoolMixin, unittest.TestCase): def setUp(self): pass def tearDown(self): pass def _prime_executor(self): pass def test_init(self): self.executor_type() def test_init_args(self): self.executor_type(1) def test_init_kwargs(self): executor = self.executor_type( python_exe=sys.executable, max_workers=None, mpi_info=dict(soft="0:1"), globals=None, main=False, path=[], wdir=os.getcwd(), env={}, ) futures = [executor.submit(time.sleep, 0) for _ in range(self.worker_count)] for f in futures: f.result() executor.shutdown() def test_init_pyargs(self): executor_type = futures.MPIPoolExecutor executor = executor_type(python_args=['-B', '-Wi']) executor.submit(time.sleep, 0).result() executor.shutdown() @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_init_globals(self): executor = self.executor_type(globals=dict(global_var=42)) future1 = executor.submit(check_global_var, 42) future2 = executor.submit(check_global_var, 24) self.assertTrue(future1.result()) self.assertFalse(future2.result()) executor.shutdown() @unittest.skipIf(SHARED_POOL and WORLD_SIZE == 1, 'shared-pool') def test_run_name(self): executor = self.executor_type() run_name = futures._lib.MAIN_RUN_NAME future = executor.submit(check_run_name, run_name) self.assertTrue(future.result(), run_name) @unittest.skipIf(SHARED_POOL and WORLD_SIZE > 2, 'shared-pool') def test_max_workers(self): executor = self.executor_type(max_workers=1) self.assertEqual(executor._max_workers, 1) executor.shutdown() self.assertEqual(executor._max_workers, None) @unittest.skipIf(SHARED_POOL and WORLD_SIZE > 2, 'shared-pool') def test_max_workers_environ(self): save = os.environ.get('MPI4PY_FUTURES_MAX_WORKERS') os.environ['MPI4PY_FUTURES_MAX_WORKERS'] = '1' try: executor = self.executor_type() executor.submit(time.sleep, 0).result() executor.shutdown() executor = self.executor_type() self.assertEqual(executor._max_workers, 1) executor.shutdown() self.assertEqual(executor._max_workers, None) finally: del os.environ['MPI4PY_FUTURES_MAX_WORKERS'] if save is not None: os.environ['MPI4PY_FUTURES_MAX_WORKERS'] = save def 
test_max_workers_negative(self): for number in (0, -1): self.assertRaises(ValueError, self.executor_type, max_workers=number) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer(self): executor = self.executor_type( initializer=time.sleep, initargs=(0,), ) executor.submit(time.sleep, 0).result() @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer_bad(self): with self.assertRaises(TypeError): self.executor_type(initializer=123) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer_error(self): executor = self.executor_type( initializer=sleep_and_raise, initargs=(0.2,), ) executor.submit(time.sleep, 0).cancel() future = executor.submit(time.sleep, 0) with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0).result() with self.assertRaises(futures.BrokenExecutor): future.result() with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0) self.assertEqual(executor._max_workers, None) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer_error_del(self): executor = self.executor_type( initializer=sleep_and_raise, initargs=(0.2,), ) executor.bootup() del executor @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer_error_del_nowait(self): executor = self.executor_type( initializer=sleep_and_raise, initargs=(1.2,), ) executor.bootup(wait=False) executor.shutdown(wait=False) del executor class ProcessPoolBootupTest(ProcessPoolMixin, unittest.TestCase): def _prime_executor(self): pass def test_bootup(self): executor = self.executor_type(1) executor.bootup() executor.bootup() executor.shutdown() self.assertRaises(RuntimeError, executor.bootup) def test_bootup_wait(self): executor = self.executor_type(1) executor.bootup(wait=True) executor.bootup(wait=True) executor.shutdown(wait=True) self.assertRaises(RuntimeError, executor.bootup, True) def test_bootup_nowait(self): executor = self.executor_type(1) executor.bootup(wait=False) executor.bootup(wait=False) executor.shutdown(wait=False) self.assertRaises(RuntimeError, executor.bootup, False) executor.shutdown(wait=True) def test_bootup_nowait_wait(self): executor = self.executor_type(1) executor.bootup(wait=False) executor.bootup(wait=True) executor.shutdown() self.assertRaises(RuntimeError, executor.bootup) def test_bootup_shutdown_nowait(self): executor = self.executor_type(1) executor.bootup(wait=False) executor.shutdown(wait=False) worker = executor._pool del executor worker.join() class ExecutorShutdownTestMixin: def test_run_after_shutdown(self): self.executor.shutdown() self.assertRaises(RuntimeError, self.executor.submit, pow, 2, 5) def test_hang_issue12364(self): fs = [self.executor.submit(time.sleep, 0.01) for _ in range(50)] self.executor.shutdown() for f in fs: f.result() class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTestMixin, unittest.TestCase): def _prime_executor(self): pass def test_shutdown(self): executor = self.executor_type(max_workers=1) self.assertEqual(executor._pool, None) self.assertEqual(executor._shutdown, False) executor.submit(mul, 21, 2) executor.submit(mul, 6, 7) executor.submit(mul, 3, 14) self.assertNotEqual(executor._pool.thread, None) self.assertEqual(executor._shutdown, False) executor.shutdown(wait=False) self.assertNotEqual(executor._pool.thread, None) self.assertEqual(executor._shutdown, True) executor.shutdown(wait=True) self.assertEqual(executor._pool, None) self.assertEqual(executor._shutdown, True) def test_submit_shutdown_cancel(self): executor = self.executor_type(max_workers=1) 
executor.bootup() num_workers = executor._max_workers for _ in range(num_workers*10): executor.submit(time.sleep, 0.1) fut = executor.submit(time.sleep, 0) executor.shutdown(wait=False, cancel_futures=False) self.assertFalse(fut.cancelled()) executor.shutdown(wait=True, cancel_futures=True) self.assertTrue(fut.cancelled()) def test_shutdown_cancel(self): executor = self.executor_type(max_workers=1) executor.bootup() executor._pool.cancel() executor.shutdown(wait=False, cancel_futures=False) executor.shutdown(wait=False, cancel_futures=False) executor.shutdown(wait=False, cancel_futures=True) executor.shutdown(wait=False, cancel_futures=True) executor.shutdown(wait=True, cancel_futures=True) executor.shutdown(wait=True, cancel_futures=True) def test_init_bootup_shutdown(self): executor = self.executor_type(max_workers=1) self.assertEqual(executor._pool, None) self.assertEqual(executor._shutdown, False) executor.bootup() self.assertTrue(executor._pool.event.is_set()) self.assertEqual(executor._shutdown, False) executor.shutdown() self.assertEqual(executor._pool, None) self.assertEqual(executor._shutdown, True) def test_context_manager_shutdown(self): with self.executor_type(max_workers=1) as e: self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) threads = [e._pool.thread] queues = [e._pool.queue] events = [e._pool.event] for t in threads: t.join() for q in queues: self.assertRaises(LookupError, q.pop) for e in events: self.assertTrue(e.is_set()) def test_del_shutdown(self): executor = self.executor_type(max_workers=1) list(executor.map(abs, range(-5, 5))) threads = [executor._pool.thread] queues = [executor._pool.queue] events = [executor._pool.event] if hasattr(sys, 'pypy_version_info'): executor.shutdown(False) else: del executor for t in threads: t.join() for q in queues: self.assertRaises(LookupError, q.pop) for e in events: self.assertTrue(e.is_set()) class WaitTestMixin: def test_first_completed(self): future1 = self.executor.submit(mul, 21, 2) future2 = self.executor.submit(time.sleep, 0.2) done, not_done = futures.wait( [CANCELLED_FUTURE, future1, future2], return_when=futures.FIRST_COMPLETED) self.assertEqual(set([future1]), done) self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done) def test_first_completed_some_already_completed(self): future1 = self.executor.submit(time.sleep, 0.2) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1], return_when=futures.FIRST_COMPLETED) self.assertEqual( set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]), finished) self.assertEqual(set([future1]), pending) def test_first_exception(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(sleep_and_raise, 0.2) future3 = self.executor.submit(time.sleep, 0.4) finished, pending = futures.wait( [future1, future2, future3], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([future1, future2]), finished) self.assertEqual(set([future3]), pending) def test_first_exception_some_already_complete(self): future1 = self.executor.submit(divmod, 21, 0) future2 = self.executor.submit(time.sleep, 0.2) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1, future2], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1]), finished) self.assertEqual(set([CANCELLED_FUTURE, future2]), pending) def test_first_exception_one_already_failed(self): future1 = self.executor.submit(time.sleep, 
0.2) finished, pending = futures.wait( [EXCEPTION_FUTURE, future1], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([EXCEPTION_FUTURE]), finished) self.assertEqual(set([future1]), pending) def test_all_completed(self): future1 = self.executor.submit(divmod, 2, 0) future2 = self.executor.submit(mul, 2, 21) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2], return_when=futures.ALL_COMPLETED) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2]), finished) self.assertEqual(set(), pending) def test_timeout(self): future1 = self.executor.submit(mul, 6, 7) future2 = self.executor.submit(time.sleep, 0.5) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2], timeout=0.2, return_when=futures.ALL_COMPLETED) self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1]), finished) self.assertEqual(set([future2]), pending) class ProcessPoolWaitTest(ProcessPoolMixin, WaitTestMixin, unittest.TestCase): pass class AsCompletedTestMixin: def test_no_timeout(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(mul, 7, 6) completed = set(futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2])) self.assertEqual(set( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2]), completed) def test_zero_timeout(self): future1 = self.executor.submit(time.sleep, 0.5) completed_futures = set() try: for future in futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1], timeout=0): completed_futures.add(future) except futures.TimeoutError: pass self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE]), completed_futures) def test_nonzero_timeout(self): future1 = self.executor.submit(time.sleep, 0.0) future2 = self.executor.submit(time.sleep, 0.5) completed_futures = set() try: for future in futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1], timeout=0.2): completed_futures.add(future) except futures.TimeoutError: pass self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1]), completed_futures) def test_duplicate_futures(self): py_version = sys.version_info[:3] if py_version[0] == 3 and py_version < (3, 3, 5): return # Issue 20367. Duplicate futures should not raise exceptions or give # duplicate responses. 
future1 = self.executor.submit(time.sleep, 0.1) completed = [f for f in futures.as_completed([future1, future1])] self.assertEqual(len(completed), 1) class ProcessPoolAsCompletedTest(ProcessPoolMixin, AsCompletedTestMixin, unittest.TestCase): pass class ExecutorTestMixin: def test_submit(self): future = self.executor.submit(pow, 2, 8) self.assertEqual(256, future.result()) def test_submit_keyword(self): future = self.executor.submit(mul, 2, y=8) self.assertEqual(16, future.result()) future = self.executor.submit(mul, x=2, y=8) self.assertEqual(16, future.result()) def test_submit_cancel(self): future1 = self.executor.submit(time.sleep, 0.25) future2 = self.executor.submit(time.sleep, 0) future2.cancel() self.assertEqual(None, future1.result()) self.assertEqual(False, future1.cancelled()) self.assertEqual(True, future2.cancelled()) def test_map(self): self.assertEqual( list(self.executor.map(pow, range(10), range(10))), list(map(pow, range(10), range(10)))) def test_starmap(self): sequence = [(a,a) for a in range(10)] self.assertEqual( list(self.executor.starmap(pow, sequence)), list(map(pow, range(10), range(10)))) self.assertEqual( list(self.executor.starmap(pow, iter(sequence))), list(map(pow, range(10), range(10)))) def test_map_exception(self): i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) self.assertEqual(next(i), (0, 1)) self.assertEqual(next(i), (0, 1)) self.assertRaises(ZeroDivisionError, next, i) def test_map_timeout(self): results = [] try: for i in self.executor.map(time.sleep, [0, 0, 1], timeout=0.25): results.append(i) except futures.TimeoutError: pass else: self.fail('expected TimeoutError') self.assertEqual([None, None], results) def test_map_timeout_one(self): results = [] for i in self.executor.map(time.sleep, [0, 0, 0], timeout=1): results.append(i) self.assertEqual([None, None, None], results) class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTestMixin, unittest.TestCase): def test_map_chunksize(self): ref = list(map(pow, range(40), range(40))) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=6)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=50)), ref) self.assertEqual( list(self.executor.map(pow, range(40), range(40), chunksize=40)), ref) def bad(): list(self.executor.map(pow, range(40), range(40), chunksize=-1)) self.assertRaises(ValueError, bad) def test_starmap_chunksize(self): ref = list(map(pow, range(40), range(40))) sequence = [(a, a) for a in range(40)] self.assertEqual( list(self.executor.starmap(pow, sequence, chunksize=6)), ref) self.assertEqual( list(self.executor.starmap(pow, sequence, chunksize=50)), ref) self.assertEqual( list(self.executor.starmap(pow, sequence, chunksize=40)), ref) self.assertEqual( list(self.executor.starmap(pow, iter(sequence), chunksize=6)), ref) self.assertEqual( list(self.executor.starmap(pow, iter(sequence), chunksize=50)), ref) self.assertEqual( list(self.executor.starmap(pow, iter(sequence), chunksize=40)), ref) def bad(): list(self.executor.starmap(pow, sequence, chunksize=-1)) self.assertRaises(ValueError, bad) def test_map_unordered(self): map_unordered = functools.partial(self.executor.map, unordered=True) self.assertEqual( set(map_unordered(pow, range(10), range(10))), set(map(pow, range(10), range(10)))) def test_map_unordered_timeout(self): map_unordered = functools.partial(self.executor.map, unordered=True) num_workers = self.executor._max_workers results = [] try: args = [1] + [0]*(num_workers-1) for i in map_unordered(time.sleep, args, 
timeout=0.25): results.append(i) except futures.TimeoutError: pass else: self.fail('expected TimeoutError') self.assertEqual([None]*(num_workers-1), results) def test_map_unordered_timeout_one(self): map_unordered = functools.partial(self.executor.map, unordered=True) results = [] for i in map_unordered(time.sleep, [0, 0, 0], timeout=1): results.append(i) self.assertEqual([None, None, None], results) def test_map_unordered_exception(self): map_unordered = functools.partial(self.executor.map, unordered=True) i = map_unordered(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) try: self.assertEqual(next(i), (0, 1)) except ZeroDivisionError: return def test_map_unordered_chunksize(self): map_unordered = functools.partial(self.executor.map, unordered=True) ref = set(map(pow, range(40), range(40))) self.assertEqual( set(map_unordered(pow, range(40), range(40), chunksize=6)), ref) self.assertEqual( set(map_unordered(pow, range(40), range(40), chunksize=50)), ref) self.assertEqual( set(map_unordered(pow, range(40), range(40), chunksize=40)), ref) def bad(): set(map_unordered(pow, range(40), range(40), chunksize=-1)) self.assertRaises(ValueError, bad) class ProcessPoolSubmitTest(unittest.TestCase): @unittest.skipIf(MPI.get_vendor()[0] == 'Microsoft MPI', 'msmpi') def test_multiple_executors(self): executor1 = futures.MPIPoolExecutor(1).bootup(wait=True) executor2 = futures.MPIPoolExecutor(1).bootup(wait=True) executor3 = futures.MPIPoolExecutor(1).bootup(wait=True) fs1 = [executor1.submit(abs, i) for i in range(100, 200)] fs2 = [executor2.submit(abs, i) for i in range(200, 300)] fs3 = [executor3.submit(abs, i) for i in range(300, 400)] futures.wait(fs3+fs2+fs1) for i, f in enumerate(fs1): self.assertEqual(f.result(), i + 100) for i, f in enumerate(fs2): self.assertEqual(f.result(), i + 200) for i, f in enumerate(fs3): self.assertEqual(f.result(), i + 300) executor1 = executor2 = executor3 = None def test_mpi_serialized_support(self): futures._lib.setup_mpi_threads() threading = futures._lib.threading serialized = futures._lib.serialized lock_save = serialized.lock try: if lock_save is None: serialized.lock = threading.Lock() executor = futures.MPIPoolExecutor(1).bootup() executor.submit(abs, 0).result() executor.shutdown() serialized.lock = lock_save else: serialized.lock = None with lock_save: executor = futures.MPIPoolExecutor(1).bootup() executor.submit(abs, 0).result() executor.shutdown() serialized.lock = lock_save finally: serialized.lock = lock_save def test_shared_executors(self): if not SHARED_POOL: return executors = [futures.MPIPoolExecutor() for _ in range(16)] fs = [] for i in range(128): fs.extend(e.submit(abs, i*16+j) for j, e in enumerate(executors)) assert sorted(f.result() for f in fs) == list(range(16*128)) world_size = MPI.COMM_WORLD.Get_size() num_workers = max(1, world_size - 1) for e in executors: self.assertEqual(e._max_workers, num_workers) del e, executors def inout(arg): return arg class GoodPickle(object): def __init__(self, value=0): self.value = value self.pickled = False self.unpickled = False def __getstate__(self): self.pickled = True return (self.value,) def __setstate__(self, state): self.unpickled = True self.value = state[0] class BadPickle(object): def __init__(self): self.pickled = False def __getstate__(self): self.pickled = True 1/0 def __setstate__(self, state): pass class BadUnpickle(object): def __init__(self): self.pickled = False def __getstate__(self): self.pickled = True return (None,) def __setstate__(self, state): if state[0] is not None: raise ValueError 1/0 
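# The GoodPickle/BadPickle/BadUnpickle helpers above exercise pickling and
# unpickling failures; the tests below check that such failures surface as
# exceptions on the futures returned by submit().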
@unittest.skipIf(SHARED_POOL and WORLD_SIZE == 1, 'shared-pool') class ProcessPoolPickleTest(unittest.TestCase): def setUp(self): self.executor = futures.MPIPoolExecutor(1) def tearDown(self): self.executor.shutdown() def test_good_pickle(self): o = GoodPickle(42) r = self.executor.submit(inout, o).result() self.assertEqual(o.value, r.value) self.assertTrue(o.pickled) self.assertTrue(r.unpickled) r = self.executor.submit(GoodPickle, 77).result() self.assertEqual(r.value, 77) self.assertTrue(r.unpickled) def test_bad_pickle(self): o = BadPickle() self.assertFalse(o.pickled) f = self.executor.submit(inout, o) self.assertRaises(ZeroDivisionError, f.result) self.assertTrue(o.pickled) f = self.executor.submit(BadPickle) self.assertRaises(ZeroDivisionError, f.result) f = self.executor.submit(abs, 42) self.assertEqual(f.result(), 42) def test_bad_unpickle(self): o = BadUnpickle() self.assertFalse(o.pickled) f = self.executor.submit(inout, o) self.assertRaises(ZeroDivisionError, f.result) self.assertTrue(o.pickled) f = self.executor.submit(BadUnpickle) self.assertRaises(ZeroDivisionError, f.result) f = self.executor.submit(abs, 42) self.assertEqual(f.result(), 42) class MPICommExecutorTest(unittest.TestCase): MPICommExecutor = futures.MPICommExecutor def test_default(self): with self.MPICommExecutor() as executor: if executor is not None: executor.bootup() future1 = executor.submit(time.sleep, 0) future2 = executor.submit(time.sleep, 0) executor.shutdown() self.assertEqual(None, future1.result()) self.assertEqual(None, future2.result()) def test_self(self): with self.MPICommExecutor(MPI.COMM_SELF) as executor: future = executor.submit(time.sleep, 0) self.assertEqual(None, future.result()) self.assertEqual(None, future.exception()) future = executor.submit(sleep_and_raise, 0) self.assertRaises(Exception, future.result) self.assertEqual(Exception, type(future.exception())) list(executor.map(time.sleep, [0, 0])) list(executor.map(time.sleep, [0, 0], timeout=1)) iterator = executor.map(time.sleep, [0.2, 0], timeout=0) self.assertRaises(futures.TimeoutError, list, iterator) def test_args(self): with self.MPICommExecutor(MPI.COMM_SELF) as executor: self.assertTrue(executor is not None) with self.MPICommExecutor(MPI.COMM_SELF, 0) as executor: self.assertTrue(executor is not None) def test_kwargs(self): with self.MPICommExecutor(comm=MPI.COMM_SELF) as executor: self.assertTrue(executor is not None) with self.MPICommExecutor(comm=MPI.COMM_SELF, root=0) as executor: self.assertTrue(executor is not None) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_arg_root(self): comm = MPI.COMM_WORLD rank = comm.Get_rank() for root in range(comm.Get_size()): with self.MPICommExecutor(comm, root) as executor: if rank != root: self.assertTrue(executor is None) with self.MPICommExecutor(root=root) as executor: if rank != root: self.assertTrue(executor is None) def test_arg_root_bad(self): size = MPI.COMM_WORLD.Get_size() self.assertRaises(ValueError, self.MPICommExecutor, root=-size) self.assertRaises(ValueError, self.MPICommExecutor, root=-1) self.assertRaises(ValueError, self.MPICommExecutor, root=+size) @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_arg_comm_bad(self): if MPI.COMM_WORLD.Get_size() == 1: return intercomm = futures._lib.comm_split(MPI.COMM_WORLD) try: self.assertRaises(ValueError, self.MPICommExecutor, intercomm) finally: intercomm.Free() def test_with_bad(self): mpicommexecutor = self.MPICommExecutor(MPI.COMM_SELF) with mpicommexecutor as executor: try: with mpicommexecutor: pass except 
RuntimeError: pass else: self.fail('expected RuntimeError') @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer(self): mpicommexecutor = self.MPICommExecutor( initializer=time.sleep, initargs=(0,), ) with mpicommexecutor as executor: if executor is not None: executor.bootup() del executor with mpicommexecutor as executor: if executor is not None: executor.submit(time.sleep, 0).result() @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer_error(self): mpicommexecutor = self.MPICommExecutor( initializer=sleep_and_raise, initargs=(0.2,), ) with mpicommexecutor as executor: if executor is not None: executor.submit(time.sleep, 0).cancel() future = executor.submit(time.sleep, 0) with self.assertRaises(futures.BrokenExecutor): executor.submit(time.sleep, 0).result() with self.assertRaises(futures.BrokenExecutor): future.result() @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer_error_del(self): mpicommexecutor = self.MPICommExecutor( initializer=sleep_and_raise, initargs=(0.2,), ) with mpicommexecutor as executor: if executor is not None: executor.bootup() del executor @unittest.skipIf(SHARED_POOL, 'shared-pool') def test_initializer_error_del_nowait(self): mpicommexecutor = self.MPICommExecutor( initializer=sleep_and_raise, initargs=(0.2,), ) with mpicommexecutor as executor: if executor is not None: executor.bootup(wait=False) executor.shutdown(wait=False) del executor from mpi4py.futures.aplus import ThenableFuture class ThenTest(unittest.TestCase): assert_ = unittest.TestCase.assertTrue def test_not_done(self): base_f = ThenableFuture() new_f = base_f.then() self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f._invoke_callbacks() self.assert_(new_f.cancelled()) def test_cancel(self): base_f = ThenableFuture() new_f = base_f.then() self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.cancel() self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(base_f.cancelled()) self.assert_(new_f.cancelled()) def test_then_multiple(self): base_f = ThenableFuture() new_f1 = base_f.then() new_f2 = base_f.then() new_f3 = base_f.then() self.assert_(base_f is not new_f1) self.assert_(base_f is not new_f2) self.assert_(base_f is not new_f3) self.assert_(not base_f.done()) self.assert_(not new_f1.done()) self.assert_(not new_f2.done()) self.assert_(not new_f3.done()) base_f.set_result('done') self.assert_(base_f.done()) self.assert_(new_f1.done()) self.assert_(new_f2.done()) self.assert_(new_f3.done()) self.assert_(not new_f1.exception()) self.assert_(not new_f2.exception()) self.assert_(not new_f3.exception()) self.assert_(new_f1.result() == 'done') self.assert_(new_f2.result() == 'done') self.assert_(new_f3.result() == 'done') def test_no_callbacks_and_success(self): base_f = ThenableFuture() new_f = base_f.then() self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_result('done') self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(not new_f.exception()) self.assert_(new_f.result() == 'done') def test_no_callbacks_and_failure(self): class MyException(Exception): pass base_f = ThenableFuture() new_f = base_f.then() self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_exception(MyException('sad')) self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(new_f.exception()) with self.assertRaises(MyException) as catcher: 
new_f.result() self.assert_(catcher.exception.args[0] == 'sad') def test_success_callback_and_success(self): base_f = ThenableFuture() new_f = base_f.then(lambda result: result + ' manipulated') self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_result('done') self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(not new_f.exception()) self.assert_(new_f.result() == 'done manipulated') def test_err_callback_and_failure_repackage(self): class MyException(Exception): pass class MyRepackagedException(Exception): pass class NotMatched(Exception): pass def on_failure(ex): if isinstance(ex, MyException): return MyRepackagedException(ex.args[0] + ' repackaged') else: return NotMatched('?') base_f = ThenableFuture() new_f = base_f.then(None, on_failure) self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_exception(MyException('sad')) self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(new_f.exception()) with self.assertRaises(MyRepackagedException) as catcher: new_f.result() self.assert_(catcher.exception.args[0] == 'sad repackaged') def test_err_callback_and_failure_raised(self): class MyException(Exception): pass class MyRepackagedException(Exception): pass def raise_something_else(ex): raise MyRepackagedException(ex.args[0] + ' repackaged') base_f = ThenableFuture() new_f = base_f.then(None, raise_something_else) self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_exception(MyException('sad')) self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(new_f.exception()) with self.assertRaises(MyRepackagedException) as catcher: new_f.result() self.assert_(catcher.exception.args[0] == 'sad repackaged') def test_err_callback_convert_to_success(self): class MyException(Exception): pass class NotMatched(Exception): pass def on_failure(ex): if isinstance(ex, MyException): return ex.args[0] + ' repackaged' else: return NotMatched('?') base_f = ThenableFuture() new_f = base_f.catch(on_failure) self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_exception(MyException('sad')) self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(not new_f.exception()) self.assert_(new_f.result() == 'sad repackaged') def test_err_catch_ignore(self): base_f = ThenableFuture() new_f = base_f.catch() self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_exception(Exception('sad')) self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(new_f.exception() is None) self.assert_(new_f.result() is None) def test_success_callback_and_failure_raised(self): class MyException(Exception): pass def raise_something_else(value): raise MyException(value + ' repackaged') base_f = ThenableFuture() new_f = base_f.then(raise_something_else) self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_result('sad') self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(new_f.exception()) with self.assertRaises(MyException) as catcher: new_f.result() assert catcher.exception.args[0] == 'sad repackaged' def test_chained_success_callback_and_success(self): def transform(value): f = ThenableFuture() if value < 5: f.set_result(transform(value+1)) else: f.set_result(value) return f base_f = ThenableFuture() new_f = base_f.then(transform) 
self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_result(1) self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(not new_f.exception()) self.assert_(new_f.result() == 5) def test_detect_circular_chains(self): f1 = ThenableFuture() f2 = ThenableFuture() chain = [f1, f2, f1] def transform(a): try: f = chain.pop(0) r = transform(a) f.__init__() f.set_result(r) return f except IndexError: return 42 base_f = ThenableFuture() new_f = base_f.then(transform) self.assert_(base_f is not new_f) self.assert_(not base_f.done()) self.assert_(not new_f.done()) base_f.set_result(1) self.assert_(base_f.done()) self.assert_(new_f.done()) self.assert_(new_f.exception()) with self.assertRaises(RuntimeError) as catcher: new_f.result() self.assert_('Circular future chain detected' in catcher.exception.args[0]) SKIP_POOL_TEST = False name, version = MPI.get_vendor() if name == 'Open MPI': if version < (3,0,0): SKIP_POOL_TEST = True if version == (4,0,0): SKIP_POOL_TEST = True if version == (4,0,1) and sys.platform=='darwin': SKIP_POOL_TEST = True if version == (4,0,2) and sys.platform=='darwin': SKIP_POOL_TEST = True if name == 'MPICH': if sys.platform == 'darwin': if version >= (3, 4) and version < (4, 0): SKIP_POOL_TEST = True if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: SKIP_POOL_TEST = True try: port = MPI.Open_port() MPI.Close_port(port) except: port = "" if port == "": SKIP_POOL_TEST = True del port if name == 'MVAPICH2': SKIP_POOL_TEST = True if name == 'MPICH2': if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: SKIP_POOL_TEST = True if name == 'Microsoft MPI': if version < (8,1,0): SKIP_POOL_TEST = True if MPI.COMM_WORLD.Get_attr(MPI.APPNUM) is None: SKIP_POOL_TEST = True if name == 'Platform MPI': SKIP_POOL_TEST = True if MPI.Get_version() < (2,0): SKIP_POOL_TEST = True if SHARED_POOL: del MPICommExecutorTest.test_arg_root del MPICommExecutorTest.test_arg_comm_bad del MPICommExecutorTest.test_initializer del MPICommExecutorTest.test_initializer_error del MPICommExecutorTest.test_initializer_error_del del MPICommExecutorTest.test_initializer_error_del_nowait del ProcessPoolInitTest.test_init_globals del ProcessPoolInitTest.test_initializer del ProcessPoolInitTest.test_initializer_bad del ProcessPoolInitTest.test_initializer_error del ProcessPoolInitTest.test_initializer_error_del del ProcessPoolInitTest.test_initializer_error_del_nowait if WORLD_SIZE == 1: del ASharedPoolInitTest del ProcessPoolInitTest.test_run_name del ProcessPoolPickleTest if WORLD_SIZE > 2: del ProcessPoolInitTest.test_max_workers del ProcessPoolInitTest.test_max_workers_environ elif WORLD_SIZE > 1 or SKIP_POOL_TEST: del ProcessPoolInitTest del ProcessPoolBootupTest del ProcessPoolShutdownTest del ProcessPoolWaitTest del ProcessPoolAsCompletedTest del ProcessPoolExecutorTest del ProcessPoolSubmitTest del ProcessPoolPickleTest if not SHARED_POOL: del ASharedPoolInitTest if __name__ == '__main__': unittest.main() mpi4py-3.1.6/demo/futures/test_service.py000066400000000000000000000014601460670727200204510ustar00rootroot00000000000000import sys from mpi4py.futures import MPIPoolExecutor def main(): def getarg(opt, default=None): try: return sys.argv[sys.argv.index('--'+opt)+1] except ValueError: return default options = {} if '--host' in sys.argv or '--port' in sys.argv: service = (getarg('host'), getarg('port')) else: service = getarg('service') if '--info' in sys.argv: info = getarg('info').split(',') info = dict(entry.split('=') for entry in info if entry) else: 
info = None with MPIPoolExecutor(service=service, mpi_info=info) as executor: fut1 = executor.submit(abs, +42) fut2 = executor.submit(abs, -42) assert fut1.result(0) == 42 assert fut2.result(0) == 42 if __name__ == '__main__': main() mpi4py-3.1.6/demo/futures/test_service.sh000077500000000000000000000024231460670727200204360ustar00rootroot00000000000000#!/bin/bash PYTHON=${1-${PYTHON-python}} MPIEXEC=${MPIEXEC-mpiexec} testdir=$(dirname "$0") set -e if [ $(command -v mpichversion) ]; then $MPIEXEC -n 1 $PYTHON -m mpi4py.futures.server --xyz > /dev/null 2>&1 || true $MPIEXEC -n 2 $PYTHON -m mpi4py.futures.server --bind localhost & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -n 1 $PYTHON $testdir/test_service.py --host localhost wait $mpi4pyserver $MPIEXEC -n 2 $PYTHON -m mpi4py.futures.server --port 31414 --info "a=x,b=y" & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -n 1 $PYTHON $testdir/test_service.py --port 31414 --info "a=x,b=y" wait $mpi4pyserver fi if [ $(command -v mpichversion) ] && [ $(command -v hydra_nameserver) ]; then hydra_nameserver & nameserver=$!; sleep 0.25; $MPIEXEC -nameserver localhost -n 2 $PYTHON -m mpi4py.futures.server & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -nameserver localhost -n 1 $PYTHON $testdir/test_service.py wait $mpi4pyserver $MPIEXEC -nameserver localhost -n 2 $PYTHON -m mpi4py.futures.server --service test-service & mpi4pyserver=$!; sleep 0.25; $MPIEXEC -nameserver localhost -n 1 $PYTHON $testdir/test_service.py --service test-service wait $mpi4pyserver kill -TERM $nameserver wait $nameserver 2>/dev/null || true fi mpi4py-3.1.6/demo/helloworld.c000066400000000000000000000011551460670727200162230ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { int size, rank, len; char name[MPI_MAX_PROCESSOR_NAME]; #if defined(MPI_VERSION) && (MPI_VERSION >= 2) int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); #else MPI_Init(&argc, &argv); #endif MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Get_processor_name(name, &len); printf("Hello, World! I am process %d of %d on %s.\n", rank, size, name); MPI_Finalize(); return 0; } /* * Local Variables: * mode: C * c-basic-offset: 2 * indent-tabs-mode: nil * End: */ mpi4py-3.1.6/demo/helloworld.cxx000066400000000000000000000012111460670727200165740ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { #if defined(MPI_VERSION) && (MPI_VERSION >= 2) MPI::Init_thread(MPI_THREAD_MULTIPLE); #else MPI::Init(); #endif int size = MPI::COMM_WORLD.Get_size(); int rank = MPI::COMM_WORLD.Get_rank(); int len; char name[MPI_MAX_PROCESSOR_NAME]; MPI::Get_processor_name(name, len); std::cout << "Hello, World! " << "I am process " << rank << " of " << size << " on " << name << "." << std::endl; MPI::Finalize(); return 0; } // Local Variables: // mode: C++ // c-basic-offset: 2 // indent-tabs-mode: nil // End: mpi4py-3.1.6/demo/helloworld.f08000066400000000000000000000007711460670727200164010ustar00rootroot00000000000000program main use mpi_f08 implicit none integer :: provided, size, rank, len character (len=MPI_MAX_PROCESSOR_NAME) :: name call MPI_Init_thread(MPI_THREAD_MULTIPLE, provided) call MPI_Comm_rank(MPI_COMM_WORLD, rank) call MPI_Comm_size(MPI_COMM_WORLD, size) call MPI_Get_processor_name(name, len) write(*, '(2A,I2,A,I2,3A)') & 'Hello, World! ', & 'I am process ', rank, & ' of ', size, & ' on ', name(1:len), '.' 
call MPI_Finalize() end program main mpi4py-3.1.6/demo/helloworld.f90000066400000000000000000000010271460670727200163750ustar00rootroot00000000000000program main use mpi implicit none integer :: provided, ierr, size, rank, len character (len=MPI_MAX_PROCESSOR_NAME) :: name call MPI_Init_thread(MPI_THREAD_MULTIPLE, provided, ierr) call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr) call MPI_Comm_size(MPI_COMM_WORLD, size, ierr) call MPI_Get_processor_name(name, len, ierr) write(*, '(2A,I2,A,I2,3A)') & 'Hello, World! ', & 'I am process ', rank, & ' of ', size, & ' on ', name(1:len), '.' call MPI_Finalize(ierr) end program main mpi4py-3.1.6/demo/helloworld.py000066400000000000000000000004311460670727200164250ustar00rootroot00000000000000#!/usr/bin/env python """ Parallel Hello World """ from mpi4py import MPI import sys size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() name = MPI.Get_processor_name() sys.stdout.write( "Hello, World! I am process %d of %d on %s.\n" % (rank, size, name)) mpi4py-3.1.6/demo/init-fini/000077500000000000000000000000001460670727200155705ustar00rootroot00000000000000mpi4py-3.1.6/demo/init-fini/makefile000066400000000000000000000006511460670727200172720ustar00rootroot00000000000000MPIEXEC = mpiexec NP_FLAG = -n NP = 3 PYTHON = python$(py) .PHONY: test test: ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_0.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_1.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_2a.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_2b.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_3.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_4.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_5.py mpi4py-3.1.6/demo/init-fini/runtests.bat000066400000000000000000000003251460670727200201470ustar00rootroot00000000000000@echo off setlocal ENABLEEXTENSIONS set PYTHON=python @echo on %PYTHON% test_0.py %PYTHON% test_1.py %PYTHON% test_2a.py %PYTHON% test_2b.py %PYTHON% test_3.py %PYTHON% test_4.py %PYTHON% test_5.py mpi4py-3.1.6/demo/init-fini/runtests.sh000077500000000000000000000005341460670727200200200ustar00rootroot00000000000000#!/bin/sh MPIEXEC=mpiexec NP_FLAG=-n NP=3 PYTHON=python set -x $MPIEXEC $NP_FLAG $NP $PYTHON test_0.py $MPIEXEC $NP_FLAG $NP $PYTHON test_1.py $MPIEXEC $NP_FLAG $NP $PYTHON test_2a.py $MPIEXEC $NP_FLAG $NP $PYTHON test_2b.py $MPIEXEC $NP_FLAG $NP $PYTHON test_3.py $MPIEXEC $NP_FLAG $NP $PYTHON test_4.py $MPIEXEC $NP_FLAG $NP $PYTHON test_5.py mpi4py-3.1.6/demo/init-fini/test_0.py000066400000000000000000000000551460670727200173400ustar00rootroot00000000000000from mpi4py import rc from mpi4py import MPI mpi4py-3.1.6/demo/init-fini/test_1.py000066400000000000000000000004161460670727200173420ustar00rootroot00000000000000from mpi4py import rc rc.initialize = False from mpi4py import MPI assert not MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Init() assert MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Finalize() assert MPI.Is_initialized() assert MPI.Is_finalized() mpi4py-3.1.6/demo/init-fini/test_2a.py000066400000000000000000000004501460670727200175020ustar00rootroot00000000000000from mpi4py import rc rc.initialize = False from mpi4py import MPI assert not MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Init_thread(MPI.THREAD_MULTIPLE) assert MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Finalize() assert MPI.Is_initialized() assert MPI.Is_finalized() mpi4py-3.1.6/demo/init-fini/test_2b.py000066400000000000000000000007551460670727200175130ustar00rootroot00000000000000from mpi4py import rc rc.initialize = False from 
mpi4py import MPI assert not MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Init_thread() assert MPI.Is_initialized() assert not MPI.Is_finalized() import sys name, _ = MPI.get_vendor() if name == 'MPICH': assert MPI.Query_thread() == MPI.THREAD_MULTIPLE if name == 'MPICH2' and sys.platform[:3] != 'win': assert MPI.Query_thread() == MPI.THREAD_MULTIPLE MPI.Finalize() assert MPI.Is_initialized() assert MPI.Is_finalized() mpi4py-3.1.6/demo/init-fini/test_3.py000066400000000000000000000001751460670727200173460ustar00rootroot00000000000000from mpi4py import rc rc.finalize = False from mpi4py import MPI assert MPI.Is_initialized() assert not MPI.Is_finalized() mpi4py-3.1.6/demo/init-fini/test_4.py000066400000000000000000000003031460670727200173400ustar00rootroot00000000000000from mpi4py import rc rc.finalize = False from mpi4py import MPI assert MPI.Is_initialized() assert not MPI.Is_finalized() MPI.Finalize() assert MPI.Is_initialized() assert MPI.Is_finalized() mpi4py-3.1.6/demo/init-fini/test_5.py000066400000000000000000000006051460670727200173460ustar00rootroot00000000000000from mpi4py import rc del rc.initialize del rc.threads del rc.thread_level del rc.finalize from mpi4py import MPI assert MPI.Is_initialized() assert not MPI.Is_finalized() import sys name, _ = MPI.get_vendor() if name == 'MPICH': assert MPI.Query_thread() == MPI.THREAD_MULTIPLE if name == 'MPICH2' and sys.platform[:3] != 'win': assert MPI.Query_thread() == MPI.THREAD_MULTIPLE mpi4py-3.1.6/demo/libmpi-cffi/000077500000000000000000000000001460670727200160635ustar00rootroot00000000000000mpi4py-3.1.6/demo/libmpi-cffi/apigen.py000066400000000000000000000015141460670727200177010ustar00rootroot00000000000000import sys, os.path as p wdir = p.abspath(p.dirname(__file__)) topdir = p.normpath(p.join(wdir, p.pardir, p.pardir)) srcdir = p.join(topdir, 'src') sys.path.insert(0, p.join(topdir, 'conf')) from mpiscanner import Scanner scanner = Scanner() libmpi_pxd = p.join(srcdir, 'mpi4py', 'libmpi.pxd') scanner.parse_file(libmpi_pxd) libmpi_h = p.join(wdir, 'libmpi.h') scanner.dump_header_h(libmpi_h) #try: # from cStringIO import StringIO #except ImportError: # from io import StringIO #libmpi_h = StringIO() #scanner.dump_header_h(libmpi_h) #print libmpi_h.read() libmpi_c = p.join(wdir, 'libmpi.c.in') with open(libmpi_c, 'w') as f: f.write("""\ #include #include "%(srcdir)s/lib-mpi/config.h" #include "%(srcdir)s/lib-mpi/missing.h" #include "%(srcdir)s/lib-mpi/fallback.h" #include "%(srcdir)s/lib-mpi/compat.h" """ % vars()) mpi4py-3.1.6/demo/libmpi-cffi/build.py000066400000000000000000000031501460670727200175330ustar00rootroot00000000000000import os import cffi ffi = cffi.FFI() with open("libmpi.c.in") as f: ffi.set_source("libmpi", f.read()) with open("libmpi.h") as f: ffi.cdef(f.read()) class mpicompiler(object): from cffi import ffiplatform def __init__(self, cc, ld=None): self.cc = cc self.ld = ld if ld else cc self.ffi_compile = self.ffiplatform.compile def __enter__(self): self.ffiplatform.compile = self.compile def __exit__(self, *args): self.ffiplatform.compile = self.ffi_compile def configure(self, compiler): from distutils.util import split_quoted from distutils.spawn import find_executable def fix_command(command, cmd): if not cmd: return cmd = split_quoted(cmd) exe = find_executable(cmd[0]) if not exe: return command[0] = exe command += cmd[1:] fix_command(compiler.compiler_so, self.cc) fix_command(compiler.linker_so, self.ld) def compile(self, *args, **kargs): from distutils.command import build_ext 
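        # Temporarily monkey-patch distutils' customize_compiler hook so that,
        # after the standard setup runs, configure() points the compile/link
        # commands at the MPI wrappers (MPICC/MPILD from the environment);
        # the original hook is restored in the finally block below.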
customize_compiler_orig = build_ext.customize_compiler def customize_compiler(compiler): customize_compiler_orig(compiler) self.configure(compiler) build_ext.customize_compiler = customize_compiler try: return self.ffi_compile(*args, **kargs) finally: build_ext.customize_compiler = customize_compiler_orig if __name__ == '__main__': cc = os.environ.get('MPICC', 'mpicc') ld = os.environ.get('MPILD') with mpicompiler(cc, ld): ffi.compile() mpi4py-3.1.6/demo/libmpi-cffi/makefile000066400000000000000000000007101460670727200175610ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) .PHONY: build build: libmpi.h libmpi.c.in $(PYTHON) build.py libmpi.h libmpi.c.in: $(PYTHON) apigen.py MPIEXEC = mpiexec NP_FLAG = -n .PHONY: test test: build $(MPIEXEC) $(NP_FLAG) 5 $(PYTHON) test_helloworld.py $(MPIEXEC) $(NP_FLAG) 4 $(PYTHON) test_ringtest.py $(MPIEXEC) $(NP_FLAG) 2 $(PYTHON) test_latency.py .PHONY: clean clean: $(RM) -r libmpi.* $(RM) -r *py[co] __pycache__ mpi4py-3.1.6/demo/libmpi-cffi/test_helloworld.py000066400000000000000000000010171460670727200216460ustar00rootroot00000000000000from libmpi import ffi, lib NULL = ffi.NULL size_p = ffi.new('int*') rank_p = ffi.new('int*') nlen_p = ffi.new('int*') name_p = ffi.new('char[]', lib.MPI_MAX_PROCESSOR_NAME); lib.MPI_Init(NULL, NULL); lib.MPI_Comm_size(lib.MPI_COMM_WORLD, size_p) lib.MPI_Comm_rank(lib.MPI_COMM_WORLD, rank_p) lib.MPI_Get_processor_name(name_p, nlen_p) size = size_p[0] rank = rank_p[0] nlen = nlen_p[0] name = ffi.string(name_p[0:nlen]) print("Hello, World! I am process %d of %d on %s." % (rank, size, name)) lib.MPI_Finalize() mpi4py-3.1.6/demo/libmpi-cffi/test_latency.py000066400000000000000000000040371460670727200211370ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from libmpi import ffi, lib def osu_latency( BENCHMARH = "MPI Latency Test", skip = 1000, loop = 10000, skip_large = 10, loop_large = 100, large_message_size = 8192, MAX_MSG_SIZE = 1<<22, ): myid = ffi.new('int*') numprocs = ffi.new('int*') lib.MPI_Comm_rank(lib.MPI_COMM_WORLD, myid) lib.MPI_Comm_size(lib.MPI_COMM_WORLD, numprocs) myid = myid[0] numprocs = numprocs[0] if numprocs != 2: if myid == 0: errmsg = "This test requires exactly two processes" else: errmsg = None raise SystemExit(errmsg) sbuf = ffi.new('unsigned char[]', MAX_MSG_SIZE) rbuf = ffi.new('unsigned char[]', MAX_MSG_SIZE) dtype = lib.MPI_BYTE tag = 1 comm = lib.MPI_COMM_WORLD status = lib.MPI_STATUS_IGNORE if myid == 0: print ('# %s' % (BENCHMARH,)) if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) message_sizes = [0] + [2**i for i in range(30)] for size in message_sizes: if size > MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) # lib.MPI_Barrier(comm) if myid == 0: for i in iterations: if i == skip: t_start = lib.MPI_Wtime() lib.MPI_Send(sbuf, size, dtype, 1, tag, comm) lib.MPI_Recv(rbuf, size, dtype, 1, tag, comm, status) t_end = lib.MPI_Wtime() elif myid == 1: for i in iterations: lib.MPI_Recv(rbuf, size, dtype, 0, tag, comm, status) lib.MPI_Send(sbuf, size, dtype, 0, tag, comm) # if myid == 0: latency = (t_end - t_start) * 1e6 / (2 * loop) print ('%-10d%20.2f' % (size, latency)) def main(): lib.MPI_Init(ffi.NULL, ffi.NULL) osu_latency() lib.MPI_Finalize() if __name__ == '__main__': main() mpi4py-3.1.6/demo/libmpi-cffi/test_ringtest.py000066400000000000000000000043761460670727200213450ustar00rootroot00000000000000from libmpi import ffi, lib def 
ring(comm, count=1, loop=1, skip=0): size_p = ffi.new('int*') rank_p = ffi.new('int*') lib.MPI_Comm_size(comm, size_p) lib.MPI_Comm_rank(comm, rank_p) size = size_p[0] rank = rank_p[0] source = (rank - 1) % size dest = (rank + 1) % size sbuf = ffi.new('unsigned char[]', [42]*count) rbuf = ffi.new('unsigned char[]', [ 0]*count) iterations = list(range((loop+skip))) if size == 1: for i in iterations: if i == skip: tic = lib.MPI_Wtime() lib.MPI_Sendrecv(sbuf, count, lib.MPI_BYTE, dest, 0, rbuf, count, lib.MPI_BYTE, source, 0, comm, lib.MPI_STATUS_IGNORE) else: if rank == 0: for i in iterations: if i == skip: tic = lib.MPI_Wtime() lib.MPI_Send(sbuf, count, lib.MPI_BYTE, dest, 0, comm) lib.MPI_Recv(rbuf, count, lib.MPI_BYTE, source, 0, comm, lib.MPI_STATUS_IGNORE) else: sbuf = rbuf for i in iterations: if i == skip: tic = lib.MPI_Wtime() lib.MPI_Recv(rbuf, count, lib.MPI_BYTE, source, 0, comm, lib.MPI_STATUS_IGNORE) lib.MPI_Send(sbuf, count, lib.MPI_BYTE, dest, 0, comm) toc = lib.MPI_Wtime() if rank == 0 and ffi.string(sbuf) != ffi.string(rbuf): import warnings, traceback try: warnings.warn("received message does not match!") except UserWarning: traceback.print_exc() lib.MPI_Abort(comm, 2) return toc - tic def ringtest(comm): size = ( 1 ) loop = ( 1 ) skip = ( 0 ) lib.MPI_Barrier(comm) elapsed = ring(comm, size, loop, skip) size_p = ffi.new('int*') rank_p = ffi.new('int*') lib.MPI_Comm_size(comm, size_p) lib.MPI_Comm_rank(comm, rank_p) comm_size = size_p[0] comm_rank = rank_p[0] if comm_rank == 0: print ("time for %d loops = %g seconds (%d processes, %d bytes)" % (loop, elapsed, comm_size, size)) def main(): lib.MPI_Init(ffi.NULL, ffi.NULL) ringtest(lib.MPI_COMM_WORLD) lib.MPI_Finalize() if __name__ == '__main__': main() mpi4py-3.1.6/demo/makefile000066400000000000000000000007231460670727200154040ustar00rootroot00000000000000.PHONY: default default: ${MAKE} -C compute-pi ${MAKE} -C mandelbrot ${MAKE} -C nxtval ${MAKE} -C reductions ${MAKE} -C sequential ${MAKE} -C spawning ${MAKE} -C wrap-c ${MAKE} -C wrap-f2py ${MAKE} -C wrap-swig ${MAKE} -C wrap-boost ${MAKE} -C wrap-cython ${MAKE} -C wrap-ctypes ${MAKE} -C wrap-cffi ${MAKE} -C cython ${MAKE} -C embedding ${MAKE} -C libmpi-cffi ${MAKE} -C mpi-ref-v1 ${MAKE} -C init-fini ${MAKE} -C threads ${MAKE} -C futures mpi4py-3.1.6/demo/mandelbrot/000077500000000000000000000000001460670727200160315ustar00rootroot00000000000000mpi4py-3.1.6/demo/mandelbrot/makefile000066400000000000000000000007671460670727200175430ustar00rootroot00000000000000.PHONY: default build test clean default: build test clean build: mandelbrot-worker.exe MPIF90=mpif90 FFLAGS= -O3 ifneq (${MPI_FORTRAN_MOD_DIR},) FFLAGS += -I${MPI_FORTRAN_MOD_DIR} endif mandelbrot-worker.exe: mandelbrot-worker.f90 ${MPIF90} ${FFLAGS} -o $@ $< MPIEXEC = mpiexec NP_FLAG = -n PYTHON = python$(py) test: build ${MPIEXEC} ${NP_FLAG} 1 ${PYTHON} mandelbrot-master.py ${MPIEXEC} ${NP_FLAG} 7 ${PYTHON} mandelbrot.py ${PYTHON} mandelbrot-seq.py clean: ${RM} mandelbrot-worker.exe mpi4py-3.1.6/demo/mandelbrot/mandelbrot-master.py000066400000000000000000000026041460670727200220250ustar00rootroot00000000000000from mpi4py import MPI import numpy as np x1 = -2.0 x2 = 1.0 y1 = -1.0 y2 = 1.0 w = 600 h = 400 maxit = 255 import os dirname = os.path.abspath(os.path.dirname(__file__)) executable = os.path.join(dirname, 'mandelbrot-worker.exe') # spawn worker worker = MPI.COMM_SELF.Spawn(executable, maxprocs=7) size = worker.Get_remote_size() # send parameters rmsg = np.array([x1, x2, y1, y2], dtype='f') imsg = np.array([w, 
h, maxit], dtype='i') worker.Bcast([rmsg, MPI.REAL], root=MPI.ROOT) worker.Bcast([imsg, MPI.INTEGER], root=MPI.ROOT) # gather results counts = np.empty(size, dtype='i') indices = np.empty(h, dtype='i') cdata = np.empty([h, w], dtype='i') worker.Gather(sendbuf=None, recvbuf=[counts, MPI.INTEGER], root=MPI.ROOT) worker.Gatherv(sendbuf=None, recvbuf=[indices, (counts, None), MPI.INTEGER], root=MPI.ROOT) worker.Gatherv(sendbuf=None, recvbuf=[cdata, (counts * w, None), MPI.INTEGER], root=MPI.ROOT) # disconnect worker worker.Disconnect() # reconstruct full result M = np.zeros([h, w], dtype='i') M[indices, :] = cdata # eye candy (requires matplotlib) if 1: try: from matplotlib import pyplot as plt plt.imshow(M, aspect='equal') try: plt.nipy_spectral() except AttributeError: plt.spectral() plt.pause(2) except: pass mpi4py-3.1.6/demo/mandelbrot/mandelbrot-seq.py000066400000000000000000000015211460670727200213170ustar00rootroot00000000000000import numpy as np import time tic = time.time() x1 = -2.0 x2 = 1.0 y1 = -1.0 y2 = 1.0 w = 150 h = 100 maxit = 127 def mandelbrot(x, y, maxit): c = x + y*1j z = 0 + 0j it = 0 while abs(z) < 2 and it < maxit: z = z**2 + c it += 1 return it dx = (x2 - x1) / w dy = (y2 - y1) / h C = np.empty([h, w], dtype='i') for k in np.arange(h): y = y1 + k * dy for j in np.arange(w): x = x1 + j * dx C[k, j] = mandelbrot(x, y, maxit) M = C toc = time.time() print('wall clock time: %8.2f seconds' % (toc-tic)) # eye candy (requires matplotlib) if 1: try: from matplotlib import pyplot as plt plt.imshow(M, aspect='equal') try: plt.nipy_spectral() except AttributeError: plt.spectral() plt.pause(2) except: pass mpi4py-3.1.6/demo/mandelbrot/mandelbrot-worker.f90000066400000000000000000000044731460670727200220170ustar00rootroot00000000000000! $ mpif90 -o mandelbrot.exe mandelbrot.f90 program main use MPI implicit none integer master, nprocs, myrank, ierr real :: rmsg(4), x1, x2, y1, y2 integer :: imsg(3), w, h, maxit integer :: N integer, allocatable :: I(:) integer, allocatable :: C(:,:) integer :: j, k real :: x, dx, y, dy call MPI_Init(ierr) call MPI_Comm_get_parent(master, ierr) if (master == MPI_COMM_NULL) then print *, "parent communicator is MPI_COMM_NULL" call MPI_Abort(MPI_COMM_WORLD, 1, ierr) end if call MPI_Comm_size(master, nprocs, ierr) call MPI_Comm_rank(master, myrank, ierr) ! receive parameters and unpack call MPI_Bcast(rmsg, 4, MPI_REAL, 0, master, ierr) call MPI_Bcast(imsg, 3, MPI_INTEGER, 0, master, ierr) x1 = rmsg(1); x2 = rmsg(2) y1 = rmsg(3); y2 = rmsg(4) w = imsg(1); h = imsg(2); maxit = imsg(3) dx = (x2-x1)/real(w) dy = (y2-y1)/real(h) ! number of lines to compute here N = h / nprocs if (modulo(h, nprocs) > myrank) then N = N + 1 end if ! indices of lines to compute here allocate( I(0:N-1) ) I = (/ (k, k=myrank, h-1, nprocs) /) ! compute local lines allocate( C(0:w-1, 0:N-1) ) do k = 0, N-1 y = y1 + real(I(k)) * dy do j = 0, w-1 x = x1 + real(j) * dx C(j, k) = mandelbrot(x, y, maxit) end do end do ! send number of lines computed here call MPI_Gather(N, 1, MPI_INTEGER, & MPI_BOTTOM, 0, MPI_BYTE, & 0, master, ierr) ! send indices of lines computed here call MPI_Gatherv(I, N, MPI_INTEGER, & MPI_BOTTOM, MPI_BOTTOM, MPI_BOTTOM, MPI_BYTE, & 0, master, ierr) ! send data of lines computed here call MPI_Gatherv(C, N*w, MPI_INTEGER, & MPI_BOTTOM, MPI_BOTTOM, MPI_BOTTOM, MPI_BYTE, & 0, master, ierr) deallocate(C) deallocate(I) ! 
we are done call MPI_Comm_disconnect(master, ierr) call MPI_Finalize(ierr) contains function mandelbrot(x, y, maxit) result (it) implicit none real, intent(in) :: x, y integer, intent(in) :: maxit integer :: it complex :: z, c z = cmplx(0, 0) c = cmplx(x, y) it = 0 do while (abs(z) < 2.0 .and. it < maxit) z = z*z + c it = it + 1 end do end function mandelbrot end program main mpi4py-3.1.6/demo/mandelbrot/mandelbrot.py000066400000000000000000000046251460670727200205410ustar00rootroot00000000000000from mpi4py import MPI import numpy as np tic = MPI.Wtime() x1 = -2.0 x2 = 1.0 y1 = -1.0 y2 = 1.0 w = 150 h = 100 maxit = 127 def mandelbrot(x, y, maxit): c = x + y*1j z = 0 + 0j it = 0 while abs(z) < 2 and it < maxit: z = z**2 + c it += 1 return it comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() rmsg = np.empty(4, dtype='f') imsg = np.empty(3, dtype='i') if rank == 0: rmsg[:] = [x1, x2, y1, y2] imsg[:] = [w, h, maxit] comm.Bcast([rmsg, MPI.FLOAT], root=0) comm.Bcast([imsg, MPI.INT], root=0) x1, x2, y1, y2 = [float(r) for r in rmsg] w, h, maxit = [int(i) for i in imsg] dx = (x2 - x1) / w dy = (y2 - y1) / h # number of lines to compute here N = h // size + (h % size > rank) N = np.array(N, dtype='i') # indices of lines to compute here I = np.arange(rank, h, size, dtype='i') # compute local lines C = np.empty([N, w], dtype='i') for k in np.arange(N): y = y1 + I[k] * dy for j in np.arange(w): x = x1 + j * dx C[k, j] = mandelbrot(x, y, maxit) # gather results at root counts = 0 indices = None cdata = None if rank == 0: counts = np.empty(size, dtype='i') indices = np.empty(h, dtype='i') cdata = np.empty([h, w], dtype='i') comm.Gather(sendbuf=[N, MPI.INT], recvbuf=[counts, MPI.INT], root=0) comm.Gatherv(sendbuf=[I, MPI.INT], recvbuf=[indices, (counts, None), MPI.INT], root=0) comm.Gatherv(sendbuf=[C, MPI.INT], recvbuf=[cdata, (counts*w, None), MPI.INT], root=0) # reconstruct full result at root if rank == 0: M = np.zeros([h,w], dtype='i') M[indices, :] = cdata toc = MPI.Wtime() wct = comm.gather(toc-tic, root=0) if rank == 0: for task, time in enumerate(wct): print('wall clock time: %8.2f seconds (task %d)' % (time, task)) def mean(seq): return sum(seq)/len(seq) print ('all tasks, mean: %8.2f seconds' % mean(wct)) print ('all tasks, min: %8.2f seconds' % min(wct)) print ('all tasks, max: %8.2f seconds' % max(wct)) print ('all tasks, sum: %8.2f seconds' % sum(wct)) # eye candy (requires matplotlib) if rank == 0: try: from matplotlib import pyplot as plt plt.imshow(M, aspect='equal') try: plt.nipy_spectral() except AttributeError: plt.spectral() plt.pause(2) except: pass MPI.COMM_WORLD.Barrier() mpi4py-3.1.6/demo/mpe-logging/000077500000000000000000000000001460670727200161075ustar00rootroot00000000000000mpi4py-3.1.6/demo/mpe-logging/cpilog.py000066400000000000000000000021261460670727200177370ustar00rootroot00000000000000#!/usr/bin/env python # If you want MPE to log MPI calls, you have to add the two lines # below at the very beginning of your main bootstrap script. 
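# (They must run before the first 'from mpi4py import MPI' executed anywhere
# in the program; otherwise the MPE logging hooks are not installed.)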
import mpi4py mpi4py.profile('mpe', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI if 0: # <- use '1' to disable logging of MPI calls MPI.Pcontrol(0) # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array('i', [0]) pi = array('d', [0]) mypi = array('d', [0]) def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n; s = 0.0; for i in range(myrank + 1, n + 1, nprocs): x = h * (i - 0.5); s += 4.0 / (1.0 + x**2); return s * h comm.Barrier() for N in [10000]*10: if myrank == 0: n[0] = N comm.Bcast([n, MPI.INT], root=0) mypi[0] = comp_pi(n[0], myrank, nprocs) comm.Reduce([mypi, MPI.DOUBLE], [pi, MPI.DOUBLE], op=MPI.SUM, root=0) comm.Barrier() sleep(0.01) mpi4py-3.1.6/demo/mpe-logging/makefile000066400000000000000000000013331460670727200176070ustar00rootroot00000000000000MPIEXEC = mpiexec N = 8 PYTHON = python$(py) .PHONY: default default: build test clean .PHONY: run-cpilog run-ring run-threads run run: run-cpilog run-ring run-threads run-cpilog: ${MPIEXEC} -n ${N} ${PYTHON} cpilog.py run-ring: ${MPIEXEC} -n ${N} ${PYTHON} ring.py run-threads: ${MPIEXEC} -n ${N} ${PYTHON} threads.py .PHONY: view-cpilog view-ring view-threads view view: view-cpilog view-ring view-threads view-cpilog: cpilog.slog2 jumpshot $< view-ring: ring.slog2 jumpshot $< view-threads: threads.slog2 jumpshot $< cpilog.clog2: run-cpilog ring.clog2: run-ring threads.clog2: run-threads %.slog2: %.clog2 clog2TOslog2 $< .PHONY: build build: run .PHONY: test test: .PHONY: clean clean: ${RM} *.[cs]log2 mpi4py-3.1.6/demo/mpe-logging/ring.py000066400000000000000000000013101460670727200174130ustar00rootroot00000000000000#!/usr/bin/env python import os os.environ['MPE_LOGFILE_PREFIX'] = 'ring' import mpi4py mpi4py.profile('mpe') from mpi4py import MPI from array import array comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank-1 dest = rank+1 if rank == 0: src = size-1 if rank == size-1: dest = 0 try: from numpy import zeros a1 = zeros(1000000, 'd') a2 = zeros(1000000, 'd') except ImportError: from array import array a1 = array('d', [0]*1000); a1 *= 1000 a2 = array('d', [0]*1000); a2 *= 1000 comm.Sendrecv(sendbuf=a1, recvbuf=a2, source=src, dest=dest) MPI.Request.Waitall([ comm.Isend(a1, dest=dest), comm.Irecv(a2, source=src), ]) mpi4py-3.1.6/demo/mpe-logging/threads.py000066400000000000000000000013711460670727200201150ustar00rootroot00000000000000import sys import mpi4py mpi4py.profile('mpe', logfile='threads') from mpi4py import MPI from array import array try: import threading except ImportError: sys.stderr.write("threading module not available\n") sys.exit(0) send_msg = array('i', [7]*1000); send_msg *= 1000 recv_msg = array('i', [0]*1000); recv_msg *= 1000 def self_send(comm, rank): comm.Send([send_msg, MPI.INT], dest=rank, tag=0) def self_recv(comm, rank): comm.Recv([recv_msg, MPI.INT], source=rank, tag=0) comm = MPI.COMM_WORLD rank = comm.Get_rank() send_thread = threading.Thread(target=self_send, args=(comm, rank)) recv_thread = threading.Thread(target=self_recv, args=(comm, rank)) send_thread.start() recv_thread.start() recv_thread.join() send_thread.join() mpi4py-3.1.6/demo/mpi-ref-v1/000077500000000000000000000000001460670727200155655ustar00rootroot00000000000000mpi4py-3.1.6/demo/mpi-ref-v1/README.txt000066400000000000000000000006601460670727200172650ustar00rootroot00000000000000@Book{MPI-Ref-V1, title = {{MPI} - The 
Complete Reference: Volume 1, The {MPI} Core}, author = {Marc Snir and Steve Otto and Steven Huss-Lederman and David Walker and Jack Dongarra}, edition = {2nd.}, year = 1998, publisher = {MIT Press}, volume = {1, The MPI Core}, series = {Scientific and Engineering Computation}, address = {Cambridge, MA, USA}, } mpi4py-3.1.6/demo/mpi-ref-v1/ex-2.01.py000066400000000000000000000017571460670727200171430ustar00rootroot00000000000000## mpiexec -n 2 python ex-2.01.py # Process 0 sends a message to process 1 # -------------------------------------------------------------------- from mpi4py import MPI import array if MPI.COMM_WORLD.Get_size() < 2: raise SystemExit # -------------------------------------------------------------------- s = "Hello there" msg = array.array('c', '\0'*20) tag = 99 status = MPI.Status() myrank = MPI.COMM_WORLD.Get_rank() if myrank == 0: msg[:len(s)] = array.array('c', s) MPI.COMM_WORLD.Send([msg, len(s)+1, MPI.CHAR], 1, tag) elif myrank == 1: MPI.COMM_WORLD.Recv([msg, 20, MPI.CHAR], 0, tag, status) # -------------------------------------------------------------------- if myrank == 1: assert list(msg[:len(s)]) == list(s) assert msg[len(s)] == '\0' assert status.source == 0 assert status.tag == tag assert status.error == MPI.SUCCESS assert status.Get_count(MPI.CHAR) == len(s)+1 # -------------------------------------------------------------------- mpi4py-3.1.6/demo/mpi-ref-v1/ex-2.08.py000066400000000000000000000024651460670727200171470ustar00rootroot00000000000000## mpiexec -n 2 python ex-2.08.py # An exchange of messages # -------------------------------------------------------------------- from mpi4py import MPI import array if MPI.COMM_WORLD.Get_size() < 2: raise SystemExit # -------------------------------------------------------------------- sendbuf = array.array('d', [0]*10) recvbuf = array.array('d', [0]*10) tag = 0 status = MPI.Status() myrank = MPI.COMM_WORLD.Get_rank() if myrank == 0: sendbuf[:] = array.array('d', range(len(sendbuf))) MPI.COMM_WORLD.Send([sendbuf, MPI.DOUBLE], 1, tag) MPI.COMM_WORLD.Recv([recvbuf, MPI.DOUBLE], 1, tag, status) elif myrank == 1: MPI.COMM_WORLD.Recv([recvbuf, MPI.DOUBLE], 0, tag, status) sendbuf[:] = recvbuf MPI.COMM_WORLD.Send([sendbuf, MPI.DOUBLE], 0, tag) # -------------------------------------------------------------------- if myrank == 0: assert status.source == 1 assert status.tag == tag assert status.error == MPI.SUCCESS assert status.Get_count(MPI.DOUBLE) == len(recvbuf) assert sendbuf == recvbuf elif myrank == 1: assert status.source == 0 assert status.tag == tag assert status.error == MPI.SUCCESS assert status.Get_count(MPI.DOUBLE) == len(recvbuf) assert sendbuf == recvbuf # -------------------------------------------------------------------- mpi4py-3.1.6/demo/mpi-ref-v1/ex-2.16.py000066400000000000000000000035141460670727200171420ustar00rootroot00000000000000## mpiexec -n 4 python ex-2.16.py # Jacobi code # version of parallel code using sendrecv and null proceses. 
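# Boundary ranks use MPI.PROC_NULL as their missing neighbor, so the same
# Sendrecv calls work at the edges: communication with a null process
# completes immediately as a no-op.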
# -------------------------------------------------------------------- from mpi4py import MPI try: import numpy except ImportError: raise SystemExit # -------------------------------------------------------------------- n = 5 * MPI.COMM_WORLD.Get_size() # compute number of processes and myrank p = MPI.COMM_WORLD.Get_size() myrank = MPI.COMM_WORLD.Get_rank() # compute size of local block m = n/p if myrank < (n - p * m): m = m + 1 #compute neighbors if myrank == 0: left = MPI.PROC_NULL else: left = myrank - 1 if myrank == p - 1: right = MPI.PROC_NULL else: right = myrank + 1 # allocate local arrays A = numpy.empty((n+2, m+2), dtype='d', order='fortran') B = numpy.empty((n, m), dtype='d', order='fortran') A.fill(1) A[0, :] = A[-1, :] = 0 A[:, 0] = A[:, -1] = 0 # main loop converged = False while not converged: # compute, B = 0.25 * ( N + S + E + W) N, S = A[:-2, 1:-1], A[2:, 1:-1] E, W = A[1:-1, :-2], A[1:-1, 2:] numpy.add(N, S, B) numpy.add(E, B, B) numpy.add(W, B, B) B *= 0.25 A[1:-1, 1:-1] = B # communicate tag = 0 MPI.COMM_WORLD.Sendrecv([B[:, -1], MPI.DOUBLE], right, tag, [A[:, 0], MPI.DOUBLE], left, tag) MPI.COMM_WORLD.Sendrecv((B[:, 0], MPI.DOUBLE), left, tag, (A[:, -1], MPI.DOUBLE), right, tag) # convergence myconv = numpy.allclose(B, 0) loc_conv = numpy.asarray(myconv, dtype='i') glb_conv = numpy.asarray(0, dtype='i') MPI.COMM_WORLD.Allreduce([loc_conv, MPI.INT], [glb_conv, MPI.INT], op=MPI.LAND) converged = bool(glb_conv) # -------------------------------------------------------------------- mpi4py-3.1.6/demo/mpi-ref-v1/ex-2.29.py000066400000000000000000000022301460670727200171400ustar00rootroot00000000000000## mpiexec -n 3 python ex-2.29.py # Use a blocking probe to wait for an incoming message # -------------------------------------------------------------------- from mpi4py import MPI import array if MPI.COMM_WORLD.Get_size() < 3: raise SystemExit # -------------------------------------------------------------------- comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: i = array.array('i', [7]*5) comm.Send([i, MPI.INT], 2, 0) elif rank == 1: x = array.array('f', [7]*5) comm.Send([x, MPI.FLOAT], 2, 0) elif rank == 2: i = array.array('i', [0]*5) x = array.array('f', [0]*5) status = MPI.Status() for j in range(2): comm.Probe(MPI.ANY_SOURCE, 0, status) if status.Get_source() == 0: comm.Recv([i, MPI.INT], 0, 0, status) else: comm.Recv([x, MPI.FLOAT], 1, 0, status) # -------------------------------------------------------------------- if rank == 2: for v in i: assert v == 7 for v in x: assert v == 7 assert status.source in (0, 1) assert status.tag == 0 assert status.error == 0 # -------------------------------------------------------------------- mpi4py-3.1.6/demo/mpi-ref-v1/ex-2.32.py000066400000000000000000000044131460670727200171370ustar00rootroot00000000000000# Jacobi computation, using persitent requests from mpi4py import MPI try: import numpy except ImportError: raise SystemExit n = 5 * MPI.COMM_WORLD.Get_size() # compute number of processes and myrank p = MPI.COMM_WORLD.Get_size() myrank = MPI.COMM_WORLD.Get_rank() # compute size of local block m = n/p if myrank < (n - p * m): m = m + 1 #compute neighbors if myrank == 0: left = MPI.PROC_NULL else: left = myrank - 1 if myrank == p - 1: right = MPI.PROC_NULL else: right = myrank + 1 # allocate local arrays A = numpy.empty((n+2, m+2), dtype=float, order='fortran') B = numpy.empty((n, m), dtype=float, order='fortran') A.fill(1) A[0, :] = A[-1, :] = 0 A[:, 0] = A[:, -1] = 0 # create persintent requests tag = 0 sreq1 = 
MPI.COMM_WORLD.Send_init((B[:, 0], MPI.DOUBLE), left, tag) sreq2 = MPI.COMM_WORLD.Send_init((B[:, -1], MPI.DOUBLE), right, tag) rreq1 = MPI.COMM_WORLD.Recv_init((A[:, 0], MPI.DOUBLE), left, tag) rreq2 = MPI.COMM_WORLD.Recv_init((A[:, -1], MPI.DOUBLE), right, tag) reqlist = [sreq1, sreq2, rreq1, rreq2] for req in reqlist: assert req != MPI.REQUEST_NULL # main loop converged = False while not converged: # compute boundary columns N, S = A[ :-2, 1], A[2:, 1] E, W = A[1:-1, 0], A[1:-1, 2] C = B[:, 0] numpy.add(N, S, C) numpy.add(C, E, C) numpy.add(C, W, C) C *= 0.25 N, S = A[ :-2, -2], A[2:, -2] E, W = A[1:-1, -3], A[1:-1, -1] C = B[:, -1] numpy.add(N, S, C) numpy.add(C, E, C) numpy.add(C, W, C) C *= 0.25 # start communication #MPI.Prequest.Startall(reqlist) for r in reqlist: r.Start() # compute interior N, S = A[ :-2, 2:-2], A[2, 2:-2] E, W = A[1:-1, 2:-2], A[1:-1, 2:-2] C = B[:, 1:-1] numpy.add(N, S, C) numpy.add(E, C, C) numpy.add(W, C, C) C *= 0.25 A[1:-1, 1:-1] = B # complete communication MPI.Prequest.Waitall(reqlist) # convergence myconv = numpy.allclose(B, 0) loc_conv = numpy.asarray(myconv, dtype='i') glb_conv = numpy.asarray(0, dtype='i') MPI.COMM_WORLD.Allreduce([loc_conv, MPI.INT], [glb_conv, MPI.INT], op=MPI.LAND) converged = bool(glb_conv) # free persintent requests for req in reqlist: req.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-2.34.py000066400000000000000000000022461460670727200171430ustar00rootroot00000000000000## mpiexec -n 2 python ex-2.34.py # Use of ready-mode and synchonous-mode # -------------------------------------------------------------------- from mpi4py import MPI try: import numpy except ImportError: raise SystemExit if MPI.COMM_WORLD.Get_size() < 2: raise SystemExit # -------------------------------------------------------------------- comm = MPI.COMM_WORLD buff = numpy.empty((1000,2), dtype='f', order='fortran') rank = comm.Get_rank() if rank == 0: req1 = comm.Irecv([buff[:, 0], MPI.FLOAT], 1, 1) req2 = comm.Irecv([buff[:, 1], MPI.FLOAT], 1, 2) status = [MPI.Status(), MPI.Status()] MPI.Request.Waitall([req1, req2], status) elif rank == 1: buff[:, 0] = 5 buff[:, 1] = 7 comm.Ssend([buff[:, 1], MPI.FLOAT], 0, 2) comm.Rsend([buff[:, 0], MPI.FLOAT], 0, 1) # -------------------------------------------------------------------- all = numpy.all if rank == 0: assert all(buff[:, 0] == 5) assert all(buff[:, 1] == 7) assert status[0].source == 1 assert status[0].tag == 1 assert status[1].source == 1 assert status[1].tag == 2 # -------------------------------------------------------------------- mpi4py-3.1.6/demo/mpi-ref-v1/ex-2.35.py000066400000000000000000000013371460670727200171440ustar00rootroot00000000000000## mpiexec -n 1 python ex-2.35.py # Calls to attach and detach buffers # -------------------------------------------------------------------- from mpi4py import MPI try: from numpy import empty except ImportError: from array import array def empty(size, dtype): return array(dtype, [0]*size) # -------------------------------------------------------------------- BUFSISE = 10000 + MPI.BSEND_OVERHEAD buff = empty(BUFSISE, dtype='b') MPI.Attach_buffer(buff) buff2 = MPI.Detach_buffer() MPI.Attach_buffer(buff2) MPI.Detach_buffer() # -------------------------------------------------------------------- assert len(buff2) == BUFSISE # -------------------------------------------------------------------- mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.01.py000066400000000000000000000013661460670727200171400ustar00rootroot00000000000000from mpi4py import MPI try: import numpy except ImportError: 
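    # this demo needs NumPy; exit quietly when it is not available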
raise SystemExit # send a upper triangular matrix N = 10 a = numpy.empty((N, N), dtype=float, order='c') b = numpy.zeros((N, N), dtype=float, order='c') a.flat = numpy.arange(a.size, dtype=float) # compute start and size of each row i = numpy.arange(N) blocklen = N - i disp = N * i + i # create datatype for upper triangular part upper = MPI.DOUBLE.Create_indexed(blocklen, disp) upper.Commit() # send and recv matrix myrank = MPI.COMM_WORLD.Get_rank() MPI.COMM_WORLD.Sendrecv((a, 1, upper), myrank, 0, [b, 1, upper], myrank, 0) assert numpy.allclose(numpy.triu(b), numpy.triu(a)) assert numpy.allclose(numpy.tril(b, -1), numpy.zeros((N,N))) upper.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.02.py000066400000000000000000000003551460670727200171360ustar00rootroot00000000000000from mpi4py import MPI # Type = { (double, 0), (char, 8) } blens = (1, 1) disps = (0, MPI.DOUBLE.size) types = (MPI.DOUBLE, MPI.CHAR) dtype = MPI.Datatype.Create_struct(blens, disps, types) if 'ex-3.02' in __file__: dtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.03.py000066400000000000000000000001751460670727200171370ustar00rootroot00000000000000execfile('ex-3.02.py') assert dtype.size == MPI.DOUBLE.size + MPI.CHAR.size assert dtype.extent >= dtype.size dtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.04.py000066400000000000000000000002271460670727200171360ustar00rootroot00000000000000execfile('ex-3.02.py') count = 3 newtype = dtype.Create_contiguous(count) assert newtype.extent == dtype.extent * count dtype.Free() newtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.05.py000066400000000000000000000002771460670727200171440ustar00rootroot00000000000000execfile('ex-3.02.py') count = 2 blklen = 3 stride = 4 newtype = dtype.Create_vector(count, blklen, stride) assert newtype.size == dtype.size * count * blklen dtype.Free() newtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.06.py000066400000000000000000000003001460670727200171300ustar00rootroot00000000000000execfile('ex-3.02.py') count = 3 blklen = 1 stride = -2 newtype = dtype.Create_vector(count, blklen, stride) assert newtype.size == dtype.size * count * blklen dtype.Free() newtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.07.py000066400000000000000000000003171460670727200171410ustar00rootroot00000000000000execfile('ex-3.02.py') count = 2 blklen = 3 stride = 4 * dtype.extent newtype = dtype.Create_hvector(count, blklen, stride) assert newtype.size == dtype.size * count * blklen dtype.Free() newtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.08.py000066400000000000000000000016021460670727200171400ustar00rootroot00000000000000from mpi4py import MPI try: import numpy except ImportError: raise SystemExit # extract the section a[0:6:2, 0:5:2] and store it in e[:,:] a = numpy.empty((6, 5), dtype=float, order='fortran') e = numpy.empty((3, 3), dtype=float, order='fortran') a.flat = numpy.arange(a.size, dtype=float) lb, sizeofdouble = MPI.DOUBLE.Get_extent() # create datatype for a 1D section oneslice = MPI.DOUBLE.Create_vector(3, 1, 2) # create datatype for a 2D section twoslice = oneslice.Create_hvector(3, 1, 12*sizeofdouble) twoslice.Commit() # send and recv on same process myrank = MPI.COMM_WORLD.Get_rank() status = MPI.Status() MPI.COMM_WORLD.Sendrecv([a, 1, twoslice], myrank, 0, (e, MPI.DOUBLE), myrank, 0, status) assert numpy.allclose(a[::2, ::2], e) assert status.Get_count(twoslice) == 1 assert status.Get_count(MPI.DOUBLE) == e.size oneslice.Free() twoslice.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.09.py000066400000000000000000000020401460670727200171360ustar00rootroot00000000000000from mpi4py 
import MPI try: import numpy except ImportError: raise SystemExit # transpose a matrix a into b a = numpy.empty((100, 100), dtype=float, order='fortran') b = numpy.empty((100, 100), dtype=float, order='fortran') a.flat = numpy.arange(a.size, dtype=float) lb, sizeofdouble = MPI.DOUBLE.Get_extent() # create datatype dor one row # (vector with 100 double entries and stride 100) row = MPI.DOUBLE.Create_vector(100, 1, 100) # create datatype for matrix in row-major order # (one hundred copies of the row datatype, strided one word # apart; the succesive row datatypes are interleaved) xpose = row.Create_hvector(100, 1, sizeofdouble) xpose.Commit() # send matrix in row-major order and receive in column major order abuf = (a, xpose) bbuf = (b, MPI.DOUBLE) myrank = MPI.COMM_WORLD.Get_rank() status = MPI.Status() MPI.COMM_WORLD.Sendrecv(abuf, myrank, 0, bbuf, myrank, 0, status) assert numpy.allclose(a, b.transpose()) assert status.Get_count(xpose) == 1 assert status.Get_count(MPI.DOUBLE) == b.size row.Free() xpose.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.11.py000066400000000000000000000001601460670727200171300ustar00rootroot00000000000000execfile('ex-3.02.py') B = (3, 1) D = (4, 0) newtype = dtype.Create_indexed(B, D) dtype.Free() newtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.12.py000066400000000000000000000002001460670727200171240ustar00rootroot00000000000000execfile('ex-3.02.py') B = (3, 1) D = (4 * dtype.extent, 0) newtype = dtype.Create_hindexed(B, D) dtype.Free() newtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/ex-3.13.py000066400000000000000000000004421460670727200171350ustar00rootroot00000000000000from mpi4py import MPI blens = (1, 1) disps = (0, MPI.DOUBLE.size) types = (MPI.DOUBLE, MPI.CHAR) type1 = MPI.Datatype.Create_struct(blens, disps, types) B = (2, 1, 3) D = (0, 16, 26) T = (MPI.FLOAT, type1, MPI.CHAR) dtype = MPI.Datatype.Create_struct(B, D, T) type1.Free() dtype.Free() mpi4py-3.1.6/demo/mpi-ref-v1/makefile000066400000000000000000000006001460670727200172610ustar00rootroot00000000000000.PHONY: default build test clean test_seq test_mpi default: build test clean build: PYTHON = python MPIEXEC = mpiexec NP_FLAG = -n NP = 3 test_seq: ${MAKE} MPIEXEC= NP_FLAG= NP= test_mpi test_mpi: -@for i in `ls ex-*.py`; do \ echo ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} $$i; \ ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} $$i; \ done test: test_seq test_mpi clean: mpi4py-3.1.6/demo/mpi-ref-v1/runtests.bat000066400000000000000000000022221460670727200201420ustar00rootroot00000000000000@echo off setlocal ENABLEEXTENSIONS set MPI=Microsoft MPI set PATH="%ProgramFiles%\%MPI%\bin";%PATH% set MPIEXEC=mpiexec set NP_FLAG=-n set NP=5 set PYTHON=C:\Python27\python.exe set PYTHON=C:\Python36\python.exe set PYTHON=python @echo on set MPIEXEC= set NP_FLAG= set NP= %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-2.01.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-2.08.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-2.16.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-2.29.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-2.32.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-2.34.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-2.35.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.01.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.02.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.03.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.04.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.05.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.06.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.07.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.08.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.09.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.11.py %MPIEXEC% 
%NP_FLAG% %NP% %PYTHON% ex-3.12.py %MPIEXEC% %NP_FLAG% %NP% %PYTHON% ex-3.13.py mpi4py-3.1.6/demo/mpi-ref-v1/runtests.sh000077500000000000000000000015151460670727200200150ustar00rootroot00000000000000#!/bin/sh MPIEXEC=mpiexec NP_FLAG=-n NP=3 PYTHON=python set -x $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.01.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.08.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.16.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.29.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.32.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.34.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-2.35.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.01.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.02.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.03.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.04.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.05.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.06.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.07.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.08.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.09.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.11.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.12.py $MPIEXEC $NP_FLAG $NP $PYTHON ex-3.13.py mpi4py-3.1.6/demo/nxtval/000077500000000000000000000000001460670727200152165ustar00rootroot00000000000000mpi4py-3.1.6/demo/nxtval/makefile000066400000000000000000000005541460670727200167220ustar00rootroot00000000000000MPIEXEC = mpiexec NP_FLAG = -n NP = 5 PYTHON = python$(py) .PHONY: test test: ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} nxtval-threads.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} nxtval-dynproc.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} nxtval-onesided.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} nxtval-scalable.py ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} nxtval-mpi3.py mpi4py-3.1.6/demo/nxtval/nxtval-dynproc.py000066400000000000000000000040271460670727200205630ustar00rootroot00000000000000# -------------------------------------------------------------------- from mpi4py import MPI import sys, os class Counter(object): def __init__(self, comm): assert not comm.Is_inter() self.comm = comm.Dup() # start counter process script = os.path.abspath(__file__) if script[-4:] in ('.pyc', '.pyo'): script = script[:-1] self.child = self.comm.Spawn(sys.executable, [script, '--child'], 1) def free(self): self.comm.Barrier() # stop counter process rank = self.child.Get_rank() if rank == 0: self.child.send(None, 0, 1) self.child.Disconnect() # self.comm.Free() def next(self): # incr = 1 self.child.send(incr, 0, 0) ival = self.child.recv(None, 0, 0) nxtval = ival # return nxtval # -------------------------------------------------------------------- def _counter_child(): parent = MPI.Comm.Get_parent() assert parent != MPI.COMM_NULL try: counter = 0 status = MPI.Status() any_src, any_tag = MPI.ANY_SOURCE, MPI.ANY_TAG while True: # server loop incr = parent.recv(None, any_src, any_tag, status) if status.tag == 1: break parent.send(counter, status.source, 0) counter += incr finally: parent.Disconnect() if __name__ == '__main__': if (len(sys.argv) > 1 and sys.argv[0] == __file__ and sys.argv[1] == '--child'): _counter_child() sys.exit(0) # -------------------------------------------------------------------- def test(): vals = [] counter = Counter(MPI.COMM_WORLD) for i in range(5): c = counter.next() vals.append(c) counter.free() # vals = MPI.COMM_WORLD.allreduce(vals) assert sorted(vals) == list(range(len(vals))) if __name__ == '__main__': test() # -------------------------------------------------------------------- mpi4py-3.1.6/demo/nxtval/nxtval-mpi3.py000066400000000000000000000041051460670727200177520ustar00rootroot00000000000000from mpi4py import MPI from array import array as _array 
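# Shared counter built on MPI-3 one-sided operations: rank 0 exposes a single
# int through an RMA window, and each next() call does an atomic fetch-and-add
# via Lock/Get_accumulate(op=MPI.SUM)/Unlock; the Mutex class spins on it.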
import struct as _struct # -------------------------------------------------------------------- class Counter(object): def __init__(self, comm): rank = comm.Get_rank() itemsize = MPI.INT.Get_size() if rank == 0: n = 1 else: n = 0 self.win = MPI.Win.Allocate(n*itemsize, itemsize, MPI.INFO_NULL, comm) if rank == 0: mem = self.win.tomemory() mem[:] = _struct.pack('i', 0) def free(self): self.win.Free() def next(self, increment=1): incr = _array('i', [increment]) nval = _array('i', [0]) self.win.Lock(0) self.win.Get_accumulate([incr, 1, MPI.INT], [nval, 1, MPI.INT], 0, op=MPI.SUM) self.win.Unlock(0) return nval[0] # ----------------------------------------------------------------------------- class Mutex(object): def __init__(self, comm): self.counter = Counter(comm) def __enter__(self): self.lock() return self def __exit__(self, *exc): self.unlock() return None def free(self): self.counter.free() def lock(self): value = self.counter.next(+1) while value != 0: value = self.counter.next(-1) value = self.counter.next(+1) def unlock(self): self.counter.next(-1) # ----------------------------------------------------------------------------- def test_counter(): vals = [] counter = Counter(MPI.COMM_WORLD) for i in range(5): c = counter.next() vals.append(c) counter.free() vals = MPI.COMM_WORLD.allreduce(vals) assert sorted(vals) == list(range(len(vals))) def test_mutex(): mutex = Mutex(MPI.COMM_WORLD) mutex.lock() mutex.unlock() mutex.free() if __name__ == '__main__': test_counter() test_mutex() # ----------------------------------------------------------------------------- mpi4py-3.1.6/demo/nxtval/nxtval-onesided.py000066400000000000000000000036161460670727200207020ustar00rootroot00000000000000# -------------------------------------------------------------------- from mpi4py import MPI from array import array as _array import struct as _struct class Counter(object): def __init__(self, comm): # size = comm.Get_size() rank = comm.Get_rank() # itemsize = MPI.INT.Get_size() if rank == 0: mem = MPI.Alloc_mem(itemsize*size, MPI.INFO_NULL) mem[:] = _struct.pack('i', 0) * size else: mem = MPI.BOTTOM self.win = MPI.Win.Create(mem, itemsize, MPI.INFO_NULL, comm) # blens = [rank, size-rank-1] disps = [0, rank+1] self.dt_get = MPI.INT.Create_indexed(blens, disps).Commit() # self.myval = 0 def free(self): self.dt_get.Free() mem = self.win.tomemory() self.win.Free() if mem: MPI.Free_mem(mem) def next(self): # group = self.win.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() # incr = _array('i', [1]) vals = _array('i', [0])*size self.win.Lock(0) self.win.Accumulate([incr, 1, MPI.INT], 0, [rank, 1, MPI.INT], MPI.SUM) self.win.Get([vals, 1, self.dt_get], 0, [ 0, 1, self.dt_get]) self.win.Unlock(0) # vals[rank] = self.myval self.myval += 1 nxtval = sum(vals) # return nxtval # -------------------------------------------------------------------- def test(): vals = [] counter = Counter(MPI.COMM_WORLD) for i in range(5): c = counter.next() vals.append(c) counter.free() vals = MPI.COMM_WORLD.allreduce(vals) assert sorted(vals) == list(range(len(vals))) if __name__ == '__main__': test() # -------------------------------------------------------------------- mpi4py-3.1.6/demo/nxtval/nxtval-scalable.py000066400000000000000000000100271460670727200206500ustar00rootroot00000000000000from mpi4py import MPI # ----------------------------------------------------------------------------- import struct as _struct try: from numpy import empty as _empty def _array_new(size, typecode, init=0): a = _empty(size, 
typecode) a.fill(init) return a def _array_set(ary, value): ary.fill(value) def _array_sum(ary): return ary.sum() except ImportError: from array import array as _array def _array_new(size, typecode, init=0): return _array(typecode, [init]) * size def _array_set(ary, value): for i, _ in enumerate(ary): ary[i] = value def _array_sum(ary): return sum(ary, 0) # ----------------------------------------------------------------------------- class Counter(object): def __init__(self, comm, init=0): # size = comm.Get_size() rank = comm.Get_rank() mask = 1 while mask < size: mask <<= 1 mask >>= 1 idx = 0 get_idx = [] acc_idx = [] while mask >= 1: left = idx + 1 right = idx + (mask<<1) if rank < mask: acc_idx.append( left ) get_idx.append( right ) idx = left else: acc_idx.append( right ) get_idx.append( left ) idx = right rank = rank % mask mask >>= 1 # typecode = 'i' datatype = MPI.INT itemsize = datatype.Get_size() # root = 0 rank = comm.Get_rank() if rank == root: nlevels = len(get_idx) + 1 nentries = (1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) s_msg = MPI.IN_PLACE r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Allgather(s_msg, r_msg) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Alltoall(s_msg, r_msg) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) disp = 0 for i in range (numprocs): s_counts[i] = r_counts[i] = size s_displs[i] = r_displs[i] = disp disp += size s_msg = [s_buf, (s_counts, s_displs), MPI.BYTE] r_msg = [r_buf, (r_counts, r_displs), MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Alltoallv(s_msg, r_msg) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) msg = [buf, size, MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Bcast(msg, 0) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large window_size = window_size_large iterations = list(range(loop+skip)) window_sizes = list(range(window_size)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] send_request = [MPI.REQUEST_NULL] * window_size recv_request = [MPI.REQUEST_NULL] * window_size # comm.Barrier() if myid == 0: for i in iterations: if i == skip: t_start = MPI.Wtime() for j in window_sizes: recv_request[j] = comm.Irecv(r_msg, 1, 10) for j in window_sizes: send_request[j] = comm.Isend(s_msg, 1, 100) MPI.Request.Waitall(send_request) MPI.Request.Waitall(recv_request) t_end = MPI.Wtime() elif myid == 1: for i in iterations: 
for j in window_sizes: recv_request[j] = comm.Irecv(r_msg, 0, 100) for j in window_sizes: send_request[j] = comm.Isend(s_msg, 0, 10) MPI.Request.Waitall(send_request) MPI.Request.Waitall(recv_request) # if myid == 0: MB = size / 1e6 * loop * window_size s = t_end - t_start print ('%-10d%20.2f' % (size, MB/s)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, EnvironmentError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_bibw() mpi4py-3.1.6/demo/osu_bw.py000066400000000000000000000047171460670727200155630ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_bw( BENCHMARH = "MPI Bandwidth Test", skip = 10, loop = 100, window_size = 64, skip_large = 2, loop_large = 20, window_size_large = 64, large_message_size = 8192, MAX_MSG_SIZE = 1<<22, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() numprocs = comm.Get_size() if numprocs != 2: if myid == 0: errmsg = "This test requires exactly two processes" else: errmsg = None raise SystemExit(errmsg) s_buf = allocate(MAX_MSG_SIZE) r_buf = allocate(MAX_MSG_SIZE) if myid == 0: print ('# %s' % (BENCHMARH,)) if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Bandwidth [MB/s]")) message_sizes = [2**i for i in range(30)] for size in message_sizes: if size > MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large window_size = window_size_large iterations = list(range(loop+skip)) window_sizes = list(range(window_size)) requests = [MPI.REQUEST_NULL] * window_size # comm.Barrier() if myid == 0: s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, 4, MPI.BYTE] for i in iterations: if i == skip: t_start = MPI.Wtime() for j in window_sizes: requests[j] = comm.Isend(s_msg, 1, 100) MPI.Request.Waitall(requests) comm.Recv(r_msg, 1, 101) t_end = MPI.Wtime() elif myid == 1: s_msg = [s_buf, 4, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] for i in iterations: for j in window_sizes: requests[j] = comm.Irecv(r_msg, 0, 100) MPI.Request.Waitall(requests) comm.Send(s_msg, 0, 101) # if myid == 0: MB = size / 1e6 * loop * window_size s = t_end - t_start print ('%-10d%20.2f' % (size, MB/s)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, EnvironmentError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_bw() mpi4py-3.1.6/demo/osu_gather.py000066400000000000000000000037031460670727200164170ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_gather( BENCHMARH = "MPI Gather Latency Test", skip = 1000, loop = 10000, skip_large = 10, loop_large = 100, large_message_size = 8192, MAX_MSG_SIZE = 1<<20, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() numprocs = comm.Get_size() if numprocs < 2: if myid == 0: errmsg = "This test requires at least two processes" else: errmsg = None raise SystemExit(errmsg) if myid == 0: r_buf = allocate(MAX_MSG_SIZE*numprocs) else: s_buf = allocate(MAX_MSG_SIZE) if myid == 0: print ('# %s' % (BENCHMARH,)) if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) for size in message_sizes(MAX_MSG_SIZE): if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) if myid == 0: s_msg = MPI.IN_PLACE r_msg = [r_buf, size, MPI.BYTE] else: s_msg = [s_buf, size, MPI.BYTE] r_msg = None # comm.Barrier() for i in 
iterations: if i == skip: t_start = MPI.Wtime() comm.Gather(s_msg, r_msg, 0) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() if myid == 0: for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Send(s_msg, 1, 1) comm.Recv(r_msg, 1, 1) t_end = MPI.Wtime() elif myid == 1: for i in iterations: comm.Recv(r_msg, 0, 1) comm.Send(s_msg, 0, 1) # if myid == 0: latency = (t_end - t_start) * 1e6 / (2 * loop) print ('%-10d%20.2f' % (size, latency)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, EnvironmentError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_latency() mpi4py-3.1.6/demo/osu_multi_lat.py000066400000000000000000000042621460670727200171400ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_multi_lat( BENCHMARH = "MPI Multi Latency Test", skip_small = 100, loop_small = 10000, skip_large = 10, loop_large = 1000, large_message_size = 8192, MAX_MSG_SIZE = 1<<22, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() nprocs = comm.Get_size() pairs = nprocs/2 s_buf = allocate(MAX_MSG_SIZE) r_buf = allocate(MAX_MSG_SIZE) if myid == 0: print ('# %s' % (BENCHMARH,)) if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) message_sizes = [0] + [2**i for i in range(30)] for size in message_sizes: if size > MAX_MSG_SIZE: break if size > large_message_size: skip = skip_large loop = loop_large else: skip = skip_small loop = loop_small iterations = list(range(loop+skip)) s_msg = [s_buf, size, MPI.BYTE] r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() if myid < pairs: partner = myid + pairs for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Send(s_msg, partner, 1) comm.Recv(r_msg, partner, 1) t_end = MPI.Wtime() else: partner = myid - pairs for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Recv(r_msg, partner, 1) comm.Send(s_msg, partner, 1) t_end = MPI.Wtime() # latency = (t_end - t_start) * 1e6 / (2 * loop) total_lat = comm.reduce(latency, root=0, op=MPI.SUM) if myid == 0: avg_lat = total_lat/(pairs * 2) print ('%-10d%20.2f' % (size, avg_lat)) def allocate(n): try: import mmap return mmap.mmap(-1, n) except (ImportError, EnvironmentError): try: from numpy import zeros return zeros(n, 'B') except ImportError: from array import array return array('B', [0]) * n if __name__ == '__main__': osu_multi_lat() mpi4py-3.1.6/demo/osu_scatter.py000066400000000000000000000037071460670727200166160ustar00rootroot00000000000000# http://mvapich.cse.ohio-state.edu/benchmarks/ from mpi4py import MPI def osu_scatter( BENCHMARH = "MPI Scatter Latency Test", skip = 1000, loop = 10000, skip_large = 10, loop_large = 100, large_message_size = 8192, MAX_MSG_SIZE = 1<<20, ): comm = MPI.COMM_WORLD myid = comm.Get_rank() numprocs = comm.Get_size() if numprocs < 2: if myid == 0: errmsg = "This test requires at least two processes" else: errmsg = None raise SystemExit(errmsg) if myid == 0: s_buf = allocate(MAX_MSG_SIZE*numprocs) else: r_buf = allocate(MAX_MSG_SIZE) if myid == 0: print ('# %s' % (BENCHMARH,)) if myid == 0: print ('# %-8s%20s' % ("Size [B]", "Latency [us]")) for size 
in message_sizes(MAX_MSG_SIZE): if size > large_message_size: skip = skip_large loop = loop_large iterations = list(range(loop+skip)) if myid == 0: s_msg = [s_buf, size, MPI.BYTE] r_msg = MPI.IN_PLACE else: s_msg = None r_msg = [r_buf, size, MPI.BYTE] # comm.Barrier() for i in iterations: if i == skip: t_start = MPI.Wtime() comm.Scatter(s_msg, r_msg, 0) t_end = MPI.Wtime() comm.Barrier() # if myid == 0: latency = (t_end - t_start) * 1e6 / loop print ('%-10d%20.2f' % (size, latency)) def message_sizes(max_size): return [0] + [(1< target: partial = op(tmp, partial) recvobj = op(tmp, recvobj) else: tmp = op(partial, tmp) partial = tmp mask <<= 1 return recvobj def exscan(self, sendobj=None, recvobj=None, op=MPI.SUM): size = self.size rank = self.rank tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB)-1 recvobj = sendobj partial = sendobj mask = 1 flag = False while mask < size: target = rank ^ mask if target < size: tmp = self.sendrecv(partial, dest=target, source=target, sendtag=tag, recvtag=tag) if rank > target: partial = op(tmp, partial) if rank != 0: if not flag: recvobj = tmp flag = True else: recvobj = op(tmp, recvobj) else: tmp = op(partial, tmp) partial = tmp mask <<= 1 if rank == 0: recvobj = None return recvobj mpi4py-3.1.6/demo/reductions/runtests.bat000066400000000000000000000004741460670727200204450ustar00rootroot00000000000000@echo off setlocal ENABLEEXTENSIONS set MPI=Microsoft MPI set PATH="%ProgramFiles%\%MPI%\bin";%PATH% set MPIEXEC=mpiexec set NP_FLAG=-n set NP=5 set PYTHON=C:\Python27\python.exe set PYTHON=C:\Python36\python.exe set PYTHON=python @echo on %MPIEXEC% %NP_FLAG% %NP% %PYTHON% test_reductions.py -q mpi4py-3.1.6/demo/reductions/runtests.sh000077500000000000000000000001661460670727200203120ustar00rootroot00000000000000#!/bin/sh MPIEXEC=mpiexec NP_FLAG=-n NP=5 PYTHON=python set -x $MPIEXEC $NP_FLAG $NP $PYTHON test_reductions.py -q mpi4py-3.1.6/demo/reductions/test_reductions.py000066400000000000000000000142431460670727200216550ustar00rootroot00000000000000#import mpi4py #mpi4py.profile("mpe") from mpi4py import MPI import unittest import sys, os sys.path.insert(0, os.path.dirname(__file__)) from reductions import Intracomm del sys.path[0] class BaseTest(object): def test_reduce(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=msg, root=root) if self.comm.rank == root: self.assertEqual(res, sum(range(size))) else: self.assertEqual(res, None) def test_reduce_min(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=msg, op=MPI.MIN, root=root) if self.comm.rank == root: self.assertEqual(res, 0) else: self.assertEqual(res, None) def test_reduce_max(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=msg, op=MPI.MAX, root=root) if self.comm.rank == root: self.assertEqual(res, size-1) else: self.assertEqual(res, None) def test_reduce_minloc(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=(msg, rank), op=MPI.MINLOC, root=root) if self.comm.rank == root: self.assertEqual(res, (0, 0)) else: self.assertEqual(res, None) def test_reduce_maxloc(self): rank = self.comm.rank size = self.comm.size for root in range(size): msg = rank res = self.comm.reduce(sendobj=(msg, rank), op=MPI.MAXLOC, root=root) if self.comm.rank == root: self.assertEqual(res, (size-1, size-1)) else: self.assertEqual(res, None) def 
test_allreduce(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=msg) self.assertEqual(res, sum(range(size))) def test_allreduce_min(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=msg, op=MPI.MIN) self.assertEqual(res, 0) def test_allreduce_max(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=msg, op=MPI.MAX) self.assertEqual(res, size-1) def test_allreduce_minloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=(msg, rank), op=MPI.MINLOC) self.assertEqual(res, (0, 0)) def test_allreduce_maxloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.allreduce(sendobj=(msg, rank), op=MPI.MAXLOC) self.assertEqual(res, (size-1, size-1)) def test_scan(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=msg) self.assertEqual(res, sum(list(range(size))[:rank+1])) def test_scan_min(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=msg, op=MPI.MIN) self.assertEqual(res, 0) def test_scan_max(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=msg, op=MPI.MAX) self.assertEqual(res, rank) def test_scan_minloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=(msg, rank), op=MPI.MINLOC) self.assertEqual(res, (0, 0)) def test_scan_maxloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.scan(sendobj=(msg, rank), op=MPI.MAXLOC) self.assertEqual(res, (rank, rank)) def test_exscan(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=msg) if self.comm.rank == 0: self.assertEqual(res, None) else: self.assertEqual(res, sum(list(range(size))[:rank])) def test_exscan_min(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=msg, op=MPI.MIN) if self.comm.rank == 0: self.assertEqual(res, None) else: self.assertEqual(res, 0) def test_exscan_max(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=msg, op=MPI.MAX) if self.comm.rank == 0: self.assertEqual(res, None) else: self.assertEqual(res, rank-1) def test_exscan_minloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=(msg, rank), op=MPI.MINLOC) if self.comm.rank == 0: self.assertEqual(res, None) else: self.assertEqual(res, (0, 0)) def test_exscan_maxloc(self): rank = self.comm.rank size = self.comm.size msg = rank res = self.comm.exscan(sendobj=(msg, rank), op=MPI.MAXLOC) if self.comm.rank == 0: self.assertEqual(res, None) else: self.assertEqual(res, (rank-1, rank-1)) class TestS(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_SELF) class TestW(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_WORLD) class TestSD(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_SELF.Dup()) def tearDown(self): self.comm.Free() class TestWD(BaseTest, unittest.TestCase): def setUp(self): self.comm = Intracomm(MPI.COMM_WORLD.Dup()) def tearDown(self): self.comm.Free() if __name__ == "__main__": unittest.main() mpi4py-3.1.6/demo/sequential/000077500000000000000000000000001460670727200160545ustar00rootroot00000000000000mpi4py-3.1.6/demo/sequential/makefile000066400000000000000000000002421460670727200175520ustar00rootroot00000000000000MPIEXEC = 
mpiexec NP_FLAG = -n NP = 5 PYTHON = python$(py) .PHONY: test test: ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test_seq.py ${RM} -r *.py[co] __pycache__ mpi4py-3.1.6/demo/sequential/runtests.bat000066400000000000000000000004621460670727200204350ustar00rootroot00000000000000@echo off setlocal ENABLEEXTENSIONS set MPI=Microsoft MPI set PATH="%ProgramFiles%\%MPI%\bin";%PATH% set MPIEXEC=mpiexec set NP_FLAG=-n set NP=5 set PYTHON=C:\Python27\python.exe set PYTHON=C:\Python36\python.exe set PYTHON=python @echo on %MPIEXEC% %NP_FLAG% %NP% %PYTHON% test_seq.py mpi4py-3.1.6/demo/sequential/runtests.sh000077500000000000000000000001541460670727200203020ustar00rootroot00000000000000#!/bin/sh MPIEXEC=mpiexec NP_FLAG=-n NP=5 PYTHON=python set -x $MPIEXEC $NP_FLAG $NP $PYTHON test_seq.py mpi4py-3.1.6/demo/sequential/seq.py000066400000000000000000000024341460670727200172210ustar00rootroot00000000000000class Seq(object): """ Sequential execution """ def __init__(self, comm, ng=1, tag=0): ng = int(ng) tag = int(tag) assert ng >= 1 assert ng <= comm.Get_size() self.comm = comm self.ng = ng self.tag = tag def __enter__(self): self.begin() return self def __exit__(self, *exc): self.end() return None def begin(self): """ Begin a sequential execution of a section of code """ comm = self.comm size = comm.Get_size() if size == 1: return rank = comm.Get_rank() ng = self.ng tag = self.tag if rank != 0: comm.Recv([None, 'B'], rank - 1, tag) if rank != (size - 1) and (rank % ng) < (ng - 1): comm.Send([None, 'B'], rank + 1, tag) def end(self): """ End a sequential execution of a section of code """ comm = self.comm size = comm.Get_size() if size == 1: return rank = comm.Get_rank() ng = self.ng tag = self.tag if rank == (size - 1) or (rank % ng) == (ng - 1): comm.Send([None, 'B'], (rank + 1) % size, tag) if rank == 0: comm.Recv([None, 'B'], size - 1, tag) mpi4py-3.1.6/demo/sequential/test_seq.py000066400000000000000000000007411460670727200202570ustar00rootroot00000000000000#import mpi4py #mpi4py.profile("mpe") from mpi4py import MPI import unittest import sys, os sys.path.insert(0, os.path.dirname(__file__)) from seq import Seq del sys.path[0] def test(): size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() name = MPI.Get_processor_name() with Seq(MPI.COMM_WORLD, 1, 10): print( "Hello, World! I am process %d of %d on %s." 
% (rank, size, name)) if __name__ == "__main__": test() mpi4py-3.1.6/demo/spawning/000077500000000000000000000000001460670727200155305ustar00rootroot00000000000000mpi4py-3.1.6/demo/spawning/cpi-master.c000066400000000000000000000013211460670727200177350ustar00rootroot00000000000000#include #include #include #include int main(int argc, char *argv[]) { char cmd[32] = "./cpi-worker-c.exe"; MPI_Comm worker; int n; double pi; MPI_Init(&argc, &argv); if (argc > 1) strcpy(cmd, argv[1]); printf("%s -> %s\n", argv[0], cmd); MPI_Comm_spawn(cmd, MPI_ARGV_NULL, 5, MPI_INFO_NULL, 0, MPI_COMM_SELF, &worker, MPI_ERRCODES_IGNORE); n = 100; MPI_Bcast(&n, 1, MPI_INT, MPI_ROOT, worker); MPI_Reduce(MPI_BOTTOM, &pi, 1, MPI_DOUBLE, MPI_SUM, MPI_ROOT, worker); MPI_Comm_disconnect(&worker); printf("pi: %.16f, error: %.16f\n", pi, fabs(M_PI-pi)); MPI_Finalize(); return 0; } mpi4py-3.1.6/demo/spawning/cpi-master.cxx000066400000000000000000000012411460670727200203160ustar00rootroot00000000000000#include #include #include #include int main(int argc, char *argv[]) { MPI::Init(); char cmd[32] = "./cpi-worker-cxx.exe"; if (argc > 1) std::strcpy(cmd, argv[1]); std::printf("%s -> %s\n", argv[0], cmd); MPI::Intercomm worker; worker = MPI::COMM_SELF.Spawn(cmd, MPI::ARGV_NULL, 5, MPI::INFO_NULL, 0); int n = 100; worker.Bcast(&n, 1, MPI::INT, MPI::ROOT); double pi; worker.Reduce(MPI::BOTTOM, &pi, 1, MPI::DOUBLE, MPI::SUM, MPI::ROOT); worker.Disconnect(); std::printf("pi: %.16f, error: %.16f\n", pi, std::fabs(M_PI-pi)); MPI::Finalize(); return 0; } mpi4py-3.1.6/demo/spawning/cpi-master.f90000066400000000000000000000017421460670727200201200ustar00rootroot00000000000000PROGRAM main USE mpi implicit none real (kind=8), parameter :: PI = 3.1415926535897931D0 integer argc character(len=32) argv(0:1) character(len=32) cmd integer ierr, n, worker real(kind=8) cpi call MPI_INIT(ierr) argc = iargc() + 1 call getarg(0, argv(0)) call getarg(1, argv(1)) cmd = './cpi-worker-f90.exe' if (argc > 1) then cmd = argv(1) end if write(*,'(A,A,A)') trim(argv(0)), ' -> ', trim(cmd) call MPI_COMM_SPAWN(cmd, MPI_ARGV_NULL, 5, & MPI_INFO_NULL, 0, & MPI_COMM_SELF, worker, & MPI_ERRCODES_IGNORE, ierr) n = 100 call MPI_BCAST(n, 1, MPI_INTEGER, & MPI_ROOT, worker, ierr) call MPI_REDUCE(MPI_BOTTOM, cpi, 1, MPI_DOUBLE_PRECISION, & MPI_SUM, MPI_ROOT, worker, ierr) call MPI_COMM_DISCONNECT(worker, ierr) write(*,'(A,F18.16,A,F18.16)') 'pi: ', cpi, ', error: ', abs(PI-cpi) call MPI_FINALIZE(ierr) END PROGRAM main mpi4py-3.1.6/demo/spawning/cpi-master.py000066400000000000000000000010051460670727200201420ustar00rootroot00000000000000from mpi4py import MPI from array import array from math import pi as PI from sys import argv cmd = './cpi-worker-py.exe' if len(argv) > 1: cmd = argv[1] print("%s -> %s" % (argv[0], cmd)) worker = MPI.COMM_SELF.Spawn(cmd, None, 5) n = array('i', [100]) worker.Bcast([n,MPI.INT], root=MPI.ROOT) pi = array('d', [0.0]) worker.Reduce(sendbuf=None, recvbuf=[pi, MPI.DOUBLE], op=MPI.SUM, root=MPI.ROOT) pi = pi[0] worker.Disconnect() print('pi: %.16f, error: %.16f' % (pi, abs(PI-pi))) mpi4py-3.1.6/demo/spawning/cpi-worker.c000066400000000000000000000011251460670727200177550ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { int myrank, nprocs; int n, i; double h, s, pi; MPI_Comm master; MPI_Init(&argc, &argv); MPI_Comm_get_parent(&master); MPI_Comm_size(master, &nprocs); MPI_Comm_rank(master, &myrank); MPI_Bcast(&n, 1, MPI_INT, 0, master); h = 1.0 / (double) n; s = 0.0; for (i = myrank+1; i < n+1; i += nprocs) { double x 
= h * (i - 0.5); s += 4.0 / (1.0 + x*x); } pi = s * h; MPI_Reduce(&pi, MPI_BOTTOM, 1, MPI_DOUBLE, MPI_SUM, 0, master); MPI_Comm_disconnect(&master); MPI_Finalize(); return 0; } mpi4py-3.1.6/demo/spawning/cpi-worker.cxx000066400000000000000000000010471460670727200203400ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { MPI::Init(); MPI::Intercomm master = MPI::Comm::Get_parent(); int nprocs = master.Get_size(); int myrank = master.Get_rank(); int n; master.Bcast(&n, 1, MPI_INT, 0); double h = 1.0 / (double) n; double s = 0.0; for (int i = myrank+1; i < n+1; i += nprocs) { double x = h * (i - 0.5); s += 4.0 / (1.0 + x*x); } double pi = s * h; master.Reduce(&pi, MPI_BOTTOM, 1, MPI_DOUBLE, MPI_SUM, 0); master.Disconnect(); MPI::Finalize(); return 0; } mpi4py-3.1.6/demo/spawning/cpi-worker.f90000066400000000000000000000012521460670727200201320ustar00rootroot00000000000000PROGRAM main USE mpi implicit none integer ierr integer n, i, master, myrank, nprocs real (kind=8) h, s, x, cpi call MPI_INIT(ierr) call MPI_COMM_GET_PARENT(master, ierr) call MPI_COMM_SIZE(master, nprocs, ierr) call MPI_COMM_RANK(master, myrank, ierr) call MPI_BCAST(n, 1, MPI_INTEGER, & 0, master, ierr) h = 1 / DFLOAT(n) s = 0.0 DO i=myrank+1,n,nprocs x = h * (DFLOAT(i) - 0.5) s = s + 4.0 / (1.0 + x*x) END DO cpi = s * h call MPI_REDUCE(cpi, MPI_BOTTOM, 1, MPI_DOUBLE_PRECISION, & MPI_SUM, 0, master, ierr) call MPI_COMM_DISCONNECT(master, ierr) call MPI_FINALIZE(ierr) END PROGRAM main mpi4py-3.1.6/demo/spawning/cpi-worker.py000066400000000000000000000007221460670727200201650ustar00rootroot00000000000000from mpi4py import MPI from array import array master = MPI.Comm.Get_parent() nprocs = master.Get_size() myrank = master.Get_rank() n = array('i', [0]) master.Bcast([n, MPI.INT], root=0) n = n[0] h = 1.0 / n s = 0.0 for i in range(myrank+1, n+1, nprocs): x = h * (i - 0.5) s += 4.0 / (1.0 + x**2) pi = s * h pi = array('d', [pi]) master.Reduce(sendbuf=[pi, MPI.DOUBLE], recvbuf=None, op=MPI.SUM, root=0) master.Disconnect() mpi4py-3.1.6/demo/spawning/makefile000066400000000000000000000022221460670727200172260ustar00rootroot00000000000000.PHONY: default build test clean MPIEXEC=mpiexec -n 1 default: build test clean MASTERS = cpi-master-py.exe cpi-master-c.exe cpi-master-cxx.exe cpi-master-f90.exe WORKERS = cpi-worker-py.exe cpi-worker-c.exe cpi-worker-cxx.exe cpi-worker-f90.exe build: ${MASTERS} ${WORKERS} LANGS=py c cxx f90 test: build @for i in ${LANGS}; do \ for j in ${LANGS}; do \ ${MPIEXEC} ./cpi-master-$$i.exe ./cpi-worker-$$j.exe; \ done; \ done clean: ${RM} -r ${MASTERS} ${WORKERS} MPICC=mpicc MPICXX=mpicxx MPIF90=mpif90 ifneq (${MPI_FORTRAN_MOD_DIR},) FFLAGS += -I${MPI_FORTRAN_MOD_DIR} endif # Python cpi-master-py.exe: cpi-master.py echo '#!'`which python` > $@ cat $< >> $@ chmod +x $@ cpi-worker-py.exe: cpi-worker.py echo '#!'`which python` > $@ cat $< >> $@ chmod +x $@ # C cpi-master-c.exe: cpi-master.c ${MPICC} $< -o $@ cpi-worker-c.exe: cpi-worker.c ${MPICC} $< -o $@ # C++ cpi-master-cxx.exe: cpi-master.cxx ${MPICXX} $< -o $@ cpi-worker-cxx.exe: cpi-worker.cxx ${MPICXX} $< -o $@ # Fortran 90 cpi-master-f90.exe: cpi-master.f90 ${MPIF90} ${FFLAGS} $< -o $@ cpi-worker-f90.exe: cpi-worker.f90 ${MPIF90} ${FFLAGS} $< -o $@ mpi4py-3.1.6/demo/test-run/000077500000000000000000000000001460670727200154635ustar00rootroot00000000000000mpi4py-3.1.6/demo/test-run/makefile000066400000000000000000000004721460670727200171660ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) 
.PHONY: build build: mkdir -p run-directory/ cp run-script.py run-directory/__main__.py zip -qj run-zipfile.zip run-directory/__main__.py .PHONY: test test: ${PYTHON} test_run.py -v .PHONY: clean clean: ${RM} -r run-directory run-zipfile.zip mpi4py-3.1.6/demo/test-run/run-script.py000066400000000000000000000024701460670727200201460ustar00rootroot00000000000000from mpi4py import MPI import sys, os, optparse assert __name__ == '__main__' from os.path import split, splitext, dirname, realpath dirname = dirname(__file__) assert sys.path[0] == realpath(dirname) if split(__file__)[1] == '__main__.py': if splitext(dirname)[0] == '.zip': assert sys.argv[0] == dirname else: assert realpath(sys.argv[0]) == realpath(dirname) else: assert sys.argv[0] == __file__ parser = optparse.OptionParser() parser.add_option("--rank", action='store', type='int', dest="rank", default=0) parser.add_option("--sys-exit", action='store', type='int', dest="sys_exit", default=None) parser.add_option("--sys-exit-msg", action="store", type="string", dest="sys_exit", default=None) parser.add_option("--exception", action="store", type="string", dest="exception", default=None) (options, args) = parser.parse_args() assert not args comm = MPI.COMM_WORLD if comm.rank == options.rank: if options.sys_exit: sys.exit(options.sys_exit) if options.exception: raise RuntimeError(options.exception) comm.Barrier() if comm.rank > 0: comm.Recv([None, 'B'], comm.rank - 1) print("Hello, World!") if comm.rank < comm.size - 1: comm.Send([None, 'B'], comm.rank + 1) comm.Barrier() sys.exit() mpi4py-3.1.6/demo/test-run/test_run.py000066400000000000000000000137621460670727200177110ustar00rootroot00000000000000import sys, os, shlex import subprocess as sp import unittest import mpi4py def find_executable(exe): from distutils.spawn import find_executable as find_exe command = shlex.split(exe) executable = find_exe(command[0]) if executable: command[0] = executable return ' '.join(command) def find_mpiexec(mpiexec='mpiexec'): mpiexec = os.environ.get('MPIEXEC') or mpiexec mpiexec = find_executable(mpiexec) if not mpiexec and sys.platform.startswith('win'): MSMPI_BIN = os.environ.get('MSMPI_BIN', '') mpiexec = os.path.join(MSMPI_BIN, mpiexec) mpiexec = find_executable(mpiexec) if not mpiexec: mpiexec = find_executable('mpirun') return mpiexec def launcher(np): mpiexec = find_mpiexec() python = sys.executable if 'coverage' in sys.modules: python += ' -m coverage run -p -m' module = 'mpi4py.run -rc threads=False' command = '{mpiexec} -n {np} {python} -m {module}' return shlex.split(command.format(**vars())) def execute(np, command, args=''): env = os.environ.copy() pypath = os.environ.get('PYTHONPATH', '').split(os.pathsep) pypath.insert(0, os.path.abspath(os.path.dirname(mpi4py.__path__[0]))) env['PYTHONPATH'] = os.pathsep.join(pypath) if isinstance(command, str): command = shlex.split(command) if isinstance(args, str): args = shlex.split(args) cmdline = launcher(np) + command + args p = sp.Popen(cmdline, stdout=sp.PIPE, stderr=sp.PIPE, env=env, bufsize=0) stdout, stderr = p.communicate() return p.returncode, stdout.decode(), stderr.decode() class BaseTestRun(object): def assertMPIAbort(self, stdout, stderr): if not ('MPI_Abort' in stdout or 'MPI_ABORT' in stdout or 'MPI_Abort' in stderr or 'MPI_ABORT' in stderr): msg = ("expecting MPI_Abort() message in stdout/stderr:\n" "[stdout]:\n{0}\n[stderr]:\n{1}\n").format(stdout, stderr) raise self.failureException(msg) class TestRunScript(BaseTestRun, unittest.TestCase): pyfile = 'run-script.py' def 
execute(self, args='', np=1): dirname = os.path.abspath(os.path.dirname(__file__)) script = os.path.join(dirname, self.pyfile) return execute(np, script, args) def testSuccess(self): success = 'Hello, World!' for np in (1, 2, 3): status, stdout, stderr = self.execute(np=np) self.assertEqual(status, 0) self.assertEqual(stderr, '') self.assertEqual(stdout.count(success), np) def testException(self): message = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' excmess = 'RuntimeError: {0}'.format(message) for np in (1, 2, 3): for rank in range(0, np): args = ['--rank', str(rank), '--exception', message] status, stdout, stderr = self.execute(args, np) self.assertEqual(status, 1) self.assertMPIAbort(stdout, stderr) self.assertTrue(excmess in stderr) def testSysExitCode(self): errcode = 7 for np in (1, 2, 3): for r in sorted(set([0, np-1])): args = ['--rank', str(r), '--sys-exit', str(errcode)] status, stdout, stderr = self.execute(args, np) self.assertTrue(status in (errcode, 1)) self.assertMPIAbort(stdout, stderr) self.assertTrue('Traceback' not in stderr) def testSysExitMess(self): exitmsg = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' for np in (1, 2, 3): for r in sorted(set([0, np-1])): args = ['--rank', str(r), '--sys-exit-msg', exitmsg] status, stdout, stderr = self.execute(args, np) self.assertEqual(status, 1) self.assertMPIAbort(stdout, stderr) self.assertTrue('Traceback' not in stderr) self.assertTrue(exitmsg in stderr) if os.path.exists(os.path.join(os.path.dirname(__file__), 'run-directory')): class TestRunDirectory(TestRunScript): pyfile = 'run-directory' if os.path.exists(os.path.join(os.path.dirname(__file__), 'run-zipfile.zip')): class TestRunZipFile(TestRunScript): pyfile = 'run-zipfile.zip' class TestRunModule(BaseTestRun, unittest.TestCase): def execute(self, module, np=1): return execute(np, '-m', module) def testSuccess(self): module = 'mpi4py.bench --no-threads helloworld' message = 'Hello, World!' 
for np in (1, 2, 3): status, stdout, stderr = self.execute(module, np) self.assertEqual(status, 0) self.assertEqual(stdout.count(message), np) self.assertEqual(stderr, '') class TestRunCommand(BaseTestRun, unittest.TestCase): def execute(self, command, np=1): return execute(np, '-c', command) def testArgv0(self): command = '"import sys; print(sys.argv[0])"' status, stdout, stderr = self.execute(command, 1) self.assertEqual(status, 0) self.assertEqual(stdout, '-c\n') self.assertEqual(stderr, '') def testSuccess(self): command = '"from mpi4py import MPI"' for np in (1, 2, 3): status, stdout, stderr = self.execute(command, np) self.assertEqual(status, 0) self.assertEqual(stdout, '') self.assertEqual(stderr, '') def testException(self): command = '"from mpi4py import MPI; 1/0 if MPI.COMM_WORLD.Get_rank()==0 else 0;"' excmess = 'ZeroDivisionError:' for np in (1, 2, 3): for rank in range(0, np): status, stdout, stderr = self.execute(command, np) self.assertEqual(status, 1) self.assertMPIAbort(stdout, stderr) self.assertTrue(excmess in stderr) if not find_mpiexec(): del TestRunScript try: del TestRunDirectory except: pass try: del TestRunZipFile except: pass del TestRunModule del TestRunCommand if __name__ == '__main__': unittest.main() mpi4py-3.1.6/demo/threads/000077500000000000000000000000001460670727200153345ustar00rootroot00000000000000mpi4py-3.1.6/demo/threads/makefile000066400000000000000000000002001460670727200170240ustar00rootroot00000000000000.PHONY: default build test clean default: build test clean PYTHON = python$(py) build: test: ${PYTHON} sendrecv.py clean: mpi4py-3.1.6/demo/threads/sendrecv.py000066400000000000000000000021371460670727200175220ustar00rootroot00000000000000from mpi4py import MPI import sys if MPI.Query_thread() < MPI.THREAD_MULTIPLE: sys.stderr.write("MPI does not provide enough thread support\n") sys.exit(0) try: import threading except ImportError: sys.stderr.write("threading module not available\n") sys.exit(0) try: import numpy except ImportError: sys.stderr.write("NumPy package not available\n") sys.exit(0) send_msg = numpy.arange(1000000, dtype='i') recv_msg = numpy.zeros_like(send_msg) start_event = threading.Event() def self_send(): start_event.wait() comm = MPI.COMM_WORLD rank = comm.Get_rank() comm.Send([send_msg, MPI.INT], dest=rank, tag=0) def self_recv(): start_event.wait() comm = MPI.COMM_WORLD rank = comm.Get_rank() comm.Recv([recv_msg, MPI.INT], source=rank, tag=0) send_thread = threading.Thread(target=self_send) recv_thread = threading.Thread(target=self_recv) for t in (recv_thread, send_thread): t.start() assert not numpy.allclose(send_msg, recv_msg) start_event.set() for t in (recv_thread, send_thread): t.join() assert numpy.allclose(send_msg, recv_msg) mpi4py-3.1.6/demo/vampirtrace/000077500000000000000000000000001460670727200162175ustar00rootroot00000000000000mpi4py-3.1.6/demo/vampirtrace/cpilog.py000066400000000000000000000020601460670727200200440ustar00rootroot00000000000000#!/usr/bin/env python # If you want VampirTrace to log MPI calls, you have to add the two # lines below at the very beginning of your main bootstrap script. 
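# (The ordering matters: mpi4py.profile() takes effect only when it is
# called before "from mpi4py import MPI" below, so that the profiling
# library can be loaded ahead of the MPI module itself.)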
import mpi4py mpi4py.rc.threads = False mpi4py.profile('vt', logfile='cpilog') # Import the MPI extension module from mpi4py import MPI # Import the 'array' module from array import array # This is just to make the logging # output a bit more interesting from time import sleep comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() n = array('i', [0]) pi = array('d', [0]) mypi = array('d', [0]) def comp_pi(n, myrank=0, nprocs=1): h = 1.0 / n; s = 0.0; for i in range(myrank + 1, n + 1, nprocs): x = h * (i - 0.5); s += 4.0 / (1.0 + x**2); return s * h comm.Barrier() for N in [10000]*10: if myrank == 0: n[0] = N comm.Bcast([n, MPI.INT], root=0) mypi[0] = comp_pi(n[0], myrank, nprocs) comm.Reduce([mypi, MPI.DOUBLE], [pi, MPI.DOUBLE], op=MPI.SUM, root=0) comm.Barrier() sleep(0.01) mpi4py-3.1.6/demo/vampirtrace/makefile000066400000000000000000000013251460670727200177200ustar00rootroot00000000000000MPIEXEC = mpiexec N = 8 PYTHON = python$(py) .PHONY: default default: build test clean .PHONY: run-cpilog run-ring run-threads run run: run-cpilog run-ring run-threads run-cpilog: ${MPIEXEC} -n ${N} ${PYTHON} cpilog.py run-ring: ${MPIEXEC} -n ${N} ${PYTHON} ring.py run-threads: ${MPIEXEC} -n ${N} ${PYTHON} threads.py .PHONY: view-cpilog view-ring view-threads view view: view-cpilog view-ring view-threads view-cpilog: cpilog.otf view-ring: ring.otf view-threads: threads.otf cpilog.otf: run-cpilog ring.otf: run-ring threads.otf: run-threads .PHONY: build build: .PHONY: test test: run .PHONY: clean clean: ${RM} *.otf *.uctl *.*.def.z *.*.events.z *.*.marker.z ${RM} *.thumb *.*.def *.*.events mpi4py-3.1.6/demo/vampirtrace/ring.py000066400000000000000000000014611460670727200175320ustar00rootroot00000000000000#!/usr/bin/env python # If you want VampirTrace to log MPI calls, you have to add the two # lines below at the very beginning of your main bootstrap script. 
import mpi4py mpi4py.rc.threads = False mpi4py.profile('vt-mpi', logfile='ring') from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() src = rank-1 dest = rank+1 if rank == 0: src = size-1 if rank == size-1: dest = 0 try: from numpy import zeros a1 = zeros(1000000, 'd') a2 = zeros(1000000, 'd') except ImportError: from array import array a1 = array('d', [0]*1000); a1 *= 1000 a2 = array('d', [0]*1000); a2 *= 1000 comm.Sendrecv(sendbuf=a1, recvbuf=a2, source=src, dest=dest) MPI.Request.Waitall([ comm.Isend(a1, dest=dest), comm.Irecv(a2, source=src), ]) mpi4py-3.1.6/demo/vampirtrace/threads.py000066400000000000000000000016161460670727200202270ustar00rootroot00000000000000#!/usr/bin/env python import mpi4py mpi4py.rc.threads = True mpi4py.rc.thread_level = "funneled" mpi4py.profile('vt-hyb', logfile='threads') from mpi4py import MPI from threading import Thread MPI.COMM_WORLD.Barrier() # Understanding the Python GIL # David Beazley, http://www.dabeaz.com # PyCon 2010, Atlanta, Georgia # http://www.dabeaz.com/python/UnderstandingGIL.pdf # Consider this trivial CPU-bound function def countdown(n): while n > 0: n -= 1 # Run it once with a lot of work COUNT = 10000000 # 10 millon tic = MPI.Wtime() countdown(COUNT) toc = MPI.Wtime() print ("sequential: %f seconds" % (toc-tic)) # Now, subdivide the work across two threads t1 = Thread(target=countdown, args=(COUNT//2,)) t2 = Thread(target=countdown, args=(COUNT//2,)) tic = MPI.Wtime() for t in (t1, t2): t.start() for t in (t1, t2): t.join() toc = MPI.Wtime() print ("threaded: %f seconds" % (toc-tic)) mpi4py-3.1.6/demo/wrap-boost/000077500000000000000000000000001460670727200157775ustar00rootroot00000000000000mpi4py-3.1.6/demo/wrap-boost/helloworld.cxx000066400000000000000000000017331460670727200207020ustar00rootroot00000000000000#include #include static void sayhello(MPI_Comm comm) { if (comm == MPI_COMM_NULL) { std::cout << "You passed MPI_COMM_NULL !!!" << std::endl; return; } int size; MPI_Comm_size(comm, &size); int rank; MPI_Comm_rank(comm, &rank); int plen; char pname[MPI_MAX_PROCESSOR_NAME]; MPI_Get_processor_name(pname, &plen); std::cout << "Hello, World! " << "I am process " << rank << " of " << size << " on " << pname << "." 
<< std::endl; } #include #include using namespace boost::python; static void hw_sayhello(object py_comm) { PyObject* py_obj = py_comm.ptr(); MPI_Comm *comm_p = PyMPIComm_Get(py_obj); if (comm_p == NULL) throw_error_already_set(); sayhello(*comm_p); } BOOST_PYTHON_MODULE(helloworld) { if (import_mpi4py() < 0) return; /* Python 2.X */ def("sayhello", hw_sayhello); } /* * Local Variables: * mode: C++ * End: */ mpi4py-3.1.6/demo/wrap-boost/makefile000066400000000000000000000013261460670727200175010ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) PYTHON_CONFIG = ${PYTHON} ../python-config MPI4PY_INCLUDE = ${shell ${PYTHON} -c 'import mpi4py; print( mpi4py.get_include() )'} BOOST_INCS = BOOST_LIBS = -lboost_python MPICXX = mpicxx CXXFLAGS = -fPIC ${shell ${PYTHON_CONFIG} --includes} ${BOOST_INCS} LDFLAGS = -shared ${shell ${PYTHON_CONFIG} --libs} ${BOOST_LIBS} SO = ${shell ${PYTHON_CONFIG} --extension-suffix} .PHONY: build build: helloworld${SO} helloworld${SO}: helloworld.cxx ${MPICXX} ${CXXFLAGS} -I${MPI4PY_INCLUDE} -o $@ $< ${LDFLAGS} MPIEXEC = mpiexec NP_FLAG = -n NP = 5 .PHONY: test test: build ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test.py .PHONY: clean clean: ${RM} helloworld${SO} mpi4py-3.1.6/demo/wrap-boost/test.py000066400000000000000000000003321460670727200173260ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-3.1.6/demo/wrap-c/000077500000000000000000000000001460670727200150735ustar00rootroot00000000000000mpi4py-3.1.6/demo/wrap-c/helloworld.c000066400000000000000000000043471460670727200174220ustar00rootroot00000000000000#define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include /* -------------------------------------------------------------------------- */ static void sayhello(MPI_Comm comm) { int size, rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname); } /* -------------------------------------------------------------------------- */ static PyObject * hw_sayhello(PyObject *self, PyObject *args) { PyObject *py_comm = NULL; MPI_Comm *comm_p = NULL; if (!PyArg_ParseTuple(args, "O:sayhello", &py_comm)) return NULL; comm_p = PyMPIComm_Get(py_comm); if (comm_p == NULL) return NULL; sayhello(*comm_p); Py_INCREF(Py_None); return Py_None; } static struct PyMethodDef hw_methods[] = { {"sayhello", (PyCFunction)hw_sayhello, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; #if PY_MAJOR_VERSION < 3 /* --- Python 2 --- */ PyMODINIT_FUNC inithelloworld(void) { PyObject *m = NULL; /* Initialize mpi4py C-API */ if (import_mpi4py() < 0) goto bad; /* Module initialization */ m = Py_InitModule("helloworld", hw_methods); if (m == NULL) goto bad; return; bad: return; } #else /* --- Python 3 --- */ static struct PyModuleDef hw_module = { PyModuleDef_HEAD_INIT, "helloworld", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ hw_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; PyMODINIT_FUNC PyInit_helloworld(void) { PyObject *m = NULL; /* Initialize mpi4py's C-API */ if (import_mpi4py() < 0) goto bad; /* Module initialization */ m = PyModule_Create(&hw_module); if (m == NULL) goto bad; return m; bad: return NULL; } #endif /* -------------------------------------------------------------------------- */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/demo/wrap-c/makefile000066400000000000000000000012071460670727200165730ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) PYTHON_CONFIG = ${PYTHON} ../python-config MPI4PY_INCLUDE = ${shell ${PYTHON} -c 'import mpi4py; print( mpi4py.get_include() )'} MPICC = mpicc CFLAGS = -fPIC ${shell ${PYTHON_CONFIG} --includes} LDFLAGS = -shared ${shell ${PYTHON_CONFIG} --libs} SO = ${shell ${PYTHON_CONFIG} --extension-suffix} .PHONY: build build: helloworld${SO} helloworld${SO}: helloworld.c ${MPICC} ${CFLAGS} -I${MPI4PY_INCLUDE} -o $@ $< ${LDFLAGS} MPIEXEC = mpiexec NP_FLAG = -n NP = 5 .PHONY: test test: build ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test.py .PHONY: clean clean: ${RM} helloworld${SO} mpi4py-3.1.6/demo/wrap-c/test.py000066400000000000000000000003321460670727200164220ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-3.1.6/demo/wrap-cffi/000077500000000000000000000000001460670727200155605ustar00rootroot00000000000000mpi4py-3.1.6/demo/wrap-cffi/helloworld.c000066400000000000000000000011131460670727200200730ustar00rootroot00000000000000#define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include #ifdef __cplusplus extern "C" { #endif extern void sayhello(MPI_Comm); #ifdef __cplusplus } #endif void sayhello(MPI_Comm comm) { int size, rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname); } mpi4py-3.1.6/demo/wrap-cffi/helloworld.py000066400000000000000000000007431460670727200203110ustar00rootroot00000000000000from mpi4py import MPI import cffi import os _libdir = os.path.dirname(__file__) ffi = cffi.FFI() if MPI._sizeof(MPI.Comm) == ffi.sizeof('int'): _mpi_comm_t = 'int' else: _mpi_comm_t = 'void*' ffi.cdef(""" typedef %(_mpi_comm_t)s MPI_Comm; void sayhello(MPI_Comm); """ % vars()) lib = ffi.dlopen(os.path.join(_libdir, "libhelloworld.so")) def sayhello(comm): comm_ptr = MPI._addressof(comm) comm_val = ffi.cast('MPI_Comm*', comm_ptr)[0] lib.sayhello(comm_val) mpi4py-3.1.6/demo/wrap-cffi/makefile000066400000000000000000000005621460670727200172630ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) MPICC = mpicc LIBNAME = libhelloworld.so .PHONY: build build: ${LIBNAME} ${LIBNAME}: helloworld.c ${MPICC} -shared -fPIC -o $@ $< MPIEXEC = mpiexec NP_FLAG = -n NP = 5 .PHONY: test test: build ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test.py .PHONY: clean clean: ${RM} -r ${LIBNAME} *.pyc __pycache__ mpi4py-3.1.6/demo/wrap-cffi/test.py000066400000000000000000000003321460670727200171070ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-3.1.6/demo/wrap-ctypes/000077500000000000000000000000001460670727200161605ustar00rootroot00000000000000mpi4py-3.1.6/demo/wrap-ctypes/helloworld.c000066400000000000000000000011131460670727200204730ustar00rootroot00000000000000#define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include #ifdef __cplusplus extern "C" { #endif extern void sayhello(MPI_Comm); #ifdef __cplusplus } #endif void sayhello(MPI_Comm comm) { int size, rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname); } mpi4py-3.1.6/demo/wrap-ctypes/helloworld.py000066400000000000000000000007221460670727200207060ustar00rootroot00000000000000from mpi4py import MPI import ctypes import os _libdir = os.path.dirname(__file__) if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int): MPI_Comm = ctypes.c_int else: MPI_Comm = ctypes.c_void_p _lib = ctypes.CDLL(os.path.join(_libdir, "libhelloworld.so")) _lib.sayhello.restype = None _lib.sayhello.argtypes = [MPI_Comm] def sayhello(comm): comm_ptr = MPI._addressof(comm) comm_val = MPI_Comm.from_address(comm_ptr) _lib.sayhello(comm_val) mpi4py-3.1.6/demo/wrap-ctypes/makefile000066400000000000000000000005621460670727200176630ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) MPICC = mpicc LIBNAME = libhelloworld.so .PHONY: build build: ${LIBNAME} ${LIBNAME}: helloworld.c ${MPICC} -shared -fPIC -o $@ $< MPIEXEC = mpiexec NP_FLAG = -n NP = 5 .PHONY: test test: build ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test.py .PHONY: clean clean: ${RM} -r ${LIBNAME} *.pyc __pycache__ mpi4py-3.1.6/demo/wrap-ctypes/test.py000066400000000000000000000003321460670727200175070ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-3.1.6/demo/wrap-cython/000077500000000000000000000000001460670727200161555ustar00rootroot00000000000000mpi4py-3.1.6/demo/wrap-cython/helloworld.pyx000066400000000000000000000012231460670727200210700ustar00rootroot00000000000000cdef extern from "mpi-compat.h": pass cimport mpi4py.MPI as MPI from mpi4py.libmpi cimport * cdef extern from "stdio.h": int printf(char*, ...) cdef void c_sayhello(MPI_Comm comm): cdef int size, rank, plen cdef char pname[MPI_MAX_PROCESSOR_NAME] if comm == MPI_COMM_NULL: printf(b"You passed MPI_COMM_NULL !!!%s", b"\n") return MPI_Comm_size(comm, &size) MPI_Comm_rank(comm, &rank) MPI_Get_processor_name(pname, &plen) printf(b"Hello, World! 
I am process %d of %d on %s.\n", rank, size, pname) def sayhello(MPI.Comm comm not None ): cdef MPI_Comm c_comm = comm.ob_mpi c_sayhello(c_comm) mpi4py-3.1.6/demo/wrap-cython/makefile000066400000000000000000000012051460670727200176530ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) PYTHON_CONFIG = ${PYTHON} ../python-config CYTHON = cython .PHONY: src src: helloworld.c helloworld.c: helloworld.pyx ${CYTHON} $< MPICC = mpicc CFLAGS = -fPIC ${shell ${PYTHON_CONFIG} --includes} LDFLAGS = -shared ${shell ${PYTHON_CONFIG} --libs} SO = ${shell ${PYTHON_CONFIG} --extension-suffix} .PHONY: build build: helloworld${SO} helloworld${SO}: helloworld.c ${MPICC} ${CFLAGS} -o $@ $< ${LDFLAGS} MPIEXEC = mpiexec NP_FLAG = -n NP = 5 .PHONY: test test: build ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test.py .PHONY: clean clean: ${RM} helloworld.c helloworld${SO} mpi4py-3.1.6/demo/wrap-cython/mpi-compat.h000066400000000000000000000004401460670727200203720ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI_COMPAT_H #define MPI_COMPAT_H #include #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #endif/*MPI_COMPAT_H*/ mpi4py-3.1.6/demo/wrap-cython/test.py000066400000000000000000000004621460670727200175100ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(None) except: pass else: assert 0, "exception not raised" try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-3.1.6/demo/wrap-f2py/000077500000000000000000000000001460670727200155315ustar00rootroot00000000000000mpi4py-3.1.6/demo/wrap-f2py/helloworld.f90000066400000000000000000000013661460670727200202320ustar00rootroot00000000000000! ! $ f2py --f90exec=mpif90 -m helloworld -c helloworld.f90 ! subroutine sayhello(comm) use mpi implicit none integer :: comm integer :: rank, size, nlen, ierr character (len=MPI_MAX_PROCESSOR_NAME) :: pname if (comm == MPI_COMM_NULL) then print *, 'You passed MPI_COMM_NULL !!!' return end if call MPI_Comm_rank(comm, rank, ierr) call MPI_Comm_size(comm, size, ierr) call MPI_Get_processor_name(pname, nlen, ierr) print *, 'Hello, World!', & ' I am process ', rank, & ' of ', size, & ' on ', pname(1:nlen), '.' end subroutine sayhello ! program main ! use mpi ! implicit none ! integer ierr ! call MPI_Init(ierr) ! call sayhello(MPI_COMM_WORLD) ! call MPI_Finalize(ierr) ! 
end program main mpi4py-3.1.6/demo/wrap-f2py/makefile000066400000000000000000000010571460670727200172340ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python PYTHON_CONFIG = ${PYTHON} ../python-config SO = ${shell ${PYTHON_CONFIG} --extension-suffix} MPIF90 = mpif90 F2PY = f2py ifneq (${MPI_FORTRAN_MOD_DIR},) F2PYFLAGS += --f90flags=-I${MPI_FORTRAN_MOD_DIR} endif .PHONY: build build: helloworld${SO} helloworld${SO}: helloworld.f90 ${F2PY} ${F2PYFLAGS} --f90exec=${MPIF90} -m helloworld -c $< MPIEXEC = mpiexec NP_FLAG = -n NP = 5 .PHONY: test test: build ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test.py .PHONY: clean clean: ${RM} helloworld${SO} mpi4py-3.1.6/demo/wrap-f2py/test.py000066400000000000000000000004041460670727200170600ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL fnull = null.py2f() hw.sayhello(fnull) comm = MPI.COMM_WORLD fcomm = comm.py2f() hw.sayhello(fcomm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-3.1.6/demo/wrap-swig/000077500000000000000000000000001460670727200156225ustar00rootroot00000000000000mpi4py-3.1.6/demo/wrap-swig/helloworld.i000066400000000000000000000012161460670727200201470ustar00rootroot00000000000000%module helloworld %{ #define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #include #include void sayhello(MPI_Comm comm) { int size, rank; char pname[MPI_MAX_PROCESSOR_NAME]; int len; if (comm == MPI_COMM_NULL) { printf("You passed MPI_COMM_NULL !!!\n"); return; } MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); MPI_Get_processor_name(pname, &len); pname[len] = 0; printf("Hello, World! I am process %d of %d on %s.\n", rank, size, pname); } %} %include mpi4py/mpi4py.i %mpi4py_typemap(Comm, MPI_Comm); void sayhello(MPI_Comm comm); /* * Local Variables: * mode: C * End: */ mpi4py-3.1.6/demo/wrap-swig/makefile000066400000000000000000000015031460670727200173210ustar00rootroot00000000000000.PHONY: default default: build test clean PYTHON = python$(py) PYTHON_CONFIG = ${PYTHON} ../python-config MPI4PY_INCLUDE = ${shell ${PYTHON} -c 'import mpi4py; print( mpi4py.get_include() )'} SWIG = swig SWIG_PY = ${SWIG} -python .PHONY: src src: helloworld_wrap.c helloworld_wrap.c: helloworld.i ${SWIG_PY} -I${MPI4PY_INCLUDE} -o $@ $< MPICC = mpicc CFLAGS = -fPIC ${shell ${PYTHON_CONFIG} --includes} LDFLAGS = -shared ${shell ${PYTHON_CONFIG} --libs} SO = ${shell ${PYTHON_CONFIG} --extension-suffix} .PHONY: build build: _helloworld${SO} _helloworld${SO}: helloworld_wrap.c ${MPICC} ${CFLAGS} -I${MPI4PY_INCLUDE} -o $@ $< ${LDFLAGS} MPIEXEC = mpiexec NP_FLAG = -n NP = 5 .PHONY: test test: build ${MPIEXEC} ${NP_FLAG} ${NP} ${PYTHON} test.py .PHONY: clean clean: ${RM} helloworld_wrap.c helloworld.py* _helloworld${SO} mpi4py-3.1.6/demo/wrap-swig/test.py000066400000000000000000000003321460670727200171510ustar00rootroot00000000000000from mpi4py import MPI import helloworld as hw null = MPI.COMM_NULL hw.sayhello(null) comm = MPI.COMM_WORLD hw.sayhello(comm) try: hw.sayhello(list()) except: pass else: assert 0, "exception not raised" mpi4py-3.1.6/docs/000077500000000000000000000000001460670727200137065ustar00rootroot00000000000000mpi4py-3.1.6/docs/index.rst000066400000000000000000000051401460670727200155470ustar00rootroot00000000000000============== MPI for Python ============== :Author: Lisandro Dalcin :Contact: dalcinl@gmail.com Online Documentation -------------------- Hosted at *Read the Docs* [https://mpi4py.readthedocs.io/]: + `Stable `_: |rtd-stable| + `Latest 
`_: |rtd-latest| .. |rtd-stable| image:: https://readthedocs.org/projects/mpi4py/badge/?version=stable :target: `rtd-stable`_ .. _rtd-stable: https://mpi4py.readthedocs.io/en/stable/ .. |rtd-latest| image:: https://readthedocs.org/projects/mpi4py/badge/?version=latest :target: `rtd-latest`_ .. _rtd-latest: https://mpi4py.readthedocs.io/en/latest Hosted at *GitHub* [https://mpi4py.github.io/]: + `User Manual (HTML)`_ (generated with Sphinx_). + `User Manual (PDF)`_ (generated with Sphinx_). + `API Reference`_ (generated with Epydoc_). .. _User Manual (HTML): usrman/index.html .. _User Manual (PDF): mpi4py.pdf .. _API Reference: apiref/index.html .. _Sphinx: https://www.sphinx-doc.org/ .. _Epydoc: http://epydoc.sourceforge.net/ Discussion and Support ---------------------- Hosted at Google Groups: + Group Page: https://groups.google.com/g/mpi4py + Mailing List: mpi4py@googlegroups.com Hosted at GitHub: * Discussions: https://github.com/mpi4py/mpi4py/discussions Downloads and Development ------------------------- Hosted at GitHub: + Project Site: https://github.com/mpi4py/mpi4py + Source Releases: https://github.com/mpi4py/mpi4py/releases + Issue Tracker: https://github.com/mpi4py/mpi4py/issues + Git Repository: https://github.com/mpi4py/mpi4py.git Citations --------- + L. Dalcin and Y.-L. L. Fang, *mpi4py: Status Update After 12 Years of Development*, Computing in Science & Engineering, 23(4):47-54, 2021. https://doi.org/10.1109/MCSE.2021.3083216 + L. Dalcin, P. Kler, R. Paz, and A. Cosimo, *Parallel Distributed Computing using Python*, Advances in Water Resources, 34(9):1124-1139, 2011. https://doi.org/10.1016/j.advwatres.2011.04.013 + L. Dalcin, R. Paz, M. Storti, and J. D'Elia, *MPI for Python: performance improvements and MPI-2 extensions*, Journal of Parallel and Distributed Computing, 68(5):655-662, 2008. https://doi.org/10.1016/j.jpdc.2007.09.005 + L. Dalcin, R. Paz, and M. Storti, *MPI for Python*, Journal of Parallel and Distributed Computing, 65(9):1108-1115, 2005. https://doi.org/10.1016/j.jpdc.2005.03.010 Acknowledgments --------------- This project was partially supported by the Extreme Computing Research Center (ECRC), Division of Computer, Electrical, and Mathematical Sciences & Engineering (CEMSE), King Abdullah University of Science and Technology (KAUST). mpi4py-3.1.6/docs/mpi4py.bib000066400000000000000000000026301460670727200156070ustar00rootroot00000000000000@article{Dalcin2021, title = {mpi4py: Status Update After 12 Years of Development}, author = {Dalcin, Lisandro and Fang, Yao-Lung Leo}, journal = {Computing in Science \& Engineering}, volume = {23}, number = {4}, pages = {47--54}, year = {2021}, issn = {1521-9615}, doi = {10.1109/MCSE.2021.3083216} } @article{Dalcin2011, title = {Parallel distributed computing using {P}ython}, author = {Lisandro D. Dalcin and Rodrigo R. Paz and Pablo A. 
Kler and Alejandro Cosimo}, journal = {Advances in Water Resources}, volume = {34}, number = {9}, pages = {1124--1139}, year = {2011}, issn = {0309-1708}, doi = {10.1016/j.advwatres.2011.04.013}, } @article{mpi4py2008, title = {{MPI} for {P}ython: Performance improvements and {MPI-2} extensions}, author = {Lisandro Dalcin and Rodrigo Paz and Mario Storti and Jorge D'Elia}, journal = {Journal of Parallel and Distributed Computing}, volume = {68}, number = {5}, pages = {655--662}, year = {2008}, issn = {0743-7315}, doi = {10.1016/j.jpdc.2007.09.005}, } @article{mpi4py2005, title = {{MPI} for {P}ython}, author = {Lisandro Dalcin and Rodrigo Paz and Mario Storti}, journal = {Journal of Parallel and Distributed Computing}, author = {Lisandro Dalcin and Rodrigo Paz and Mario Storti}, volume = {65}, number = {9}, pages = {1108--1115}, year = {2005}, issn = {0743-7315}, doi = {10.1016/j.jpdc.2005.03.010}, } mpi4py-3.1.6/docs/source/000077500000000000000000000000001460670727200152065ustar00rootroot00000000000000mpi4py-3.1.6/docs/source/usrman/000077500000000000000000000000001460670727200165135ustar00rootroot00000000000000mpi4py-3.1.6/docs/source/usrman/.gitignore000066400000000000000000000000211460670727200204740ustar00rootroot00000000000000_build reference mpi4py-3.1.6/docs/source/usrman/Makefile000066400000000000000000000011721460670727200201540ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) mpi4py-3.1.6/docs/source/usrman/_static/000077500000000000000000000000001460670727200201415ustar00rootroot00000000000000mpi4py-3.1.6/docs/source/usrman/_static/css/000077500000000000000000000000001460670727200207315ustar00rootroot00000000000000mpi4py-3.1.6/docs/source/usrman/_static/css/custom.css000066400000000000000000000001441460670727200227540ustar00rootroot00000000000000/* Custom :c:func:`Function` */ .rst-content code.xref.c-func span.pre { font-weight: normal; } mpi4py-3.1.6/docs/source/usrman/_templates/000077500000000000000000000000001460670727200206505ustar00rootroot00000000000000mpi4py-3.1.6/docs/source/usrman/_templates/autosummary/000077500000000000000000000000001460670727200232365ustar00rootroot00000000000000mpi4py-3.1.6/docs/source/usrman/_templates/autosummary/class.rst000066400000000000000000000032271460670727200251010ustar00rootroot00000000000000{{ fullname | escape | underline}} .. currentmodule:: {{ module }} {%- if autotype is defined %} {%- set objtype = autotype.get(name) or objtype %} {%- endif %} .. auto{{ objtype }}:: {{ objname }} :show-inheritance: {% for item in ['__new__', '__init__'] %} {%- if item in members and item not in inherited_members %} .. 
automethod:: {{item}} {%- endif %} {%- endfor %} {%- for item in inherited_members %} {%- if item in methods %} {%- set dummy = methods.remove(item) %} {%- endif %} {%- if item in attributes %} {%- set dummy = attributes.remove(item) %} {%- endif %} {%- endfor %} {%- for item in ['__new__', '__init__'] %} {%- if item in methods %} {%- set dummy = methods.remove(item) %} {%- endif %} {%- endfor %} {% block methods_summary %} {%- if methods %} .. rubric:: Methods Summary .. autosummary:: {% for item in methods %} ~{{ name }}.{{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block attributes_summary %} {%- if attributes %} .. rubric:: Attributes Summary .. autosummary:: {% for item in attributes %} ~{{ name }}.{{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block methods_documentation %} {%- if methods %} .. rubric:: Methods Documentation {% for item in methods %} .. automethod:: {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block attributes_documentation %} {%- if attributes %} .. rubric:: Attributes Documentation {% for item in attributes %} .. autoattribute:: {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {# #} mpi4py-3.1.6/docs/source/usrman/_templates/autosummary/module.rst000066400000000000000000000023351460670727200252600ustar00rootroot00000000000000{{ fullname | escape | underline}} .. automodule:: {{ fullname }} {%- if synopsis is defined %} :synopsis: {{ synopsis.get(fullname, '') }} {%- endif %} {% block classes %} {%- if classes %} .. rubric:: {{ _('Classes') }} .. autosummary:: :toctree: {% for item in classes %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block exceptions %} {%- if exceptions %} .. rubric:: {{ _('Exceptions') }} .. autosummary:: :toctree: {% for item in exceptions %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block functions %} {%- if functions %} .. rubric:: {{ _('Functions') }} .. autosummary:: :toctree: {% for item in functions %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block attributes %} {%- if attributes %} .. rubric:: {{ _('Attributes') }} .. autosummary:: :toctree: {% for item in attributes %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} {% block modules %} {%- if modules %} .. rubric:: {{ _('Modules') }} .. 
autosummary:: :toctree: :recursive: {% for item in modules %} {{ item }} {%- endfor %} {%- endif %} {%- endblock %} mpi4py-3.1.6/docs/source/usrman/_templates/layout.html000066400000000000000000000005731460670727200230600ustar00rootroot00000000000000{% extends "!layout.html" %} {% macro menu_genindex() -%} {%- endmacro %} {% block menu %} {{ super() }} {%- if builder == 'html' %} {{ menu_genindex() }} {%- endif %} {%- if builder == 'dirhtml' %} {{ menu_genindex() }} {%- endif %} {% endblock %} mpi4py-3.1.6/docs/source/usrman/apidoc.py000066400000000000000000000313371460670727200203330ustar00rootroot00000000000000import os import sys import inspect from textwrap import dedent from textwrap import indent def is_function(obj): return inspect.isbuiltin(obj) or type(obj) is type(ord) def is_method(obj): return inspect.ismethoddescriptor(obj) or type(obj) in ( type(str.index), type(str.__add__), type(str.__new__), ) def is_classmethod(obj): return inspect.isbuiltin(obj) or type(obj).__name__ in ( 'classmethod', 'classmethod_descriptor', ) def is_staticmethod(obj): return type(obj).__name__ in ( 'staticmethod', ) def is_datadescr(obj): return inspect.isdatadescriptor(obj) and not hasattr(obj, 'fget') def is_property(obj): return inspect.isdatadescriptor(obj) and hasattr(obj, 'fget') def is_class(obj): return inspect.isclass(obj) or type(obj) is type(int) class Lines(list): INDENT = " " * 4 level = 0 @property def add(self): return self @add.setter def add(self, lines): if lines is None: return if isinstance(lines, str): lines = dedent(lines).strip().split("\n") indent = self.INDENT * self.level for line in lines: self.append(indent + line) def signature(obj): doc = obj.__doc__ sig = doc.split('\n', 1)[0].split('.', 1)[-1] return sig def docstring(obj): doc = obj.__doc__ doc = doc.split('\n', 1)[1] doc = dedent(doc).strip() doc = f'"""{doc}"""' doc = indent(doc, Lines.INDENT) return doc def apidoc_constant(constant): name, value = constant typename = type(value).__name__ init = f"_def({typename}, '{name}')" doc = f"#: :class:`{typename}` ``{name}``" return f"{name}: {typename} = {init} {doc}\n" def apidoc_function(function): sig = signature(function) doc = docstring(function) body = Lines.INDENT + "..." return f"def {sig}:\n{doc}\n{body}\n" def apidoc_method(method): sig = signature(method) doc = docstring(method) body = Lines.INDENT + "..." return f"def {sig}:\n{doc}\n{body}\n" def apidoc_datadescr(datadescr, name=None): sig = signature(datadescr) doc = docstring(datadescr) name = sig.split(':')[0].strip() type = sig.split(':')[1].strip() sig = f"{name}(self) -> {type}" body = Lines.INDENT + "..." return f"@property\ndef {sig}:\n{doc}\n{body}\n" def apidoc_property(prop, name=None): sig = signature(prop.fget) name = name or prop.fget.__name__ type = sig.rsplit('->', 1)[-1].strip() sig = f"{name}(self) -> {type}" doc = f'"""{prop.__doc__}"""' doc = indent(doc, Lines.INDENT) body = Lines.INDENT + "..." return f"@property\ndef {sig}:\n{doc}\n{body}\n" def apidoc_constructor(cls, name='__init__'): init = (name == '__init__') argname = cls.__mro__[-2].__name__.lower() argtype = cls.__name__ initarg = f"{argname}: Optional[{argtype}] = None" selfarg = 'self' if init else 'cls' rettype = 'None' if init else argtype arglist = f"{selfarg}, {initarg}" sig = f"{name}({arglist}) -> {rettype}" ret = '...' 
if init else 'return super().__new__(cls)' body = Lines.INDENT + ret return f"def {sig}:\n{body}" def apidoc_class(cls, done=None): skip = { '__doc__', '__module__', '__weakref__', '__pyx_vtable__', '__lt__', '__le__', '__ge__', '__gt__', '__buffer__', } special = { '__len__': "__len__(self) -> int", '__bool__': "__bool__(self) -> bool", '__hash__': "__hash__(self) -> int", '__int__': "__int__(self) -> int", '__index__': "__int__(self) -> int", '__str__': "__str__(self) -> str", '__repr__': "__repr__(self) -> str", '__eq__': "__eq__(self, other: Any) -> bool", '__ne__': "__ne__(self, other: Any) -> bool", } constructor = { '__new__', '__init__', } override = OVERRIDE.get(cls.__name__, {}) done = set() if done is None else done lines = Lines() base = cls.__base__ if base is object: lines.add = f"class {cls.__name__}:" else: lines.add = f"class {cls.__name__}({base.__name__}):" lines.level += 1 doc = cls.__doc__ doc = dedent(doc).strip() if doc.startswith(f"{cls.__name__}("): doc = doc.split('\n', 1)[1].strip() lines.add = f'"""{doc}"""' for name in constructor: if name in override: done.update(constructor) lines.add = override[name] break for name in constructor: if name in done: break if name in cls.__dict__: done.update(constructor) lines.add = apidoc_constructor(cls, name) break if '__hash__' in cls.__dict__: if cls.__hash__ is None: done.add('__hash__') dct = cls.__dict__ keys = list(dct.keys()) for name in keys: if name in done: continue if name in skip: continue if name in override: done.add(name) lines.add = override[name] continue if name in special: done.add(name) sig = special[name] lines.add = f"def {sig}: ..." continue if name in constructor: done.update(constructor) lines.add = apidoc_constructor(cls) continue attr = getattr(cls, name) if is_method(attr): done.add(name) if name == attr.__name__: obj = dct[name] if is_classmethod(obj): lines.add = f"@classmethod" elif is_staticmethod(obj): lines.add = f"@staticmethod" lines.add = apidoc_method(attr) elif False: lines.add = f"{name} = {attr.__name__}" continue if is_datadescr(attr): done.add(name) lines.add = apidoc_datadescr(attr) continue if is_property(attr): done.add(name) lines.add = apidoc_property(attr, name) continue leftovers = [name for name in keys if name not in done and name not in skip] assert not leftovers, f"leftovers: {leftovers}" lines.level -= 1 return lines def apidoc_module(module, done=None): skip = { '__doc__', '__name__', '__loader__', '__spec__', '__file__', '__package__', '__builtins__', '__pyx_capi__', } done = set() if done is None else done lines = Lines() keys = list(module.__dict__.keys()) keys.sort(key=lambda name: name.startswith("_")) constants = [ (name, getattr(module, name)) for name in keys if all(( name not in done and name not in skip, isinstance(getattr(module, name), int), )) ] for attr in constants: name, value = attr done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = apidoc_constant((name, value)) if constants: lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) if is_class(value): done.add(name) lines.add = apidoc_class(value) lines.add = "" instances = [ (k, getattr(module, k)) for k in keys if all(( k not in done and k not in skip, type(getattr(module, k)) is value, )) ] for attrname, attrvalue in instances: done.add(attrname) lines.add = apidoc_constant((attrname, attrvalue)) if instances: lines.add = "" continue if is_function(value): done.add(name) if name == value.__name__: lines.add = 
apidoc_function(value) else: lines.add = f"{name} = {value.__name__}" continue lines.add = "" for name in keys: if name in done or name in skip: continue value = getattr(module, name) done.add(name) if name in OVERRIDE: lines.add = OVERRIDE[name] else: lines.add = apidoc_constant((name, value)) leftovers = [name for name in keys if name not in done and name not in skip] assert not leftovers, f"leftovers: {leftovers}" return lines IMPORTS = """ from __future__ import annotations """ HELPERS = """ class _Int(int): pass class Bottom(_Int): pass class InPlace(_Int): pass def _repr(obj): try: return obj._repr except AttributeError: return super(obj).__repr__() def _def(cls, name): if cls is int: cls = _Int obj = cls() if cls.__name__ in ('Pickle', 'memory'): return obj obj._repr = name if '__repr__' not in cls.__dict__: cls.__repr__ = _repr return obj """ OVERRIDE = { 'Exception': { '__new__':( "def __new__(cls, ierr: int = SUCCESS) -> Exception:\n" " return super().__new__(ierr)"), "__lt__": "def __lt__(self, other: int) -> bool: ...", "__le__": "def __le__(self, other: int) -> bool: ...", "__gt__": "def __gt__(self, other: int) -> bool: ...", "__ge__": "def __ge__(self, other: int) -> bool: ...", }, 'Info': { '__iter__': "def __iter__(self) -> Iterator[str]: ...", '__getitem__': "def __getitem__(self, item: str) -> str: ...", '__setitem__': "def __setitem__(self, item: str, value: str) -> None: ...", '__delitem__': "def __delitem__(self, item: str) -> None: ...", '__contains__': "def __contains__(self, value: str) -> bool: ...", }, 'Op': { '__call__': "def __call__(self, x: Any, y: Any) -> Any: ...", }, 'memory': { '__new__':( "def __new__(cls, buf: Buffer) -> memory:\n" " return super().__new__()"), '__getitem__': ( "def __getitem__(self, " "item: Union[int, slice]) " "-> Union[int, memory]: ..."), '__setitem__': ( "def __setitem__(self, " "item: Union[int, slice], " "value: Union[int, Buffer]) " "-> None: ..."), '__delitem__': None, }, 'Pickle': { '__init__': """ def __init__(self, dumps: Optional[Callable[[Any, int], bytes]] = None, loads: Optional[Callable[[Buffer], Any]] = None, protocol: Optional[int] = None, ) -> None: ... 
""" }, '_typedict': "_typedict: Dict[str, Datatype] = {}", '_typedict_c': "_typedict_c: Dict[str, Datatype] = {}", '_typedict_f': "_typedict_f: Dict[str, Datatype] = {}", '_keyval_registry': None, } def apidoc_mpi4py_MPI(done=None): from mpi4py import MPI lines = Lines() lines.add = f'"""{MPI.__doc__}"""' lines.add = IMPORTS lines.add = "" lines.add = HELPERS lines.add = "" lines.add = apidoc_module(MPI) return lines def generate(filename): dirname = os.path.dirname(filename) os.makedirs(dirname, exist_ok=True) with open(filename, 'w') as f: for line in apidoc_mpi4py_MPI(): print(line, file=f) def load_module(filename, name=None): if name is None: name, _ = os.path.splitext( os.path.basename(filename)) module = type(sys)(name) module.__file__ = filename module.__package__ = name.rsplit('.', 1)[0] with open(filename) as f: exec(f.read(), module.__dict__) return module _sys_modules = {} def replace_module(module): name = module.__name__ assert name not in _sys_modules _sys_modules[name] = sys.modules[name] sys.modules[name] = module def restore_module(module): name = module.__name__ assert name in _sys_modules sys.modules[name] = _sys_modules[name] def annotate(dest, source): try: dest.__annotations__ = source.__annotations__ except Exception: pass if isinstance(dest, type): for name in dest.__dict__.keys(): if hasattr(source, name): obj = getattr(dest, name) annotate(obj, getattr(source, name)) if isinstance(dest, type(sys)): for name in dir(dest): if hasattr(source, name): obj = getattr(dest, name) mod = getattr(obj, '__module__', None) if dest.__name__ == mod: annotate(obj, getattr(source, name)) OUTDIR = 'reference' if __name__ == '__main__': generate(os.path.join(OUTDIR, 'mpi4py.MPI.py')) mpi4py-3.1.6/docs/source/usrman/appendix.rst000066400000000000000000000076741460670727200210730ustar00rootroot00000000000000Appendix ======== .. _python-mpi: MPI-enabled Python interpreter ------------------------------ .. warning:: These days it is no longer required to use the MPI-enabled Python interpreter in most cases, and, therefore, it is not built by default anymore because it is too difficult to reliably build a Python interpreter across different distributions. If you know that you still **really** need it, see below on how to use the ``build_exe`` and ``install_exe`` commands. Some MPI-1 implementations (notably, MPICH 1) **do require** the actual command line arguments to be passed at the time :c:func:`MPI_Init()` is called. In this case, you will need to use a re-built, MPI-enabled, Python interpreter binary executable. A basic implementation (targeting Python 2.X) of what is required is shown below: .. sourcecode:: c #include #include int main(int argc, char *argv[]) { int status, flag; MPI_Init(&argc, &argv); status = Py_Main(argc, argv); MPI_Finalized(&flag); if (!flag) MPI_Finalize(); return status; } The source code above is straightforward; compiling it should also be. However, the linking step is more tricky: special flags have to be passed to the linker depending on your platform. In order to alleviate you for such low-level details, *MPI for Python* provides some pure-distutils based support to build and install an MPI-enabled Python interpreter executable:: $ cd mpi4py-X.X.X $ python setup.py build_exe [--mpi=|--mpicc=/path/to/mpicc] $ [sudo] python setup.py install_exe [--install-dir=$HOME/bin] After the above steps you should have the MPI-enabled interpreter installed as :file:`{prefix}/bin/python{X}.{X}-mpi` (or :file:`$HOME/bin/python{X}.{X}-mpi`). 
Assuming that :file:`{prefix}/bin` (or :file:`$HOME/bin`) is listed on your :envvar:`PATH`, you should be able to enter your MPI-enabled Python interactively, for example:: $ python2.7-mpi Python 2.7.8 (default, Nov 10 2014, 08:19:18) [GCC 4.9.2 20141101 (Red Hat 4.9.2-1)] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import sys >>> sys.executable '/usr/bin/python2.7-mpi' >>> .. _building-mpi: Building MPI from sources ------------------------- In the list below you have some executive instructions for building some of the open-source MPI implementations out there with support for shared/dynamic libraries on POSIX environments. + *MPICH* :: $ tar -zxf mpich-X.X.X.tar.gz $ cd mpich-X.X.X $ ./configure --enable-shared --prefix=/usr/local/mpich $ make $ make install + *Open MPI* :: $ tar -zxf openmpi-X.X.X tar.gz $ cd openmpi-X.X.X $ ./configure --prefix=/usr/local/openmpi $ make all $ make install + *MPICH 1* :: $ tar -zxf mpich-X.X.X.tar.gz $ cd mpich-X.X.X $ ./configure --enable-sharedlib --prefix=/usr/local/mpich1 $ make $ make install Perhaps you will need to set the :envvar:`LD_LIBRARY_PATH` environment variable (using :command:`export`, :command:`setenv` or what applies to your system) pointing to the directory containing the MPI libraries . In case of getting runtime linking errors when running MPI programs, the following lines can be added to the user login shell script (:file:`.profile`, :file:`.bashrc`, etc.). - *MPICH* :: MPI_DIR=/usr/local/mpich export LD_LIBRARY_PATH=$MPI_DIR/lib:$LD_LIBRARY_PATH - *Open MPI* :: MPI_DIR=/usr/local/openmpi export LD_LIBRARY_PATH=$MPI_DIR/lib:$LD_LIBRARY_PATH - *MPICH 1* :: MPI_DIR=/usr/local/mpich1 export LD_LIBRARY_PATH=$MPI_DIR/lib/shared:$LD_LIBRARY_PATH: export MPICH_USE_SHLIB=yes .. warning:: MPICH 1 support for dynamic libraries is not completely transparent. Users should set the environment variable :envvar:`MPICH_USE_SHLIB` to ``yes`` in order to avoid link problems when using the :program:`mpicc` compiler wrapper. mpi4py-3.1.6/docs/source/usrman/citing.rst000066400000000000000000000016671460670727200205340ustar00rootroot00000000000000Citation ======== If MPI for Python been significant to a project that leads to an academic publication, please acknowledge that fact by citing the project. * L. Dalcin and Y.-L. L. Fang, *mpi4py: Status Update After 12 Years of Development*, Computing in Science & Engineering, 23(4):47-54, 2021. https://doi.org/10.1109/MCSE.2021.3083216 * L. Dalcin, P. Kler, R. Paz, and A. Cosimo, *Parallel Distributed Computing using Python*, Advances in Water Resources, 34(9):1124-1139, 2011. https://doi.org/10.1016/j.advwatres.2011.04.013 * L. Dalcin, R. Paz, M. Storti, and J. D'Elia, *MPI for Python: performance improvements and MPI-2 extensions*, Journal of Parallel and Distributed Computing, 68(5):655-662, 2008. https://doi.org/10.1016/j.jpdc.2007.09.005 * L. Dalcin, R. Paz, and M. Storti, *MPI for Python*, Journal of Parallel and Distributed Computing, 65(9):1108-1115, 2005. https://doi.org/10.1016/j.jpdc.2005.03.010 mpi4py-3.1.6/docs/source/usrman/conf.py000066400000000000000000000210361460670727200200140ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. 
For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys import typing import datetime import importlib sys.path.insert(0, os.path.abspath('.')) _today = datetime.datetime.now() # -- Project information ----------------------------------------------------- package = 'mpi4py' def pkg_version(): import re here = os.path.dirname(__file__) pardir = [os.path.pardir] * 3 topdir = os.path.join(here, *pardir) srcdir = os.path.join(topdir, 'src') with open(os.path.join(srcdir, 'mpi4py', '__init__.py')) as f: m = re.search(r"__version__\s*=\s*'(.*)'", f.read()) return m.groups()[0] project = 'MPI for Python' author = 'Lisandro Dalcin' copyright = f'{_today.year}, {author}' # The full version, including alpha/beta/rc tags release = pkg_version() version = release.rsplit('.', 1)[0] # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] default_role = 'any' nitpicky = False nitpick_ignore = [ ('c:func', r'atexit'), ('envvar', r'MPICC'), ('py:mod', r'__worker__'), ('py:mod', r'pickle5'), ] nitpick_ignore_regex = [ (r'c:.*', r'MPI_.*'), (r'envvar', r'(LD_LIBRARY_)?PATH'), (r'envvar', r'(MPICH|OMPI|MPIEXEC)_.*'), ] # python_use_unqualified_type_names = True autodoc_typehints = 'description' autodoc_type_aliases = {} autodoc_mock_imports = [] autosummary_context = { 'synopsis': {}, 'autotype': {}, } intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'numpy': ('https://numpy.org/doc/stable/', None), } napoleon_preprocess_types = True try: import sphinx_rtd_theme if 'sphinx_rtd_theme' not in extensions: extensions.append('sphinx_rtd_theme') except ImportError: sphinx_rtd_theme = None def _setup_numpy_typing(): try: import numpy as np except ImportError: np = type(sys)('numpy') sys.modules[np.__name__] = np np.dtype = type('dtype', (), {}) try: import numpy.typing as npt except ImportError: npt = type(sys)('numpy.typing') np.typing = npt sys.modules[npt.__name__] = npt npt.__all__ = [] for attr in ['ArrayLike', 'DTypeLike']: setattr(npt, attr, typing.Any) npt.__all__.append(attr) autodoc_type_aliases.update({ 'dtype': 'numpy.dtype', 'ArrayLike': 'numpy.typing.ArrayLike', 'DTypeLike': 'numpy.typing.DTypeLike', }) def _patch_domain_python(): from sphinx.domains import python try: from numpy.typing import __all__ as numpy_types except ImportError: numpy_types = [] try: from mpi4py.typing import __all__ as mpi4py_types except ImportError: mpi4py_types = [] def make_xref(self, rolename, domain, target, *args, **kwargs): if target in ('True', 'False'): rolename = 'obj' elif target in numpy_types: rolename = 'data' elif target in mpi4py_types: rolename = 'data' return make_xref_orig(self, rolename, domain, target, *args, *kwargs) numpy_types = set(f'numpy.typing.{name}' for name in numpy_types) mpi4py_types = set(mpi4py_types) make_xref_orig = python.PyXrefMixin.make_xref python.PyXrefMixin.make_xref = make_xref def _patch_autosummary(): from sphinx.ext import autodoc from sphinx.ext import autosummary from sphinx.ext.autosummary import generate class ExceptionDocumenter(autodoc.ExceptionDocumenter): objtype = 'class' def get_documenter(app, obj, parent): if isinstance(obj, type) and issubclass(obj, BaseException): caller = sys._getframe().f_back.f_code.co_name if caller == 'generate_autosummary_content': if obj.__module__ == 'mpi4py.MPI': if obj.__name__ == 'Exception': return ExceptionDocumenter return autosummary.get_documenter(app, obj, parent) generate.get_documenter = get_documenter def setup(app): _setup_numpy_typing() _patch_domain_python() _patch_autosummary() try: from mpi4py import MPI except ImportError: autodoc_mock_imports.append('mpi4py') return sys_dwb = sys.dont_write_bytecode sys.dont_write_bytecode = True import apidoc sys.dont_write_bytecode = sys_dwb name = MPI.__name__ here = os.path.abspath(os.path.dirname(__file__)) outdir = os.path.join(here, apidoc.OUTDIR) source = os.path.join(outdir, f'{name}.py') getmtime = os.path.getmtime generate = ( not os.path.exists(source) or getmtime(source) < getmtime(MPI.__file__) or getmtime(source) < getmtime(apidoc.__file__) ) if generate: apidoc.generate(source) module = apidoc.load_module(source) apidoc.replace_module(module) for name in dir(module): attr = getattr(module, name) if isinstance(attr, type): if attr.__module__ == module.__name__: autodoc_type_aliases[name] = name synopsis = 
autosummary_context['synopsis'] synopsis[module.__name__] = module.__doc__.strip() autotype = autosummary_context['autotype'] autotype[module.Exception.__name__] = 'exception' modules = [ 'mpi4py', 'mpi4py.run', 'mpi4py.util.dtlib', 'mpi4py.util.pkl5', ] typing_overload = typing.overload typing.overload = lambda arg: arg for name in modules: mod = importlib.import_module(name) ann = apidoc.load_module(f'{mod.__file__}i', name) apidoc.annotate(mod, ann) typing.overload = typing_overload # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = ( 'sphinx_rtd_theme' if 'sphinx_rtd_theme' in extensions else 'default' ) if html_theme == 'default': html_copy_source = False if html_theme == 'sphinx_rtd_theme': html_theme_options = { 'analytics_id': 'UA-48837848-1', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] if html_theme == 'sphinx_rtd_theme': html_css_files = [ 'css/custom.css', ] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = f'{package}-man' # -- Options for LaTeX output --------------------------------------------- # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', f'{package}.tex', project, author, 'howto'), ] latex_elements = { 'papersize': 'a4', } # -- Options for manual page output --------------------------------------- # (source start file, name, description, authors, manual section). man_pages = [ ('index', package, project, [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', package, project, author, package, f'{project}.', 'Miscellaneous'), ] # -- Options for Epub output ---------------------------------------------- # Output file base name for ePub builder. epub_basename = package mpi4py-3.1.6/docs/source/usrman/index.rst000066400000000000000000000021441460670727200203550ustar00rootroot00000000000000MPI for Python ============== .. only:: html or man :Author: Lisandro Dalcin :Contact: dalcinl@gmail.com :Date: |today| .. topic:: Abstract This document describes the *MPI for Python* package. *MPI for Python* provides Python bindings for the *Message Passing Interface* (MPI) standard, allowing Python applications to exploit multiple processors on workstations, clusters and supercomputers. This package builds on the MPI specification and provides an object oriented interface resembling the MPI-2 C++ bindings. It supports point-to-point (sends, receives) and collective (broadcasts, scatters, gathers) communication of any *picklable* Python object, as well as efficient communication of Python objects exposing the Python buffer interface (e.g. NumPy arrays and builtin bytes/array/memoryview objects). .. toctree:: :caption: Contents :maxdepth: 2 intro overview tutorial mpi4py mpi4py.MPI mpi4py.futures mpi4py.util mpi4py.run reference citing install appendix .. 
only:: html and not singlehtml * :ref:`genindex` mpi4py-3.1.6/docs/source/usrman/install.rst000066400000000000000000000136471460670727200207260ustar00rootroot00000000000000Installation ============ Requirements ------------ You need to have the following software properly installed in order to build *MPI for Python*: * A working MPI implementation, preferably supporting MPI-3 and built with shared/dynamic libraries. .. note:: If you want to build some MPI implementation from sources, check the instructions at :ref:`building-mpi` in the appendix. * Python 2.7, 3.5 or above. .. note:: Some MPI-1 implementations **do require** the actual command line arguments to be passed in :c:func:`MPI_Init()`. In this case, you will need to use a rebuilt, MPI-enabled, Python interpreter executable. *MPI for Python* has some support for alleviating you from this task. Check the instructions at :ref:`python-mpi` in the appendix. Using **pip** ------------- If you already have a working MPI (either if you installed it from sources or by using a pre-built package from your favourite GNU/Linux distribution) and the :program:`mpicc` compiler wrapper is on your search path, you can use :program:`pip`:: $ python -m pip install mpi4py .. note:: If the :program:`mpicc` compiler wrapper is not on your search path (or if it has a different name) you can use :program:`env` to pass the environment variable :envvar:`MPICC` providing the full path to the MPI compiler wrapper executable:: $ env MPICC=/path/to/mpicc python -m pip install mpi4py .. warning:: :program:`pip` keeps previouly built wheel files on its cache for future reuse. If you want to reinstall the :mod:`mpi4py` package using a different or updated MPI implementation, you have to either first remove the cached wheel file with:: $ python -m pip cache remove mpi4py or ask :program:`pip` to disable the cache:: $ python -m pip install --no-cache-dir mpi4py Using **distutils** ------------------- The *MPI for Python* package is available for download at the project website generously hosted by GitHub. You can use :program:`curl` or :program:`wget` to get a release tarball. * Using :program:`curl`:: $ curl -O https://github.com/mpi4py/mpi4py/releases/download/X.Y.Z/mpi4py-X.Y.Z.tar.gz * Using :program:`wget`:: $ wget https://github.com/mpi4py/mpi4py/releases/download/X.Y.Z/mpi4py-X.Y.Z.tar.gz After unpacking the release tarball:: $ tar -zxf mpi4py-X.Y.Z.tar.gz $ cd mpi4py-X.Y.Z the package is ready for building. *MPI for Python* uses a standard distutils-based build system. However, some distutils commands (like *build*) have additional options: .. cmdoption:: --mpicc= Lets you specify a special location or name for the :program:`mpicc` compiler wrapper. .. cmdoption:: --mpi= Lets you pass a section with MPI configuration within a special configuration file. .. cmdoption:: --configure Runs exhaustive tests for checking about missing MPI types, constants, and functions. This option should be passed in order to build *MPI for Python* against old MPI-1 or MPI-2 implementations, possibly providing a subset of MPI-3. If you use a MPI implementation providing a :program:`mpicc` compiler wrapper (e.g., MPICH, Open MPI), it will be used for compilation and linking. This is the preferred and easiest way of building *MPI for Python*. 
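Once the package is built and installed (as shown next), you can double-check from Python which compiler wrapper and MPI library were actually used. The snippet below is a minimal sketch (run it under :program:`mpiexec` if your MPI implementation requires it); ``mpi4py.get_config()`` reports the build-time settings, whose exact keys depend on how the package was built, and ``MPI.Get_library_version()`` reports the version string of the underlying MPI library::

    import mpi4py
    print(mpi4py.get_config())        # build-time settings, e.g. which mpicc was used

    from mpi4py import MPI
    print(MPI.Get_version())          # (major, minor) version of the MPI standard
    print(MPI.Get_library_version())  # MPI library version string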
If :program:`mpicc` is located somewhere in your search path, simply run the *build* command:: $ python setup.py build If :program:`mpicc` is not in your search path or the compiler wrapper has a different name, you can run the *build* command specifying its location:: $ python setup.py build --mpicc=/where/you/have/mpicc Alternatively, you can provide all the relevant information about your MPI implementation by editing the file called :file:`mpi.cfg`. You can use the default section ``[mpi]`` or add a new, custom section, for example ``[other_mpi]`` (see the examples provided in the :file:`mpi.cfg` file as a starting point to write your own section):: [mpi] include_dirs = /usr/local/mpi/include libraries = mpi library_dirs = /usr/local/mpi/lib runtime_library_dirs = /usr/local/mpi/lib [other_mpi] include_dirs = /opt/mpi/include ... libraries = mpi ... library_dirs = /opt/mpi/lib ... runtime_library_dirs = /op/mpi/lib ... ... and then run the *build* command, perhaps specifying you custom configuration section:: $ python setup.py build --mpi=other_mpi After building, the package is ready for install. If you have root privileges (either by log-in as the root user of by using :command:`sudo`) and you want to install *MPI for Python* in your system for all users, just do:: $ python setup.py install The previous steps will install the :mod:`mpi4py` package at standard location :file:`{prefix}/lib/python{X}.{X}/site-packages`. If you do not have root privileges or you want to install *MPI for Python* for your private use, just do:: $ python setup.py install --user Testing ------- To quickly test the installation:: $ mpiexec -n 5 python -m mpi4py.bench helloworld Hello, World! I am process 0 of 5 on localhost. Hello, World! I am process 1 of 5 on localhost. Hello, World! I am process 2 of 5 on localhost. Hello, World! I am process 3 of 5 on localhost. Hello, World! I am process 4 of 5 on localhost. If you installed from source, issuing at the command line:: $ mpiexec -n 5 python demo/helloworld.py or (in the case of ancient MPI-1 implementations):: $ mpirun -np 5 python `pwd`/demo/helloworld.py will launch a five-process run of the Python interpreter and run the test script :file:`demo/helloworld.py` from the source distribution. You can also run all the *unittest* scripts:: $ mpiexec -n 5 python test/runtests.py or, if you have nose_ unit testing framework installed:: $ mpiexec -n 5 nosetests -w test .. _nose: https://nose.readthedocs.io/ or, if you have `py.test`_ unit testing framework installed:: $ mpiexec -n 5 py.test test/ .. _py.test: https://docs.pytest.org/ mpi4py-3.1.6/docs/source/usrman/intro.rst000066400000000000000000000231521460670727200204030ustar00rootroot00000000000000Introduction ============ Over the last years, high performance computing has become an affordable resource to many more researchers in the scientific community than ever before. The conjunction of quality open source software and commodity hardware strongly influenced the now widespread popularity of Beowulf_ class clusters and cluster of workstations. Among many parallel computational models, message-passing has proven to be an effective one. This paradigm is specially suited for (but not limited to) distributed memory architectures and is used in today's most demanding scientific and engineering application related to modeling, simulation, design, and signal processing. 
However, portable message-passing parallel programming used to be a nightmare in the past because of the many incompatible options developers were faced to. Fortunately, this situation definitely changed after the MPI Forum released its standard specification. High performance computing is traditionally associated with software development using compiled languages. However, in typical applications programs, only a small part of the code is time-critical enough to require the efficiency of compiled languages. The rest of the code is generally related to memory management, error handling, input/output, and user interaction, and those are usually the most error prone and time-consuming lines of code to write and debug in the whole development process. Interpreted high-level languages can be really advantageous for this kind of tasks. For implementing general-purpose numerical computations, MATLAB [#]_ is the dominant interpreted programming language. In the open source side, Octave and Scilab are well known, freely distributed software packages providing compatibility with the MATLAB language. In this work, we present MPI for Python, a new package enabling applications to exploit multiple processors using standard MPI "look and feel" in Python scripts. .. [#] MATLAB is a registered trademark of The MathWorks, Inc. What is MPI? ------------ MPI_, [mpi-using]_ [mpi-ref]_ the *Message Passing Interface*, is a standardized and portable message-passing system designed to function on a wide variety of parallel computers. The standard defines the syntax and semantics of library routines and allows users to write portable programs in the main scientific programming languages (Fortran, C, or C++). Since its release, the MPI specification [mpi-std1]_ [mpi-std2]_ has become the leading standard for message-passing libraries for parallel computers. Implementations are available from vendors of high-performance computers and from well known open source projects like MPICH_ [mpi-mpich]_ and `Open MPI`_ [mpi-openmpi]_. What is Python? --------------- Python_ is a modern, easy to learn, powerful programming language. It has efficient high-level data structures and a simple but effective approach to object-oriented programming with dynamic typing and dynamic binding. It supports modules and packages, which encourages program modularity and code reuse. Python's elegant syntax, together with its interpreted nature, make it an ideal language for scripting and rapid application development in many areas on most platforms. The Python interpreter and the extensive standard library are available in source or binary form without charge for all major platforms, and can be freely distributed. It is easily extended with new functions and data types implemented in C or C++. Python is also suitable as an extension language for customizable applications. Python is an ideal candidate for writing the higher-level parts of large-scale scientific applications [Hinsen97]_ and driving simulations in parallel architectures [Beazley97]_ like clusters of PC's or SMP's. Python codes are quickly developed, easily maintained, and can achieve a high degree of integration with other libraries written in compiled languages. Related Projects ---------------- As this work started and evolved, some ideas were borrowed from well known MPI and Python related open source projects from the Internet. * `OOMPI`_ + It has no relation with Python, but is an excellent object oriented approach to MPI. 
+ It is a C++ class library specification layered on top of the C bindings that encapsulates MPI into a functional class hierarchy. + It provides a flexible and intuitive interface by adding some abstractions, like *Ports* and *Messages*, which enrich and simplify the syntax. * `Pypar`_ + Its interface is rather minimal. There is no support for communicators or process topologies. + It does not require the Python interpreter to be modified or recompiled, but does not permit interactive parallel runs. + General (*picklable*) Python objects of any type can be communicated. There is good support for numeric arrays, practically full MPI bandwidth can be achieved. * `pyMPI`_ + It rebuilds the Python interpreter providing a built-in module for message passing. It does permit interactive parallel runs, which are useful for learning and debugging. + It provides an interface suitable for basic parallel programing. There is not full support for defining new communicators or process topologies. + General (picklable) Python objects can be messaged between processors. There is not support for numeric arrays. * `Scientific Python`_ + It provides a collection of Python modules that are useful for scientific computing. + There is an interface to MPI and BSP (*Bulk Synchronous Parallel programming*). + The interface is simple but incomplete and does not resemble the MPI specification. There is support for numeric arrays. Additionally, we would like to mention some available tools for scientific computing and software development with Python. + `NumPy`_ is a package that provides array manipulation and computational capabilities similar to those found in IDL, MATLAB, or Octave. Using NumPy, it is possible to write many efficient numerical data processing applications directly in Python without using any C, C++ or Fortran code. + `SciPy`_ is an open source library of scientific tools for Python, gathering a variety of high level science and engineering modules together as a single package. It includes modules for graphics and plotting, optimization, integration, special functions, signal and image processing, genetic algorithms, ODE solvers, and others. + `Cython`_ is a language that makes writing C extensions for the Python language as easy as Python itself. The Cython language is very close to the Python language, but Cython additionally supports calling C functions and declaring C types on variables and class attributes. This allows the compiler to generate very efficient C code from Cython code. This makes Cython the ideal language for wrapping for external C libraries, and for fast C modules that speed up the execution of Python code. + `SWIG`_ is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages like Perl, Tcl/Tk, Ruby and Python. Issuing header files to SWIG is the simplest approach to interfacing C/C++ libraries from a Python module. .. External Links .. .............. .. _MPI: https://www.mpi-forum.org/ .. _MPICH: https://www.mpich.org/ .. _Open MPI: https://www.open-mpi.org/ .. _Beowulf: https://www.beowulf.org/ .. _Python: https://www.python.org/ .. _NumPy: https://numpy.org/ .. _SciPy: https://scipy.org/ .. _Cython: https://cython.org/ .. _SWIG: http://www.swig.org/ .. _OOMPI: https://web.archive.org/web/20100614170656/http://www.osl.iu.edu/research/oompi/overview.php .. _Pypar: https://github.com/daleroberts/pypar .. _pyMPI: https://sourceforge.net/projects/pympi/ .. 
_Scientific Python: http://dirac.cnrs-orleans.fr/ScientificPython.html .. References .. .......... .. [mpi-std1] MPI Forum. MPI: A Message Passing Interface Standard. International Journal of Supercomputer Applications, volume 8, number 3-4, pages 159-416, 1994. .. [mpi-std2] MPI Forum. MPI: A Message Passing Interface Standard. High Performance Computing Applications, volume 12, number 1-2, pages 1-299, 1998. .. [mpi-using] William Gropp, Ewing Lusk, and Anthony Skjellum. Using MPI: portable parallel programming with the message-passing interface. MIT Press, 1994. .. [mpi-ref] Mark Snir, Steve Otto, Steven Huss-Lederman, David Walker, and Jack Dongarra. MPI - The Complete Reference, volume 1, The MPI Core. MIT Press, 2nd. edition, 1998. .. [mpi-mpich] W. Gropp, E. Lusk, N. Doss, and A. Skjellum. A high-performance, portable implementation of the MPI message passing interface standard. Parallel Computing, 22(6):789-828, September 1996. .. [mpi-openmpi] Edgar Gabriel, Graham E. Fagg, George Bosilca, Thara Angskun, Jack J. Dongarra, Jeffrey M. Squyres, Vishal Sahay, Prabhanjan Kambadur, Brian Barrett, Andrew Lumsdaine, Ralph H. Castain, David J. Daniel, Richard L. Graham, and Timothy S. Woodall. Open MPI: Goals, Concept, and Design of a Next Generation MPI Implementation. In Proceedings, 11th European PVM/MPI Users' Group Meeting, Budapest, Hungary, September 2004. .. [Hinsen97] Konrad Hinsen. The Molecular Modelling Toolkit: a case study of a large scientific application in Python. In Proceedings of the 6th International Python Conference, pages 29-35, San Jose, Ca., October 1997. .. [Beazley97] David M. Beazley and Peter S. Lomdahl. Feeding a large-scale physics application to Python. In Proceedings of the 6th International Python Conference, pages 21-29, San Jose, Ca., October 1997. mpi4py-3.1.6/docs/source/usrman/make.bat000066400000000000000000000014331460670727200201210ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd mpi4py-3.1.6/docs/source/usrman/mpi4py.MPI.rst000066400000000000000000000133541460670727200211210ustar00rootroot00000000000000mpi4py.MPI ========== .. currentmodule:: mpi4py.MPI Classes ------- .. rubric:: Ancillary .. autosummary:: Datatype Status Request Prequest Grequest Op Group Info .. rubric:: Communication .. autosummary:: Comm Intracomm Topocomm Cartcomm Graphcomm Distgraphcomm Intercomm Message .. rubric:: One-sided operations .. autosummary:: Win .. rubric:: Input/Output .. autosummary:: File .. rubric:: Error handling .. autosummary:: Errhandler Exception .. rubric:: Auxiliary .. autosummary:: Pickle memory Functions --------- .. rubric:: Version inquiry .. autosummary:: Get_version Get_library_version .. rubric:: Initialization and finalization .. autosummary:: Init Init_thread Finalize Is_initialized Is_finalized Query_thread Is_thread_main .. 
rubric:: Memory allocation .. autosummary:: Alloc_mem Free_mem .. rubric:: Address manipulation .. autosummary:: Get_address Aint_add Aint_diff .. rubric:: Timer .. autosummary:: Wtick Wtime .. rubric:: Error handling .. autosummary:: Get_error_class Get_error_string Add_error_class Add_error_code Add_error_string .. rubric:: Dynamic process management .. autosummary:: Open_port Close_port Publish_name Unpublish_name Lookup_name .. rubric:: Miscellanea .. autosummary:: Attach_buffer Detach_buffer Compute_dims Get_processor_name Register_datarep Pcontrol .. rubric:: Utilities .. autosummary:: get_vendor .. _typecode _sizeof _addressof _handleof .. _comm_lock _comm_lock_table _commctx_inter _commctx_intra _set_abort_status Attributes ---------- .. autosummary:: UNDEFINED ANY_SOURCE ANY_TAG PROC_NULL ROOT BOTTOM IN_PLACE KEYVAL_INVALID TAG_UB HOST IO WTIME_IS_GLOBAL UNIVERSE_SIZE APPNUM LASTUSEDCODE WIN_BASE WIN_SIZE WIN_DISP_UNIT WIN_CREATE_FLAVOR WIN_FLAVOR WIN_MODEL SUCCESS ERR_LASTCODE ERR_COMM ERR_GROUP ERR_TYPE ERR_REQUEST ERR_OP ERR_BUFFER ERR_COUNT ERR_TAG ERR_RANK ERR_ROOT ERR_TRUNCATE ERR_IN_STATUS ERR_PENDING ERR_TOPOLOGY ERR_DIMS ERR_ARG ERR_OTHER ERR_UNKNOWN ERR_INTERN ERR_INFO ERR_FILE ERR_WIN ERR_KEYVAL ERR_INFO_KEY ERR_INFO_VALUE ERR_INFO_NOKEY ERR_ACCESS ERR_AMODE ERR_BAD_FILE ERR_FILE_EXISTS ERR_FILE_IN_USE ERR_NO_SPACE ERR_NO_SUCH_FILE ERR_IO ERR_READ_ONLY ERR_CONVERSION ERR_DUP_DATAREP ERR_UNSUPPORTED_DATAREP ERR_UNSUPPORTED_OPERATION ERR_NAME ERR_NO_MEM ERR_NOT_SAME ERR_PORT ERR_QUOTA ERR_SERVICE ERR_SPAWN ERR_BASE ERR_SIZE ERR_DISP ERR_ASSERT ERR_LOCKTYPE ERR_RMA_CONFLICT ERR_RMA_SYNC ERR_RMA_RANGE ERR_RMA_ATTACH ERR_RMA_SHARED ERR_RMA_FLAVOR ORDER_C ORDER_F ORDER_FORTRAN TYPECLASS_INTEGER TYPECLASS_REAL TYPECLASS_COMPLEX DISTRIBUTE_NONE DISTRIBUTE_BLOCK DISTRIBUTE_CYCLIC DISTRIBUTE_DFLT_DARG COMBINER_NAMED COMBINER_DUP COMBINER_CONTIGUOUS COMBINER_VECTOR COMBINER_HVECTOR COMBINER_INDEXED COMBINER_HINDEXED COMBINER_INDEXED_BLOCK COMBINER_HINDEXED_BLOCK COMBINER_STRUCT COMBINER_SUBARRAY COMBINER_DARRAY COMBINER_RESIZED COMBINER_F90_REAL COMBINER_F90_COMPLEX COMBINER_F90_INTEGER IDENT CONGRUENT SIMILAR UNEQUAL CART GRAPH DIST_GRAPH UNWEIGHTED WEIGHTS_EMPTY COMM_TYPE_SHARED BSEND_OVERHEAD WIN_FLAVOR_CREATE WIN_FLAVOR_ALLOCATE WIN_FLAVOR_DYNAMIC WIN_FLAVOR_SHARED WIN_SEPARATE WIN_UNIFIED MODE_NOCHECK MODE_NOSTORE MODE_NOPUT MODE_NOPRECEDE MODE_NOSUCCEED LOCK_EXCLUSIVE LOCK_SHARED MODE_RDONLY MODE_WRONLY MODE_RDWR MODE_CREATE MODE_EXCL MODE_DELETE_ON_CLOSE MODE_UNIQUE_OPEN MODE_SEQUENTIAL MODE_APPEND SEEK_SET SEEK_CUR SEEK_END DISPLACEMENT_CURRENT DISP_CUR THREAD_SINGLE THREAD_FUNNELED THREAD_SERIALIZED THREAD_MULTIPLE VERSION SUBVERSION MAX_PROCESSOR_NAME MAX_ERROR_STRING MAX_PORT_NAME MAX_INFO_KEY MAX_INFO_VAL MAX_OBJECT_NAME MAX_DATAREP_STRING MAX_LIBRARY_VERSION_STRING DATATYPE_NULL UB LB PACKED BYTE AINT OFFSET COUNT CHAR WCHAR SIGNED_CHAR SHORT INT LONG LONG_LONG UNSIGNED_CHAR UNSIGNED_SHORT UNSIGNED UNSIGNED_LONG UNSIGNED_LONG_LONG FLOAT DOUBLE LONG_DOUBLE C_BOOL INT8_T INT16_T INT32_T INT64_T UINT8_T UINT16_T UINT32_T UINT64_T C_COMPLEX C_FLOAT_COMPLEX C_DOUBLE_COMPLEX C_LONG_DOUBLE_COMPLEX CXX_BOOL CXX_FLOAT_COMPLEX CXX_DOUBLE_COMPLEX CXX_LONG_DOUBLE_COMPLEX SHORT_INT INT_INT TWOINT LONG_INT FLOAT_INT DOUBLE_INT LONG_DOUBLE_INT CHARACTER LOGICAL INTEGER REAL DOUBLE_PRECISION COMPLEX DOUBLE_COMPLEX LOGICAL1 LOGICAL2 LOGICAL4 LOGICAL8 INTEGER1 INTEGER2 INTEGER4 INTEGER8 INTEGER16 REAL2 REAL4 REAL8 REAL16 COMPLEX4 COMPLEX8 COMPLEX16 COMPLEX32 UNSIGNED_INT SIGNED_SHORT 
SIGNED_INT SIGNED_LONG SIGNED_LONG_LONG BOOL SINT8_T SINT16_T SINT32_T SINT64_T F_BOOL F_INT F_FLOAT F_DOUBLE F_COMPLEX F_FLOAT_COMPLEX F_DOUBLE_COMPLEX REQUEST_NULL MESSAGE_NULL MESSAGE_NO_PROC OP_NULL MAX MIN SUM PROD LAND BAND LOR BOR LXOR BXOR MAXLOC MINLOC REPLACE NO_OP GROUP_NULL GROUP_EMPTY INFO_NULL INFO_ENV ERRHANDLER_NULL ERRORS_RETURN ERRORS_ARE_FATAL COMM_NULL COMM_SELF COMM_WORLD WIN_NULL FILE_NULL pickle .. Local variables: .. fill-column: 79 .. End: mpi4py-3.1.6/docs/source/usrman/mpi4py.futures.rst000066400000000000000000000554571460670727200222030ustar00rootroot00000000000000mpi4py.futures ============== .. module:: mpi4py.futures :synopsis: Execute computations concurrently using MPI processes. .. versionadded:: 3.0.0 This package provides a high-level interface for asynchronously executing callables on a pool of worker processes using MPI for inter-process communication. concurrent.futures ------------------ The :mod:`mpi4py.futures` package is based on :mod:`concurrent.futures` from the Python standard library. More precisely, :mod:`mpi4py.futures` provides the :class:`MPIPoolExecutor` class as a concrete implementation of the abstract class :class:`~concurrent.futures.Executor`. The :meth:`~concurrent.futures.Executor.submit` interface schedules a callable to be executed asynchronously and returns a :class:`~concurrent.futures.Future` object representing the execution of the callable. :class:`~concurrent.futures.Future` instances can be queried for the call result or exception. Sets of :class:`~concurrent.futures.Future` instances can be passed to the :func:`~concurrent.futures.wait` and :func:`~concurrent.futures.as_completed` functions. .. note:: The :mod:`concurrent.futures` package was introduced in Python 3.2. A `backport `_ targeting Python 2.7 is available on `PyPI `_. The :mod:`mpi4py.futures` package uses :mod:`concurrent.futures` if available, either from the Python 3 standard library or the Python 2.7 backport if installed. Otherwise, :mod:`mpi4py.futures` uses a bundled copy of core functionality backported from Python 3.5 to work with Python 2.7. .. _futures-repo: https://github.com/agronholm/pythonfutures .. _futures-pypi: https://pypi.org/project/futures .. seealso:: Module :mod:`concurrent.futures` Documentation of the :mod:`concurrent.futures` standard module. MPIPoolExecutor --------------- The :class:`MPIPoolExecutor` class uses a pool of MPI processes to execute calls asynchronously. By performing computations in separate processes, it allows to side-step the :term:`global interpreter lock` but also means that only picklable objects can be executed and returned. The :mod:`__main__` module must be importable by worker processes, thus :class:`MPIPoolExecutor` instances may not work in the interactive interpreter. :class:`MPIPoolExecutor` takes advantage of the dynamic process management features introduced in the MPI-2 standard. In particular, the `MPI.Intracomm.Spawn` method of `MPI.COMM_SELF` is used in the master (or parent) process to spawn new worker (or child) processes running a Python interpreter. The master process uses a separate thread (one for each :class:`MPIPoolExecutor` instance) to communicate back and forth with the workers. The worker processes serve the execution of tasks in the main (and only) thread until they are signaled for completion. .. note:: The worker processes must import the main script in order to *unpickle* any callable defined in the :mod:`__main__` module and submitted from the master process. 
Furthermore, the callables may need access to other global variables. At the worker processes, :mod:`mpi4py.futures` executes the main script code (using the :mod:`runpy` module) under the :mod:`__worker__` namespace to define the :mod:`__main__` module. The :mod:`__main__` and :mod:`__worker__` modules are added to :data:`sys.modules` (both at the master and worker processes) to ensure proper *pickling* and *unpickling*. .. warning:: During the initial import phase at the workers, the main script cannot create and use new :class:`MPIPoolExecutor` instances. Otherwise, each worker would attempt to spawn a new pool of workers, leading to infinite recursion. :mod:`mpi4py.futures` detects such recursive attempts to spawn new workers and aborts the MPI execution environment. As the main script code is run under the :mod:`__worker__` namespace, the easiest way to avoid spawn recursion is using the idiom :code:`if __name__ == '__main__': ...` in the main script. .. class:: MPIPoolExecutor(max_workers=None, \ initializer=None, initargs=(), **kwargs) An :class:`~concurrent.futures.Executor` subclass that executes calls asynchronously using a pool of at most *max_workers* processes. If *max_workers* is `None` or not given, its value is determined from the :envvar:`MPI4PY_FUTURES_MAX_WORKERS` environment variable if set, or the MPI universe size if set, otherwise a single worker process is spawned. If *max_workers* is lower than or equal to ``0``, then a :exc:`ValueError` will be raised. *initializer* is an optional callable that is called at the start of each worker process before executing any tasks; *initargs* is a tuple of arguments passed to the initializer. If *initializer* raises an exception, all pending tasks and any attempt to submit new tasks to the pool will raise a :exc:`~concurrent.futures.BrokenExecutor` exception. Other parameters: * *python_exe*: Path to the Python interpreter executable used to spawn worker processes, otherwise :data:`sys.executable` is used. * *python_args*: :class:`list` or iterable with additional command line flags to pass to the Python executable. Command line flags determined from inspection of :data:`sys.flags`, :data:`sys.warnoptions` and :data:`sys._xoptions` in are passed unconditionally. * *mpi_info*: :class:`dict` or iterable yielding ``(key, value)`` pairs. These ``(key, value)`` pairs are passed (through an `MPI.Info` object) to the `MPI.Intracomm.Spawn` call used to spawn worker processes. This mechanism allows telling the MPI runtime system where and how to start the processes. Check the documentation of the backend MPI implementation about the set of keys it interprets and the corresponding format for values. * *globals*: :class:`dict` or iterable yielding ``(name, value)`` pairs to initialize the main module namespace in worker processes. * *main*: If set to `False`, do not import the :mod:`__main__` module in worker processes. Setting *main* to `False` prevents worker processes from accessing definitions in the parent :mod:`__main__` namespace. * *path*: :class:`list` or iterable with paths to append to :data:`sys.path` in worker processes to extend the :ref:`module search path `. * *wdir*: Path to set the current working directory in worker processes using :func:`os.chdir()`. The initial working directory is set by the MPI implementation. Quality MPI implementations should honor a ``wdir`` info key passed through *mpi_info*, although such feature is not mandatory. 
* *env*: :class:`dict` or iterable yielding ``(name, value)`` pairs with environment variables to update :data:`os.environ` in worker processes. The initial environment is set by the MPI implementation. MPI implementations may allow setting the initial environment through *mpi_info*, however such feature is not required nor recommended by the MPI standard. .. method:: submit(func, *args, **kwargs) Schedule the callable, *func*, to be executed as ``func(*args, **kwargs)`` and returns a :class:`~concurrent.futures.Future` object representing the execution of the callable. :: executor = MPIPoolExecutor(max_workers=1) future = executor.submit(pow, 321, 1234) print(future.result()) .. method:: map(func, *iterables, timeout=None, chunksize=1, **kwargs) Equivalent to :func:`map(func, *iterables) ` except *func* is executed asynchronously and several calls to *func* may be made concurrently, out-of-order, in separate processes. The returned iterator raises a :exc:`~concurrent.futures.TimeoutError` if :meth:`~iterator.__next__` is called and the result isn't available after *timeout* seconds from the original call to :meth:`~MPIPoolExecutor.map`. *timeout* can be an int or a float. If *timeout* is not specified or `None`, there is no limit to the wait time. If a call raises an exception, then that exception will be raised when its value is retrieved from the iterator. This method chops *iterables* into a number of chunks which it submits to the pool as separate tasks. The (approximate) size of these chunks can be specified by setting *chunksize* to a positive integer. For very long iterables, using a large value for *chunksize* can significantly improve performance compared to the default size of one. By default, the returned iterator yields results in-order, waiting for successive tasks to complete . This behavior can be changed by passing the keyword argument *unordered* as `True`, then the result iterator will yield a result as soon as any of the tasks complete. :: executor = MPIPoolExecutor(max_workers=3) for result in executor.map(pow, [2]*32, range(32)): print(result) .. method:: starmap(func, iterable, timeout=None, chunksize=1, **kwargs) Equivalent to :func:`itertools.starmap(func, iterable) `. Used instead of :meth:`~MPIPoolExecutor.map` when argument parameters are already grouped in tuples from a single iterable (the data has been "pre-zipped"). :func:`map(func, *iterable) ` is equivalent to :func:`starmap(func, zip(*iterable)) `. :: executor = MPIPoolExecutor(max_workers=3) iterable = ((2, n) for n in range(32)) for result in executor.starmap(pow, iterable): print(result) .. method:: shutdown(wait=True, cancel_futures=False) Signal the executor that it should free any resources that it is using when the currently pending futures are done executing. Calls to :meth:`~MPIPoolExecutor.submit` and :meth:`~MPIPoolExecutor.map` made after :meth:`~MPIPoolExecutor.shutdown` will raise :exc:`RuntimeError`. If *wait* is `True` then this method will not return until all the pending futures are done executing and the resources associated with the executor have been freed. If *wait* is `False` then this method will return immediately and the resources associated with the executor will be freed when all pending futures are done executing. Regardless of the value of *wait*, the entire Python program will not exit until all pending futures are done executing. If *cancel_futures* is `True`, this method will cancel all pending futures that the executor has not started running. 
Any futures that are completed or running won't be cancelled, regardless of the value of *cancel_futures*. You can avoid having to call this method explicitly if you use the :keyword:`with` statement, which will shutdown the executor instance (waiting as if :meth:`~MPIPoolExecutor.shutdown` were called with *wait* set to `True`). :: import time with MPIPoolExecutor(max_workers=1) as executor: future = executor.submit(time.sleep, 2) assert future.done() .. method:: bootup(wait=True) Signal the executor that it should allocate eagerly any required resources (in particular, MPI worker processes). If *wait* is `True`, then :meth:`~MPIPoolExecutor.bootup` will not return until the executor resources are ready to process submissions. Resources are automatically allocated in the first call to :meth:`~MPIPoolExecutor.submit`, thus calling :meth:`~MPIPoolExecutor.bootup` explicitly is seldom needed. .. envvar:: MPI4PY_FUTURES_MAX_WORKERS If the *max_workers* parameter to :class:`MPIPoolExecutor` is `None` or not given, the :envvar:`MPI4PY_FUTURES_MAX_WORKERS` environment variable provides fallback value for the maximum number of MPI worker processes to spawn. .. note:: As the master process uses a separate thread to perform MPI communication with the workers, the backend MPI implementation should provide support for `MPI.THREAD_MULTIPLE`. However, some popular MPI implementations do not support yet concurrent MPI calls from multiple threads. Additionally, users may decide to initialize MPI with a lower level of thread support. If the level of thread support in the backend MPI is less than `MPI.THREAD_MULTIPLE`, :mod:`mpi4py.futures` will use a global lock to serialize MPI calls. If the level of thread support is less than `MPI.THREAD_SERIALIZED`, :mod:`mpi4py.futures` will emit a :exc:`RuntimeWarning`. .. warning:: If the level of thread support in the backend MPI is less than `MPI.THREAD_SERIALIZED` (i.e, it is either `MPI.THREAD_SINGLE` or `MPI.THREAD_FUNNELED`), in theory :mod:`mpi4py.futures` cannot be used. Rather than raising an exception, :mod:`mpi4py.futures` emits a warning and takes a "cross-fingers" attitude to continue execution in the hope that serializing MPI calls with a global lock will actually work. MPICommExecutor --------------- Legacy MPI-1 implementations (as well as some vendor MPI-2 implementations) do not support the dynamic process management features introduced in the MPI-2 standard. Additionally, job schedulers and batch systems in supercomputing facilities may pose additional complications to applications using the :c:func:`MPI_Comm_spawn` routine. With these issues in mind, :mod:`mpi4py.futures` supports an additonal, more traditional, SPMD-like usage pattern requiring MPI-1 calls only. Python applications are started the usual way, e.g., using the :program:`mpiexec` command. Python code should make a collective call to the :class:`MPICommExecutor` context manager to partition the set of MPI processes within a MPI communicator in one master processes and many workers processes. The master process gets access to an :class:`MPIPoolExecutor` instance to submit tasks. Meanwhile, the worker process follow a different execution path and team-up to execute the tasks submitted from the master. 
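Since the executor handed to the master is an ordinary :class:`MPIPoolExecutor`, the standard helpers from :mod:`concurrent.futures`, such as :func:`~concurrent.futures.as_completed`, can be used to consume results as they become available. The following is a minimal sketch complementing the example shown below; the task function ``task`` is a placeholder::

    from concurrent.futures import as_completed
    from mpi4py import MPI
    from mpi4py.futures import MPICommExecutor

    def task(n):  # placeholder task function
        return n * n

    with MPICommExecutor(MPI.COMM_WORLD, root=0) as executor:
        if executor is not None:  # only the master gets an executor
            futures = [executor.submit(task, n) for n in range(8)]
            for future in as_completed(futures):
                print(future.result())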
Besides alleviating the lack of dynamic process managment features in legacy MPI-1 or partial MPI-2 implementations, the :class:`MPICommExecutor` context manager may be useful in classic MPI-based Python applications willing to take advantage of the simple, task-based, master/worker approach available in the :mod:`mpi4py.futures` package. .. class:: MPICommExecutor(comm=None, root=0) Context manager for :class:`MPIPoolExecutor`. This context manager splits a MPI (intra)communicator *comm* (defaults to `MPI.COMM_WORLD` if not provided or `None`) in two disjoint sets: a single master process (with rank *root* in *comm*) and the remaining worker processes. These sets are then connected through an intercommunicator. The target of the :keyword:`with` statement is assigned either an :class:`MPIPoolExecutor` instance (at the master) or `None` (at the workers). :: from mpi4py import MPI from mpi4py.futures import MPICommExecutor with MPICommExecutor(MPI.COMM_WORLD, root=0) as executor: if executor is not None: future = executor.submit(abs, -42) assert future.result() == 42 answer = set(executor.map(abs, [-42, 42])) assert answer == {42} .. warning:: If :class:`MPICommExecutor` is passed a communicator of size one (e.g., `MPI.COMM_SELF`), then the executor instace assigned to the target of the :keyword:`with` statement will execute all submitted tasks in a single worker thread, thus ensuring that task execution still progress asynchronously. However, the :term:`GIL` will prevent the main and worker threads from running concurrently in multicore processors. Moreover, the thread context switching may harm noticeably the performance of CPU-bound tasks. In case of I/O-bound tasks, the :term:`GIL` is not usually an issue, however, as a single worker thread is used, it progress one task at a time. We advice against using :class:`MPICommExecutor` with communicators of size one and suggest refactoring your code to use instead a :class:`~concurrent.futures.ThreadPoolExecutor`. Command line ------------ Recalling the issues related to the lack of support for dynamic process managment features in MPI implementations, :mod:`mpi4py.futures` supports an alternative usage pattern where Python code (either from scripts, modules, or zip files) is run under command line control of the :mod:`mpi4py.futures` package by passing :samp:`-m mpi4py.futures` to the :program:`python` executable. The ``mpi4py.futures`` invocation should be passed a *pyfile* path to a script (or a zipfile/directory containing a :file:`__main__.py` file). Additionally, ``mpi4py.futures`` accepts :samp:`-m {mod}` to execute a module named *mod*, :samp:`-c {cmd}` to execute a command string *cmd*, or even :samp:`-` to read commands from standard input (:data:`sys.stdin`). Summarizing, :samp:`mpi4py.futures` can be invoked in the following ways: * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures {pyfile} [arg] ...` * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures -m {mod} [arg] ...` * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures -c {cmd} [arg] ...` * :samp:`$ mpiexec -n {numprocs} python -m mpi4py.futures - [arg] ...` Before starting the main script execution, :mod:`mpi4py.futures` splits `MPI.COMM_WORLD` in one master (the process with rank 0 in `MPI.COMM_WORLD`) and *numprocs - 1* workers and connects them through an MPI intercommunicator. Afterwards, the master process proceeds with the execution of the user script code, which eventually creates :class:`MPIPoolExecutor` instances to submit tasks. 
Meanwhile, the worker processes follow a different execution path to serve the master. Upon successful termination of the main script at the master, the entire MPI execution environment exists gracefully. In case of any unhandled exception in the main script, the master process calls ``MPI.COMM_WORLD.Abort(1)`` to prevent deadlocks and force termination of entire MPI execution environment. .. warning:: Running scripts under command line control of :mod:`mpi4py.futures` is quite similar to executing a single-process application that spawn additional workers as required. However, there is a very important difference users should be aware of. All :class:`~MPIPoolExecutor` instances created at the master will share the pool of workers. Tasks submitted at the master from many different executors will be scheduled for execution in random order as soon as a worker is idle. Any executor can easily starve all the workers (e.g., by calling :func:`MPIPoolExecutor.map` with long iterables). If that ever happens, submissions from other executors will not be serviced until free workers are available. .. seealso:: :ref:`python:using-on-cmdline` Documentation on Python command line interface. Examples -------- The following :file:`julia.py` script computes the `Julia set`_ and dumps an image to disk in binary `PGM`_ format. The code starts by importing :class:`MPIPoolExecutor` from the :mod:`mpi4py.futures` package. Next, some global constants and functions implement the computation of the Julia set. The computations are protected with the standard :code:`if __name__ == '__main__': ...` idiom. The image is computed by whole scanlines submitting all these tasks at once using the :class:`~MPIPoolExecutor.map` method. The result iterator yields scanlines in-order as the tasks complete. Finally, each scanline is dumped to disk. .. _`Julia set`: https://en.wikipedia.org/wiki/Julia_set .. _`PGM`: http://netpbm.sourceforge.net/doc/pgm.html .. code-block:: python :name: julia-py :caption: :file:`julia.py` :emphasize-lines: 1,26,28,29 :linenos: from mpi4py.futures import MPIPoolExecutor x0, x1, w = -2.0, +2.0, 640*2 y0, y1, h = -1.5, +1.5, 480*2 dx = (x1 - x0) / w dy = (y1 - y0) / h c = complex(0, 0.65) def julia(x, y): z = complex(x, y) n = 255 while abs(z) < 3 and n > 1: z = z**2 + c n -= 1 return n def julia_line(k): line = bytearray(w) y = y1 - k * dy for j in range(w): x = x0 + j * dx line[j] = julia(x, y) return line if __name__ == '__main__': with MPIPoolExecutor() as executor: image = executor.map(julia_line, range(h)) with open('julia.pgm', 'wb') as f: f.write(b'P5 %d %d %d\n' % (w, h, 255)) for line in image: f.write(line) The recommended way to execute the script is by using the :program:`mpiexec` command specifying one MPI process (master) and (optional but recommended) the desired MPI universe size, which determines the number of additional dynamically spawned processes (workers). The MPI universe size is provided either by a batch system or set by the user via command-line arguments to :program:`mpiexec` or environment variables. Below we provide examples for MPICH and Open MPI implementations [#]_. In all of these examples, the :program:`mpiexec` command launches a single master process running the Python interpreter and executing the main script. When required, :mod:`mpi4py.futures` spawns the pool of 16 worker processes. The master submits tasks to the workers and waits for the results. The workers receive incoming tasks, execute them, and send back the results to the master. 
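Before turning to launcher-specific options, note that the universe size actually seen by the application can be queried at runtime through the standard ``UNIVERSE_SIZE`` communicator attribute. The sketch below uses only documented `mpi4py.MPI` calls; `MPI.Comm.Get_attr` returns `None` when the launcher or batch system did not provide a value::

    from mpi4py import MPI

    usize = MPI.COMM_WORLD.Get_attr(MPI.UNIVERSE_SIZE)
    if usize is None:
        print("MPI universe size: not set")
    else:
        print("MPI universe size:", usize)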
When using MPICH implementation or its derivatives based on the Hydra process manager, users can set the MPI universe size via the ``-usize`` argument to :program:`mpiexec`:: $ mpiexec -n 1 -usize 17 python julia.py or, alternatively, by setting the :envvar:`MPIEXEC_UNIVERSE_SIZE` environment variable:: $ MPIEXEC_UNIVERSE_SIZE=17 mpiexec -n 1 python julia.py In the Open MPI implementation, the MPI universe size can be set via the ``-host`` argument to :program:`mpiexec`:: $ mpiexec -n 1 -host :17 python julia.py Another way to specify the number of workers is to use the :mod:`mpi4py.futures`-specific environment variable :envvar:`MPI4PY_FUTURES_MAX_WORKERS`:: $ MPI4PY_FUTURES_MAX_WORKERS=16 mpiexec -n 1 python julia.py Note that in this case, the MPI universe size is ignored. Alternatively, users may decide to execute the script in a more traditional way, that is, all the MPI processes are started at once. The user script is run under command-line control of :mod:`mpi4py.futures` passing the :ref:`-m ` flag to the :program:`python` executable:: $ mpiexec -n 17 python -m mpi4py.futures julia.py As explained previously, the 17 processes are partitioned in one master and 16 workers. The master process executes the main script while the workers execute the tasks submitted by the master. .. [#] When using an MPI implementation other than MPICH or Open MPI, please check the documentation of the implementation and/or batch system for the ways to specify the desired MPI universe size. .. glossary:: GIL See :term:`global interpreter lock`. .. Local variables: .. fill-column: 79 .. End: mpi4py-3.1.6/docs/source/usrman/mpi4py.rst000066400000000000000000000134011460670727200204660ustar00rootroot00000000000000mpi4py ====== .. automodule:: mpi4py :synopsis: The MPI for Python package. Runtime configuration options ----------------------------- .. data:: mpi4py.rc This object has attributes exposing runtime configuration options that become effective at import time of the :mod:`~mpi4py.MPI` module. .. rubric:: Attributes Summary .. table:: :widths: grid ============== ========================================== `initialize` Automatic MPI initialization at import `threads` Request initialization with thread support `thread_level` Level of thread support to request `finalize` Automatic MPI finalization at exit `fast_reduce` Use tree-based reductions for objects `recv_mprobe` Use matched probes to receive objects `errors` Error handling policy ============== ========================================== .. rubric:: Attributes Documentation .. attribute:: mpi4py.rc.initialize Automatic MPI initialization at import. :type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_INITIALIZE` .. attribute:: mpi4py.rc.threads Request initialization with thread support. :type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_THREADS` .. attribute:: mpi4py.rc.thread_level Level of thread support to request. :type: :class:`str` :default: ``"multiple"`` :choices: ``"multiple"``, ``"serialized"``, ``"funneled"``, ``"single"`` .. seealso:: :envvar:`MPI4PY_RC_THREAD_LEVEL` .. attribute:: mpi4py.rc.finalize Automatic MPI finalization at exit. :type: :obj:`None` or :class:`bool` :default: :obj:`None` .. seealso:: :envvar:`MPI4PY_RC_FINALIZE` .. attribute:: mpi4py.rc.fast_reduce Use tree-based reductions for objects. :type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_FAST_REDUCE` .. attribute:: mpi4py.rc.recv_mprobe Use matched probes to receive objects. 
:type: :class:`bool` :default: :obj:`True` .. seealso:: :envvar:`MPI4PY_RC_RECV_MPROBE` .. attribute:: mpi4py.rc.errors Error handling policy. :type: :class:`str` :default: ``"exception"`` :choices: ``"exception"``, ``"default"``, ``"fatal"`` .. seealso:: :envvar:`MPI4PY_RC_ERRORS` .. rubric:: Example MPI for Python features automatic initialization and finalization of the MPI execution environment. By using the `mpi4py.rc` object, MPI initialization and finalization can be handled programmatically:: import mpi4py mpi4py.rc.initialize = False # do not initialize MPI automatically mpi4py.rc.finalize = False # do not finalize MPI automatically from mpi4py import MPI # import the 'MPI' module MPI.Init() # manual initialization of the MPI environment ... # your finest code here ... MPI.Finalize() # manual finalization of the MPI environment Environment variables --------------------- The following environment variables override the corresponding attributes of the :data:`mpi4py.rc` and :data:`MPI.pickle` objects at import time of the :mod:`~mpi4py.MPI` module. .. note:: For variables of boolean type, accepted values are ``0`` and ``1`` (interpreted as :obj:`False` and :obj:`True`, respectively), and strings specifying a `YAML boolean`_ value (case-insensitive). .. _YAML boolean: https://yaml.org/type/bool.html .. envvar:: MPI4PY_RC_INITIALIZE :type: :class:`bool` :default: :obj:`True` Whether to automatically initialize MPI at import time of the :mod:`mpi4py.MPI` module. .. seealso:: :attr:`mpi4py.rc.initialize` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_FINALIZE :type: :obj:`None` | :class:`bool` :default: :obj:`None` :choices: :obj:`None`, :obj:`True`, :obj:`False` Whether to automatically finalize MPI at exit time of the Python process. .. seealso:: :attr:`mpi4py.rc.finalize` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_THREADS :type: :class:`bool` :default: :obj:`True` Whether to initialize MPI with thread support. .. seealso:: :attr:`mpi4py.rc.threads` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_THREAD_LEVEL :default: ``"multiple"`` :choices: ``"single"``, ``"funneled"``, ``"serialized"``, ``"multiple"`` The level of required thread support. .. seealso:: :attr:`mpi4py.rc.thread_level` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_FAST_REDUCE :type: :class:`bool` :default: :obj:`True` Whether to use tree-based reductions for objects. .. seealso:: :attr:`mpi4py.rc.fast_reduce` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_RC_RECV_MPROBE :type: :class:`bool` :default: :obj:`True` Whether to use matched probes to receive objects. .. seealso:: :attr:`mpi4py.rc.recv_mprobe` .. envvar:: MPI4PY_RC_ERRORS :default: ``"exception"`` :choices: ``"exception"``, ``"default"``, ``"fatal"`` Controls default MPI error handling policy. .. seealso:: :attr:`mpi4py.rc.errors` .. versionadded:: 3.1.0 .. envvar:: MPI4PY_PICKLE_PROTOCOL :type: :class:`int` :default: :data:`pickle.HIGHEST_PROTOCOL` Controls the default pickle protocol to use when communicating Python objects. .. seealso:: :attr:`~mpi4py.MPI.Pickle.PROTOCOL` attribute of the :data:`MPI.pickle` object within the :mod:`~mpi4py.MPI` module. .. versionadded:: 3.1.0 .. envvar:: MPI4PY_PICKLE_THRESHOLD :type: :class:`int` :default: ``262144`` Controls the default buffer size threshold for switching from in-band to out-of-band buffer handling when using pickle protocol version 5 or higher. .. seealso:: Module :mod:`mpi4py.util.pkl5`. .. versionadded:: 3.1.2 Miscellaneous functions ----------------------- .. autofunction:: mpi4py.profile ..
autofunction:: mpi4py.get_config .. autofunction:: mpi4py.get_include .. Local variables: .. fill-column: 79 .. End: mpi4py-3.1.6/docs/source/usrman/mpi4py.run.rst000066400000000000000000000105071460670727200212750ustar00rootroot00000000000000mpi4py.run ========== .. module:: mpi4py.run :synopsis: Run Python code using ``-m mpi4py``. .. versionadded:: 3.0.0 At import time, :mod:`mpi4py` initializes the MPI execution environment calling :c:func:`MPI_Init_thread` and installs an exit hook to automatically call :c:func:`MPI_Finalize` just before the Python process terminates. Additionally, :mod:`mpi4py` overrides the default `ERRORS_ARE_FATAL` error handler in favor of `ERRORS_RETURN`, which allows translating MPI errors into Python exceptions. These departures from standard MPI behavior may be controversial, but are quite convenient within the highly dynamic Python programming environment. Third-party code using :mod:`mpi4py` can just ``from mpi4py import MPI`` and perform MPI calls without the tedious initialization/finalization handling. MPI errors, once translated automatically to Python exceptions, can be dealt with using the common :keyword:`try`...\ :keyword:`except`...\ :keyword:`finally` clauses; unhandled MPI exceptions will print a traceback which helps in locating problems in source code. Unfortunately, the interplay of automatic MPI finalization and unhandled exceptions may lead to deadlocks. In unattended runs, these deadlocks will drain the battery of your laptop, or burn precious allocation hours in your supercomputing facility. Consider the following snippet of Python code. Assume this code is stored in a standard Python script file and run with :command:`mpiexec` in two or more processes. :: from mpi4py import MPI assert MPI.COMM_WORLD.Get_size() > 1 rank = MPI.COMM_WORLD.Get_rank() if rank == 0: 1/0 MPI.COMM_WORLD.send(None, dest=1, tag=42) elif rank == 1: MPI.COMM_WORLD.recv(source=0, tag=42) Process 0 raises a `ZeroDivisionError` exception before performing a send call to process 1. As the exception is not handled, the Python interpreter running in process 0 will proceed to exit with non-zero status. However, as :mod:`mpi4py` installed a finalizer hook to call :c:func:`MPI_Finalize` before exit, process 0 will block waiting for other processes to also enter the :c:func:`MPI_Finalize` call. Meanwhile, process 1 will block waiting for a message to arrive from process 0, thus never reaching :c:func:`MPI_Finalize`. The whole MPI execution environment is irremediably in a deadlock state. To alleviate this issue, :mod:`mpi4py` offers a simple, alternative command line execution mechanism based on using the :ref:`-m <python:using-on-cmdline>` flag and implemented with the :mod:`runpy` module. To use this feature, Python code should be run passing ``-m mpi4py`` in the command line invoking the Python interpreter. In case of unhandled exceptions, the finalizer hook will call :c:func:`MPI_Abort` on the :c:data:`MPI_COMM_WORLD` communicator, thus effectively aborting the MPI execution environment. .. warning:: When a process is forced to abort, resources (e.g. open files) are not cleaned up and any registered finalizers (either with the :mod:`atexit` module, the Python C/API function :c:func:`Py_AtExit()`, or even the C standard library function :c:func:`atexit`) will not be executed. Thus, aborting execution is an extremely impolite way of ensuring process termination. However, MPI provides no other mechanism to recover from a deadlock state.
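For illustration purposes only, the effect of running under ``-m mpi4py`` is roughly equivalent to wrapping the program entry point by hand as in the following sketch (this is not the actual implementation, and the ``main`` function stands for arbitrary user code)::

    import sys
    import traceback
    from mpi4py import MPI

    def main():
        ...  # arbitrary user code (assumed, for illustration)

    if __name__ == '__main__':
        try:
            main()
        except BaseException:
            traceback.print_exc()    # report the error to standard error
            sys.stderr.flush()
            MPI.COMM_WORLD.Abort(1)  # force the whole MPI job to terminate

Running under ``-m mpi4py`` provides this behavior without requiring any changes to user code.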
Interface options ----------------- The use of ``-m mpi4py`` to execute Python code on the command line resembles that of the Python interpreter. * :samp:`mpiexec -n {numprocs} python -m mpi4py {pyfile} [arg] ...` * :samp:`mpiexec -n {numprocs} python -m mpi4py -m {mod} [arg] ...` * :samp:`mpiexec -n {numprocs} python -m mpi4py -c {cmd} [arg] ...` * :samp:`mpiexec -n {numprocs} python -m mpi4py - [arg] ...` .. describe:: Execute the Python code contained in *pyfile*, which must be a filesystem path referring to either a Python file, a directory containing a :file:`__main__.py` file, or a zipfile containing a :file:`__main__.py` file. .. cmdoption:: -m Search :data:`sys.path` for the named module *mod* and execute its contents. .. cmdoption:: -c Execute the Python code in the *cmd* string command. .. describe:: - Read commands from standard input (:data:`sys.stdin`). .. seealso:: :ref:`python:using-on-cmdline` Documentation on Python command line interface. .. Local variables: .. fill-column: 79 .. End: mpi4py-3.1.6/docs/source/usrman/mpi4py.util.dtlib.rst000066400000000000000000000006621460670727200225440ustar00rootroot00000000000000mpi4py.util.dtlib ----------------- .. module:: mpi4py.util.dtlib :synopsis: Convert NumPy and MPI datatypes. .. versionadded:: 3.1.0 The :mod:`mpi4py.util.dtlib` module provides converter routines between NumPy and MPI datatypes. .. autofunction:: from_numpy_dtype :param dtype: NumPy dtype-like object. .. autofunction:: to_numpy_dtype :param datatype: MPI datatype. .. Local variables: .. fill-column: 79 .. End: mpi4py-3.1.6/docs/source/usrman/mpi4py.util.pkl5.rst000066400000000000000000000103741460670727200223220ustar00rootroot00000000000000mpi4py.util.pkl5 ----------------- .. module:: mpi4py.util.pkl5 :synopsis: Pickle-based communication using protocol 5. .. versionadded:: 3.1.0 :mod:`pickle` protocol 5 (see :pep:`574`) introduced support for out-of-band buffers, allowing for more efficient handling of certain object types with large memory footprints. MPI for Python uses the traditional in-band handling of buffers. This approach is appropriate for communicating non-buffer Python objects, or buffer-like objects with small memory footprints. For point-to-point communication, in-band buffer handling allows for the communication of a pickled stream with a single MPI message, at the expense of additional CPU and memory overhead in the pickling and unpickling steps. The :mod:`mpi4py.util.pkl5` module provides communicator wrapper classes reimplementing pickle-based point-to-point communication methods using pickle protocol 5. Handling out-of-band buffers necessarily involve multiple MPI messages, thus increasing latency and hurting performance in case of small size data. However, in case of large size data, the zero-copy savings of out-of-band buffer handling more than offset the extra latency costs. Additionally, these wrapper methods overcome the infamous 2 GiB message count limit (MPI-1 to MPI-3). .. note:: Support for pickle protocol 5 is available in the :mod:`pickle` module within the Python standard library since Python 3.8. Previous Python 3 releases can use the :mod:`pickle5` backport, which is available on `PyPI `_ and can be installed with:: python -m pip install pickle5 .. _pickle5-pypi: https://pypi.org/project/pickle5/ .. autoclass:: Request Custom request class for nonblocking communications. .. note:: :class:`Request` is not a subclass of :class:`mpi4py.MPI.Request` .. automethod:: Free .. automethod:: cancel .. automethod:: get_status .. 
automethod:: test .. automethod:: wait .. automethod:: testall :classmethod: .. automethod:: waitall :classmethod: .. autoclass:: Message Custom message class for matching probes. .. note:: :class:`Message` is not a subclass of :class:`mpi4py.MPI.Message` .. automethod:: recv .. automethod:: irecv .. automethod:: probe :classmethod: .. automethod:: iprobe :classmethod: .. autoclass:: Comm Base communicator wrapper class. .. automethod:: send .. automethod:: bsend .. automethod:: ssend .. automethod:: isend .. automethod:: ibsend .. automethod:: issend .. automethod:: recv .. automethod:: irecv .. warning:: This method cannot be supported reliably and raises :exc:`RuntimeError`. .. automethod:: sendrecv .. automethod:: mprobe .. automethod:: improbe .. automethod:: bcast .. autoclass:: Intracomm Intracommunicator wrapper class. .. autoclass:: Intercomm Intercommunicator wrapper class. Examples ++++++++ .. code-block:: python :name: test-pkl5-1 :caption: :file:`test-pkl5-1.py` :emphasize-lines: 3,5,11 :linenos: import numpy as np from mpi4py import MPI from mpi4py.util import pkl5 comm = pkl5.Intracomm(MPI.COMM_WORLD) # comm wrapper size = comm.Get_size() rank = comm.Get_rank() dst = (rank + 1) % size src = (rank - 1) % size sobj = np.full(1024**3, rank, dtype='i4') # > 4 GiB sreq = comm.isend(sobj, dst, tag=42) robj = comm.recv (None, src, tag=42) sreq.Free() assert np.min(robj) == src assert np.max(robj) == src .. code-block:: python :name: test-pkl5-2 :caption: :file:`test-pkl5-2.py` :emphasize-lines: 3,5,11 :linenos: import numpy as np from mpi4py import MPI from mpi4py.util import pkl5 comm = pkl5.Intracomm(MPI.COMM_WORLD) # comm wrapper size = comm.Get_size() rank = comm.Get_rank() dst = (rank + 1) % size src = (rank - 1) % size sobj = np.full(1024**3, rank, dtype='i4') # > 4 GiB sreq = comm.isend(sobj, dst, tag=42) status = MPI.Status() rmsg = comm.mprobe(status=status) assert status.Get_source() == src assert status.Get_tag() == 42 rreq = rmsg.irecv() robj = rreq.wait() sreq.Free() assert np.max(robj) == src assert np.min(robj) == src .. Local variables: .. fill-column: 79 .. End: mpi4py-3.1.6/docs/source/usrman/mpi4py.util.rst000066400000000000000000000005261460670727200214460ustar00rootroot00000000000000mpi4py.util =========== .. module:: mpi4py.util :synopsis: Miscellaneous utilities. .. versionadded:: 3.1.0 The :mod:`mpi4py.util` package collects miscellaneous utilities within the intersection of Python and MPI. .. toctree:: :maxdepth: 1 mpi4py.util.pkl5 mpi4py.util.dtlib .. Local variables: .. fill-column: 79 .. End: mpi4py-3.1.6/docs/source/usrman/overview.rst000066400000000000000000000520771460670727200211260ustar00rootroot00000000000000Overview ======== .. currentmodule:: mpi4py.MPI MPI for Python provides an object oriented approach to message passing which grounds on the standard MPI-2 C++ bindings. The interface was designed with focus in translating MPI syntax and semantics of standard MPI-2 bindings for C++ to Python. Any user of the standard C/C++ MPI bindings should be able to use this module without need of learning a new interface. Communicating Python Objects and Array Data ------------------------------------------- The Python standard library supports different mechanisms for data persistence. Many of them rely on disk storage, but *pickling* and *marshaling* can also work with memory buffers. The :mod:`pickle` modules provide user-extensible facilities to serialize general Python objects using ASCII or binary formats. 
The :mod:`marshal` module provides facilities to serialize built-in Python objects using a binary format specific to Python, but independent of machine architecture issues. *MPI for Python* can communicate any built-in or user-defined Python object taking advantage of the features provided by the :mod:`pickle` module. These facilities will be routinely used to build binary representations of objects to communicate (at sending processes), and to restore them back (at receiving processes). Although simple and general, the serialization approach (i.e., *pickling* and *unpickling*) previously discussed imposes important overheads in memory as well as processor usage, especially in the scenario of objects with large memory footprints being communicated. Pickling general Python objects, ranging from primitive or container built-in types to user-defined classes, necessarily requires computer resources. Processing is also needed for dispatching the appropriate serialization method (that depends on the type of the object) and doing the actual packing. Additional memory is always needed, and if its total amount is not known *a priori*, many reallocations can occur. Indeed, in the case of large numeric arrays, this is certainly unacceptable and precludes communication of objects occupying half or more of the available memory resources. *MPI for Python* supports direct communication of any object exporting the single-segment buffer interface. This interface is a standard Python mechanism provided by some types (e.g., strings and numeric arrays), allowing access on the C side to a contiguous memory buffer (i.e., address and length) containing the relevant data. This feature, in conjunction with the capability of constructing user-defined MPI datatypes describing complicated memory layouts, enables the implementation of many algorithms involving multidimensional numeric arrays (e.g., image processing, fast Fourier transforms, finite difference schemes on structured Cartesian grids) directly in Python, with negligible overhead, and almost as fast as compiled Fortran, C, or C++ codes. Communicators ------------- In *MPI for Python*, `Comm` is the base class of communicators. The `Intracomm` and `Intercomm` classes are subclasses of the `Comm` class. The `Comm.Is_inter` method (and `Comm.Is_intra`, provided for convenience but not part of the MPI specification) is defined for communicator objects and can be used to determine the particular communicator class. Two predefined intracommunicator instances are available: `COMM_SELF` and `COMM_WORLD`. From them, new communicators can be created as needed. The number of processes in a communicator and the calling process rank can be respectively obtained with methods `Comm.Get_size` and `Comm.Get_rank`. The associated process group can be retrieved from a communicator by calling the `Comm.Get_group` method, which returns an instance of the `Group` class. Set operations with `Group` objects like `Group.Union`, `Group.Intersection` and `Group.Difference` are fully supported, as well as the creation of new communicators from these groups using `Comm.Create` and `Comm.Create_group`. New communicator instances can be obtained with the `Comm.Clone`, `Comm.Dup` and `Comm.Split` methods, as well as with the methods `Intracomm.Create_intercomm` and `Intercomm.Merge`. Virtual topologies (`Cartcomm`, `Graphcomm` and `Distgraphcomm` classes, which are specializations of the `Intracomm` class) are fully supported.
New instances can be obtained from intracommunicator instances with factory methods `Intracomm.Create_cart` and `Intracomm.Create_graph`. Point-to-Point Communications ----------------------------- Point-to-point communication is a fundamental capability of message passing systems. This mechanism enables the transmission of data between a pair of processes, one side sending, the other receiving. MPI provides a set of *send* and *receive* functions allowing the communication of *typed* data with an associated *tag*. The type information enables the conversion of data representation from one architecture to another in the case of heterogeneous computing environments; additionally, it allows the representation of non-contiguous data layouts and user-defined datatypes, thus avoiding the overhead of (otherwise unavoidable) packing/unpacking operations. The tag information allows selectivity of messages at the receiving end. Blocking Communications ^^^^^^^^^^^^^^^^^^^^^^^ MPI provides basic send and receive functions that are *blocking*. These functions block the caller until the data buffers involved in the communication can be safely reused by the application program. In *MPI for Python*, the `Comm.Send`, `Comm.Recv` and `Comm.Sendrecv` methods of communicator objects provide support for blocking point-to-point communications within `Intracomm` and `Intercomm` instances. These methods can communicate memory buffers. The variants `Comm.send`, `Comm.recv` and `Comm.sendrecv` can communicate general Python objects. Nonblocking Communications ^^^^^^^^^^^^^^^^^^^^^^^^^^ On many systems, performance can be significantly increased by overlapping communication and computation. This is particularly true on systems where communication can be executed autonomously by an intelligent, dedicated communication controller. MPI provides *nonblocking* send and receive functions. They allow the possible overlap of communication and computation. Non-blocking communication always comes in two parts: posting functions, which begin the requested operation; and test-for-completion functions, which allow one to discover whether the requested operation has completed. In *MPI for Python*, the `Comm.Isend` and `Comm.Irecv` methods initiate send and receive operations, respectively. These methods return a `Request` instance, uniquely identifying the started operation. Its completion can be managed using the `Request.Test`, `Request.Wait` and `Request.Cancel` methods. The management of `Request` objects and associated memory buffers involved in communication requires careful, rather low-level coordination. Users must ensure that objects exposing their memory buffers are not accessed at the Python level while they are involved in nonblocking message-passing operations. Persistent Communications ^^^^^^^^^^^^^^^^^^^^^^^^^ Often a communication with the same argument list is repeatedly executed within an inner loop. In such cases, communication can be further optimized by using persistent communication, a particular case of nonblocking communication allowing the reduction of the overhead between processes and communication controllers. Furthermore, this kind of optimization can also alleviate the extra call overheads associated with interpreted, dynamic languages like Python. In *MPI for Python*, the `Comm.Send_init` and `Comm.Recv_init` methods create persistent requests for a send and receive operation, respectively. These methods return an instance of the `Prequest` class, a subclass of the `Request` class.
The actual communication can be effectively started using the `Prequest.Start` method, and its completion can be managed as previously described. Collective Communications -------------------------- Collective communications allow the transmittal of data between multiple processes of a group simultaneously. The syntax and semantics of collective functions are consistent with point-to-point communication. Collective functions communicate *typed* data, but messages are not paired with an associated *tag*; selectivity of messages is implied in the calling order. Additionally, collective functions come in blocking versions only. The more commonly used collective communication operations are the following. * Barrier synchronization across all group members. * Global communication functions + Broadcast data from one member to all members of a group. + Gather data from all members to one member of a group. + Scatter data from one member to all members of a group. * Global reduction operations such as sum, maximum, minimum, etc. In *MPI for Python*, the `Comm.Bcast`, `Comm.Scatter`, `Comm.Gather`, `Comm.Allgather`, `Comm.Alltoall` methods provide support for collective communications of memory buffers. The lower-case variants `Comm.bcast`, `Comm.scatter`, `Comm.gather`, `Comm.allgather` and `Comm.alltoall` can communicate general Python objects. The vector variants (which can communicate different amounts of data to each process) `Comm.Scatterv`, `Comm.Gatherv`, `Comm.Allgatherv`, `Comm.Alltoallv` and `Comm.Alltoallw` are also supported; they can only communicate objects exposing memory buffers. Global reduction operations on memory buffers are accessible through the `Comm.Reduce`, `Comm.Reduce_scatter`, `Comm.Allreduce`, `Intracomm.Scan` and `Intracomm.Exscan` methods. The lower-case variants `Comm.reduce`, `Comm.allreduce`, `Intracomm.scan` and `Intracomm.exscan` can communicate general Python objects; however, the actual required reduction computations are performed sequentially at some process. All the predefined (i.e., `SUM`, `PROD`, `MAX`, etc.) reduction operations can be applied. Support for GPU-aware MPI ------------------------- Several MPI implementations, including Open MPI and MVAPICH, support passing GPU pointers to MPI calls to avoid explicit data movement between the host and the device. On the Python side, GPU arrays have been implemented by many libraries that need GPU computation, such as CuPy, Numba, PyTorch, and PyArrow. In order to increase library interoperability, two kinds of zero-copy data exchange protocols are defined and agreed upon: `DLPack`_ and `CUDA Array Interface`_. For example, a CuPy array can be passed to a Numba CUDA-jit kernel. .. _DLPack: https://data-apis.org/array-api/latest/design_topics/data_interchange.html .. _CUDA Array Interface: https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html *MPI for Python* provides experimental support for GPU-aware MPI. This feature requires: 1. mpi4py is built against a GPU-aware MPI library. 2. The Python GPU arrays are compliant with either of the protocols. See the :doc:`tutorial` section for further information. We note that * Whether or not an MPI call can work for GPU arrays depends on the underlying MPI implementation, not on mpi4py. * This support is currently experimental and subject to change in the future.
Dynamic Process Management -------------------------- In the context of the MPI-1 specification, a parallel application is static; that is, no processes can be added to or deleted from a running application after it has been started. Fortunately, this limitation was addressed in MPI-2. The new specification added a process management model providing a basic interface between an application and external resources and process managers. This MPI-2 extension can be really useful, especially for sequential applications built on top of parallel modules, or parallel applications with a client/server model. The MPI-2 process model provides a mechanism to create new processes and establish communication between them and the existing MPI application. It also provides mechanisms to establish communication between two existing MPI applications, even when one did not *start* the other. In *MPI for Python*, new independent process groups can be created by calling the `Intracomm.Spawn` method within an intracommunicator. This call returns a new intercommunicator (i.e., an `Intercomm` instance) at the parent process group. The child process group can retrieve the matching intercommunicator by calling the `Comm.Get_parent` class method. At each side, the new intercommunicator can be used to perform point-to-point and collective communications between the parent and child groups of processes. Alternatively, disjoint groups of processes can establish communication using a client/server approach. Any server application must first call the `Open_port` function to open a *port* and the `Publish_name` function to publish a provided *service*, and next call the `Intracomm.Accept` method. Any client application can first find a published *service* by calling the `Lookup_name` function, which returns the *port* where a server can be contacted; and next call the `Intracomm.Connect` method. Both `Intracomm.Accept` and `Intracomm.Connect` methods return an `Intercomm` instance. When the connection between client/server processes is no longer needed, all of them must cooperatively call the `Comm.Disconnect` method. Additionally, server applications should release resources by calling the `Unpublish_name` and `Close_port` functions. One-Sided Communications ------------------------ One-sided communications (also called *Remote Memory Access*, *RMA*) supplement the traditional two-sided, send/receive based MPI communication model with a one-sided, put/get based interface. One-sided communication can take advantage of the capabilities of highly specialized network hardware. Additionally, this extension lowers latency and software overhead in applications written using a shared-memory-like paradigm. The MPI specification revolves around the use of objects called *windows*; they intuitively specify regions of a process's memory that have been made available for remote read and write operations. The published memory blocks can be accessed through three functions for put (remote write), get (remote read), and accumulate (remote update or reduction) of data items. A much larger number of functions support different synchronization styles; the semantics of these synchronization operations are fairly complex. In *MPI for Python*, one-sided operations are available by using instances of the `Win` class. New window objects are created by calling the `Win.Create` method at all processes within a communicator and specifying a memory buffer. When a window instance is no longer needed, the `Win.Free` method should be called.
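For instance, a window exposing a local NumPy buffer can be created and released as follows (a minimal sketch; the buffer size and datatype are arbitrary choices made for illustration)::

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    mem = np.zeros(16, dtype='d')         # local memory exposed to other processes
    win = MPI.Win.Create(mem, comm=comm)  # collective call over the communicator
    # ... synchronized one-sided operations would go here ...
    win.Free()                            # collective call releasing the window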
The three one-sided MPI operations for remote write, read and reduction are available through calling the methods `Win.Put`, `Win.Get`, and `Win.Accumulate` respectively within a `Win` instance. These methods need an integer rank identifying the target process and an integer offset relative to the base address of the remote memory block being accessed. The one-sided operations read, write, and reduction are implicitly nonblocking, and must be synchronized using one of two primary modes. Active target synchronization requires the origin process to call the `Win.Start` and `Win.Complete` methods, while the target process cooperates by calling the `Win.Post` and `Win.Wait` methods. There is also a collective variant provided by the `Win.Fence` method. Passive target synchronization is more lenient: only the origin process calls the `Win.Lock` and `Win.Unlock` methods. Locks are used to protect remote accesses to the locked remote window and to protect local load/store accesses to a locked local window. Parallel Input/Output --------------------- The POSIX standard provides a model of a widely portable file system. However, the optimization needed for parallel input/output cannot be achieved with this generic interface. In order to ensure efficiency and scalability, the underlying parallel input/output system must provide a high-level interface supporting partitioning of file data among processes and a collective interface supporting complete transfers of global data structures between process memories and files. Additionally, further efficiencies can be gained via support for asynchronous input/output, strided accesses to data, and control over physical file layout on storage devices. This scenario motivated the inclusion in the MPI-2 standard of a custom interface in order to support more elaborate parallel input/output operations. The MPI specification for parallel input/output revolves around the use of objects called *files*. As defined by MPI, files are not just contiguous byte streams. Instead, they are regarded as ordered collections of *typed* data items. MPI supports sequential or random access to any integral set of these items. Furthermore, files are opened collectively by a group of processes. The common patterns for accessing a shared file (broadcast, scatter, gather, reduction) are expressed by using user-defined datatypes. Compared to the communication patterns of point-to-point and collective communications, this approach has the advantage of added flexibility and expressiveness. Data access operations (read and write) are defined for different kinds of positioning (using explicit offsets, individual file pointers, and shared file pointers), coordination (non-collective and collective), and synchronism (blocking, nonblocking, and split collective with begin/end phases). In *MPI for Python*, all MPI input/output operations are performed through instances of the `File` class. File handles are obtained by calling the `File.Open` method at all processes within a communicator and providing a file name and the intended access mode. After use, they must be closed by calling the `File.Close` method. Files can even be deleted by calling the `File.Delete` method. After creation, files are typically associated with a per-process *view*. The view defines the current set of data visible and accessible from an open file as an ordered set of elementary datatypes. This data layout can be set and queried with the `File.Set_view` and `File.Get_view` methods respectively.
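For instance, a file can be opened collectively and given a simple per-process view as follows (a minimal sketch; the file name, access mode, and displacements are arbitrary choices made for illustration)::

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    amode = MPI.MODE_WRONLY | MPI.MODE_CREATE
    fh = MPI.File.Open(comm, 'viewdemo.bin', amode)  # collective open
    disp = comm.Get_rank() * MPI.INT.Get_size()      # rank-dependent displacement
    fh.Set_view(disp, etype=MPI.INT)                 # file seen as a stream of ints
    # ... read/write calls relative to this view would go here ...
    fh.Close()                                       # collective close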
Actual input/output operations are achieved by many methods combining read and write calls with different behavior regarding positioning, coordination, and synchronism. Summing up, *MPI for Python* provides the thirty (30) methods defined in MPI-2 for reading from or writing to files using explicit offsets or file pointers (individual or shared), in blocking or nonblocking and collective or noncollective versions. Environmental Management ------------------------ Initialization and Exit ^^^^^^^^^^^^^^^^^^^^^^^ Module functions `Init` or `Init_thread` and `Finalize` provide MPI initialization and finalization respectively. Module functions `Is_initialized` and `Is_finalized` provide the respective tests for initialization and finalization. .. note:: :c:func:`MPI_Init` or :c:func:`MPI_Init_thread` is actually called when you import the :mod:`~mpi4py.MPI` module from the :mod:`mpi4py` package, but only if MPI is not already initialized. In such a case, calling `Init` or `Init_thread` from Python is expected to generate an MPI error, and in turn an exception will be raised. .. note:: :c:func:`MPI_Finalize` is registered (by using the Python C/API function :c:func:`Py_AtExit`) to be automatically called when Python processes exit, but only if :mod:`mpi4py` actually initialized MPI. Therefore, there is no need to call `Finalize` from Python to ensure MPI finalization. Implementation Information ^^^^^^^^^^^^^^^^^^^^^^^^^^ * The MPI version number can be retrieved from module function `Get_version`. It returns a two-integer tuple ``(version, subversion)``. * The `Get_processor_name` function can be used to access the processor name. * The values of predefined attributes attached to the world communicator can be obtained by calling the `Comm.Get_attr` method within the `COMM_WORLD` instance. Timers ^^^^^^ MPI timer functionalities are available through the `Wtime` and `Wtick` functions. Error Handling ^^^^^^^^^^^^^^ In order to facilitate handle sharing with other Python modules interfacing MPI-based parallel libraries, the predefined MPI error handlers `ERRORS_RETURN` and `ERRORS_ARE_FATAL` can be assigned to and retrieved from communicators using methods `Comm.Set_errhandler` and `Comm.Get_errhandler`, and similarly for windows and files. When the predefined error handler `ERRORS_RETURN` is set, errors returned from MPI calls within Python code will raise an instance of the exception class `Exception`, which is a subclass of the standard Python exception `python:RuntimeError`. .. note:: After import, mpi4py overrides the default MPI rules governing inheritance of error handlers. The `ERRORS_RETURN` error handler is set in the predefined `COMM_SELF` and `COMM_WORLD` communicators, as well as any new `Comm`, `Win`, or `File` instance created through mpi4py. If you ever pass such handles to C/C++/Fortran library code, it is recommended to set the `ERRORS_ARE_FATAL` error handler on them to ensure MPI errors do not pass silently. .. warning:: Importing with ``from mpi4py.MPI import *`` will cause a name clash with the standard Python `python:Exception` base class. mpi4py-3.1.6/docs/source/usrman/reference.rst000066400000000000000000000001351460670727200212020ustar00rootroot00000000000000.. _reference: Reference ========= .. autosummary:: :toctree: reference/ mpi4py.MPI mpi4py-3.1.6/docs/source/usrman/tutorial.rst000066400000000000000000000351001460670727200211070ustar00rootroot00000000000000.. _tutorial: Tutorial ======== .. currentmodule:: mpi4py.MPI .. warning:: Under construction.
Contributions very welcome! .. tip:: `Rolf Rabenseifner`_ at `HLRS`_ developed a comprehensive MPI-3.1/4.0 course with slides and a large set of exercises including solutions. This material is `available online <hlrs-mpi_>`_ for self-study. The slides and exercises show the C, Fortran, and Python (mpi4py) interfaces. For performance reasons, most Python exercises use NumPy arrays and communication routines involving buffer-like objects. .. _Rolf Rabenseifner: https://www.hlrs.de/people/rabenseifner/ .. _HLRS: https://www.hlrs.de/ .. _hlrs-mpi: https://www.hlrs.de/training/par-prog-ws/MPI-course-material .. tip:: `Victor Eijkhout`_ at `TACC`_ authored the book *Parallel Programming for Science and Engineering*. This book is available online in `PDF <ppse-pdf_>`_ and `HTML <ppse-html_>`_ formats. The book covers parallel programming with MPI and OpenMP in C/C++ and Fortran, and MPI in Python using mpi4py. .. _Victor Eijkhout: https://tacc.utexas.edu/~eijkhout/ .. _TACC: https://www.tacc.utexas.edu/ .. _ppse-pdf: https://tinyurl.com/vle335course .. _ppse-html: https://tacc.utexas.edu/~eijkhout/pcse/html/index.html *MPI for Python* supports convenient, *pickle*-based communication of generic Python objects as well as fast, near C-speed, direct array data communication of buffer-provider objects (e.g., NumPy arrays). * Communication of generic Python objects You have to use methods with **all-lowercase** names, like `Comm.send`, `Comm.recv`, `Comm.bcast`, `Comm.scatter`, `Comm.gather`. An object to be sent is passed as a parameter to the communication call, and the received object is simply the return value. The `Comm.isend` and `Comm.irecv` methods return `Request` instances; completion of these methods can be managed using the `Request.test` and `Request.wait` methods. The `Comm.recv` and `Comm.irecv` methods may be passed a buffer object that can be repeatedly used to receive messages, avoiding internal memory allocation. This buffer must be sufficiently large to accommodate the transmitted messages; hence, any buffer passed to `Comm.recv` or `Comm.irecv` must be at least as long as the *pickled* data transmitted to the receiver. Collective calls like `Comm.scatter`, `Comm.gather`, `Comm.allgather`, `Comm.alltoall` expect a single value or a sequence of `Comm.size` elements at the root or at all processes. They return a single value, a list of `Comm.size` elements, or `None`. .. note:: *MPI for Python* uses the **highest** :ref:`protocol version <python:pickle-protocols>` available in the Python runtime (see the :data:`~pickle.HIGHEST_PROTOCOL` constant in the :mod:`pickle` module). The default protocol can be changed at import time by setting the :envvar:`MPI4PY_PICKLE_PROTOCOL` environment variable, or at runtime by assigning a different value to the :attr:`~mpi4py.MPI.Pickle.PROTOCOL` attribute of the :obj:`~mpi4py.MPI.pickle` object within the :mod:`~mpi4py.MPI` module. * Communication of buffer-like objects You have to use method names starting with an **upper-case** letter, like `Comm.Send`, `Comm.Recv`, `Comm.Bcast`, `Comm.Scatter`, `Comm.Gather`. In general, buffer arguments to these calls must be explicitly specified by using a 2/3-list/tuple like ``[data, MPI.DOUBLE]``, or ``[data, count, MPI.DOUBLE]`` (the former one uses the byte-size of ``data`` and the extent of the MPI datatype to define ``count``). For vector collective communication operations like `Comm.Scatterv` and `Comm.Gatherv`, buffer arguments are specified as ``[data, count, displ, datatype]``, where ``count`` and ``displ`` are sequences of integral values.
Automatic MPI datatype discovery for NumPy/GPU arrays and PEP-3118 buffers is supported, but limited to basic C types (all C/C99-native signed/unsigned integral types and single/double precision real/complex floating types) and availability of matching datatypes in the underlying MPI implementation. In this case, the buffer-provider object can be passed directly as a buffer argument, the count and MPI datatype will be inferred. If mpi4py is built against a GPU-aware MPI implementation, GPU arrays can be passed to upper-case methods as long as they have either the ``__dlpack__`` and ``__dlpack_device__`` methods or the ``__cuda_array_interface__`` attribute that are compliant with the respective standard specifications. Moreover, only C-contiguous or Fortran-contiguous GPU arrays are supported. It is important to note that GPU buffers must be fully ready before any MPI routines operate on them to avoid race conditions. This can be ensured by using the synchronization API of your array library. mpi4py does not have access to any GPU-specific functionality and thus cannot perform this operation automatically for users. Running Python scripts with MPI ------------------------------- Most MPI programs can be run with the command :program:`mpiexec`. In practice, running Python programs looks like:: $ mpiexec -n 4 python script.py to run the program with 4 processors. Point-to-Point Communication ---------------------------- * Python objects (:mod:`pickle` under the hood):: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = {'a': 7, 'b': 3.14} comm.send(data, dest=1, tag=11) elif rank == 1: data = comm.recv(source=0, tag=11) * Python objects with non-blocking communication:: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = {'a': 7, 'b': 3.14} req = comm.isend(data, dest=1, tag=11) req.wait() elif rank == 1: req = comm.irecv(source=0, tag=11) data = req.wait() * NumPy arrays (the fast way!):: from mpi4py import MPI import numpy comm = MPI.COMM_WORLD rank = comm.Get_rank() # passing MPI datatypes explicitly if rank == 0: data = numpy.arange(1000, dtype='i') comm.Send([data, MPI.INT], dest=1, tag=77) elif rank == 1: data = numpy.empty(1000, dtype='i') comm.Recv([data, MPI.INT], source=0, tag=77) # automatic MPI datatype discovery if rank == 0: data = numpy.arange(100, dtype=numpy.float64) comm.Send(data, dest=1, tag=13) elif rank == 1: data = numpy.empty(100, dtype=numpy.float64) comm.Recv(data, source=0, tag=13) Collective Communication ------------------------ * Broadcasting a Python dictionary:: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = {'key1' : [7, 2.72, 2+3j], 'key2' : ( 'abc', 'xyz')} else: data = None data = comm.bcast(data, root=0) * Scattering Python objects:: from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() if rank == 0: data = [(i+1)**2 for i in range(size)] else: data = None data = comm.scatter(data, root=0) assert data == (rank+1)**2 * Gathering Python objects:: from mpi4py import MPI comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() data = (rank+1)**2 data = comm.gather(data, root=0) if rank == 0: for i in range(size): assert data[i] == (i+1)**2 else: assert data is None * Broadcasting a NumPy array:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: data = np.arange(100, dtype='i') else: data = np.empty(100, dtype='i') comm.Bcast(data, root=0) for i in 
range(100): assert data[i] == i * Scattering NumPy arrays:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() sendbuf = None if rank == 0: sendbuf = np.empty([size, 100], dtype='i') sendbuf.T[:,:] = range(size) recvbuf = np.empty(100, dtype='i') comm.Scatter(sendbuf, recvbuf, root=0) assert np.allclose(recvbuf, rank) * Gathering NumPy arrays:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() sendbuf = np.zeros(100, dtype='i') + rank recvbuf = None if rank == 0: recvbuf = np.empty([size, 100], dtype='i') comm.Gather(sendbuf, recvbuf, root=0) if rank == 0: for i in range(size): assert np.allclose(recvbuf[i,:], i) * Parallel matrix-vector product:: from mpi4py import MPI import numpy def matvec(comm, A, x): m = A.shape[0] # local rows p = comm.Get_size() xg = numpy.zeros(m*p, dtype='d') comm.Allgather([x, MPI.DOUBLE], [xg, MPI.DOUBLE]) y = numpy.dot(A, xg) return y MPI-IO ------ * Collective I/O with NumPy arrays:: from mpi4py import MPI import numpy as np amode = MPI.MODE_WRONLY|MPI.MODE_CREATE comm = MPI.COMM_WORLD fh = MPI.File.Open(comm, "./datafile.contig", amode) buffer = np.empty(10, dtype=np.int) buffer[:] = comm.Get_rank() offset = comm.Get_rank()*buffer.nbytes fh.Write_at_all(offset, buffer) fh.Close() * Non-contiguous Collective I/O with NumPy arrays and datatypes:: from mpi4py import MPI import numpy as np comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() amode = MPI.MODE_WRONLY|MPI.MODE_CREATE fh = MPI.File.Open(comm, "./datafile.noncontig", amode) item_count = 10 buffer = np.empty(item_count, dtype='i') buffer[:] = rank filetype = MPI.INT.Create_vector(item_count, 1, size) filetype.Commit() displacement = MPI.INT.Get_size()*rank fh.Set_view(displacement, filetype=filetype) fh.Write_all(buffer) filetype.Free() fh.Close() Dynamic Process Management -------------------------- * Compute Pi - Master (or parent, or client) side:: #!/usr/bin/env python from mpi4py import MPI import numpy import sys comm = MPI.COMM_SELF.Spawn(sys.executable, args=['cpi.py'], maxprocs=5) N = numpy.array(100, 'i') comm.Bcast([N, MPI.INT], root=MPI.ROOT) PI = numpy.array(0.0, 'd') comm.Reduce(None, [PI, MPI.DOUBLE], op=MPI.SUM, root=MPI.ROOT) print(PI) comm.Disconnect() * Compute Pi - Worker (or child, or server) side:: #!/usr/bin/env python from mpi4py import MPI import numpy comm = MPI.Comm.Get_parent() size = comm.Get_size() rank = comm.Get_rank() N = numpy.array(0, dtype='i') comm.Bcast([N, MPI.INT], root=0) h = 1.0 / N; s = 0.0 for i in range(rank, N, size): x = h * (i + 0.5) s += 4.0 / (1.0 + x**2) PI = numpy.array(s * h, dtype='d') comm.Reduce([PI, MPI.DOUBLE], None, op=MPI.SUM, root=0) comm.Disconnect() CUDA-aware MPI + Python GPU arrays ---------------------------------- * Reduce-to-all CuPy arrays:: from mpi4py import MPI import cupy as cp comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() sendbuf = cp.arange(10, dtype='i') recvbuf = cp.empty_like(sendbuf) assert hasattr(sendbuf, '__cuda_array_interface__') assert hasattr(recvbuf, '__cuda_array_interface__') cp.cuda.get_current_stream().synchronize() comm.Allreduce(sendbuf, recvbuf) assert cp.allclose(recvbuf, sendbuf*size) One-Sided Communications ------------------------ * Read from (write to) the entire RMA window:: import numpy as np from mpi4py import MPI from mpi4py.util import dtlib comm = MPI.COMM_WORLD rank = comm.Get_rank() datatype = MPI.FLOAT np_dtype = 
dtlib.to_numpy_dtype(datatype) itemsize = datatype.Get_size() N = 10 win_size = N * itemsize if rank == 0 else 0 win = MPI.Win.Allocate(win_size, comm=comm) buf = np.empty(N, dtype=np_dtype) if rank == 0: buf.fill(42) win.Lock(rank=0) win.Put(buf, target_rank=0) win.Unlock(rank=0) comm.Barrier() else: comm.Barrier() win.Lock(rank=0) win.Get(buf, target_rank=0) win.Unlock(rank=0) assert np.all(buf == 42) * Accessing a part of the RMA window using the ``target`` argument, which is defined as ``(offset, count, datatype)``:: import numpy as np from mpi4py import MPI from mpi4py.util import dtlib comm = MPI.COMM_WORLD rank = comm.Get_rank() datatype = MPI.FLOAT np_dtype = dtlib.to_numpy_dtype(datatype) itemsize = datatype.Get_size() N = comm.Get_size() + 1 win_size = N * itemsize if rank == 0 else 0 win = MPI.Win.Allocate( size=win_size, disp_unit=itemsize, comm=comm, ) if rank == 0: mem = np.frombuffer(win, dtype=np_dtype) mem[:] = np.arange(len(mem), dtype=np_dtype) comm.Barrier() buf = np.zeros(3, dtype=np_dtype) target = (rank, 2, datatype) win.Lock(rank=0) win.Get(buf, target_rank=0, target=target) win.Unlock(rank=0) assert np.all(buf == [rank, rank+1, 0]) Wrapping with SWIG ------------------ * C source: .. sourcecode:: c /* file: helloworld.c */ void sayhello(MPI_Comm comm) { int size, rank; MPI_Comm_size(comm, &size); MPI_Comm_rank(comm, &rank); printf("Hello, World! " "I am process %d of %d.\n", rank, size); } * SWIG interface file: .. sourcecode:: c // file: helloworld.i %module helloworld %{ #include #include "helloworld.c" }% %include mpi4py/mpi4py.i %mpi4py_typemap(Comm, MPI_Comm); void sayhello(MPI_Comm comm); * Try it in the Python prompt:: >>> from mpi4py import MPI >>> import helloworld >>> helloworld.sayhello(MPI.COMM_WORLD) Hello, World! I am process 0 of 1. Wrapping with F2Py ------------------ * Fortran 90 source: .. sourcecode:: fortran ! file: helloworld.f90 subroutine sayhello(comm) use mpi implicit none integer :: comm, rank, size, ierr call MPI_Comm_size(comm, size, ierr) call MPI_Comm_rank(comm, rank, ierr) print *, 'Hello, World! I am process ',rank,' of ',size,'.' end subroutine sayhello * Compiling example using f2py :: $ f2py -c --f90exec=mpif90 helloworld.f90 -m helloworld * Try it in the Python prompt:: >>> from mpi4py import MPI >>> import helloworld >>> fcomm = MPI.COMM_WORLD.py2f() >>> helloworld.sayhello(fcomm) Hello, World! I am process 0 of 1. mpi4py-3.1.6/makefile000066400000000000000000000066441460670727200144700ustar00rootroot00000000000000.PHONY: default default: build PYTHON = python$(py) MPIEXEC = mpiexec # ---- .PHONY: config build test config: ${PYTHON} setup.py config $(opt) build: ${PYTHON} setup.py build $(opt) test: ${VALGRIND} ${PYTHON} ${PWD}/test/runtests.py test-%: ${MPIEXEC} -n $* ${VALGRIND} ${PYTHON} ${PWD}/test/runtests.py .PHONY: srcbuild srcclean srcbuild: ${PYTHON} setup.py build_src $(opt) srcclean: ${RM} src/mpi4py.MPI.c ${RM} src/mpi4py/include/mpi4py/mpi4py.MPI.h ${RM} src/mpi4py/include/mpi4py/mpi4py.MPI_api.h .PHONY: clean distclean fullclean clean: ${PYTHON} setup.py clean --all distclean: clean -${RM} -r build _configtest* *.py[co] -${RM} -r MANIFEST dist -${RM} -r conf/__pycache__ test/__pycache__ -${RM} -r demo/__pycache__ src/mpi4py/__pycache__ -find conf -name '*.py[co]' -exec rm -f {} ';' -find demo -name '*.py[co]' -exec rm -f {} ';' -find test -name '*.py[co]' -exec rm -f {} ';' -find src -name '*.py[co]' -exec rm -f {} ';' fullclean: distclean srcclean docsclean -find . 
-name '*~' -exec rm -f {} ';' # ---- .PHONY: install uninstall install: build ${PYTHON} setup.py install --prefix='' --user $(opt) uninstall: -${RM} -r $(shell ${PYTHON} -m site --user-site)/mpi4py -${RM} -r $(shell ${PYTHON} -m site --user-site)/mpi4py-*-py*.egg-info # ---- .PHONY: docs docs-html docs-pdf docs-misc docs: docs-html docs-pdf docs-misc docs-html: rst2html sphinx-html epydoc-html docs-pdf: sphinx-pdf epydoc-pdf docs-misc: sphinx-man sphinx-info RST2HTML = $(shell command -v rst2html5.py || command -v rst2html5 || false) RST2HTMLOPTS = --config=conf/docutils.conf .PHONY: rst2html rst2html: ${RST2HTML} ${RST2HTMLOPTS} LICENSE.rst docs/LICENSE.html ${RST2HTML} ${RST2HTMLOPTS} CHANGES.rst docs/CHANGES.html ${RST2HTML} ${RST2HTMLOPTS} docs/index.rst docs/index.html SPHINXBUILD = sphinx-build SPHINXOPTS = .PHONY: sphinx sphinx-html sphinx-pdf sphinx-man sphinx-info sphinx: sphinx-html sphinx-pdf sphinx-man sphinx-info sphinx-html: ${PYTHON} -c 'import mpi4py.MPI' mkdir -p build/doctrees docs/usrman ${SPHINXBUILD} -b html -d build/doctrees ${SPHINXOPTS} \ docs/source/usrman docs/usrman ${RM} docs/usrman/.buildinfo sphinx-pdf: ${PYTHON} -c 'import mpi4py.MPI' mkdir -p build/doctrees build/latex ${SPHINXBUILD} -b latex -d build/doctrees ${SPHINXOPTS} \ docs/source/usrman build/latex ${MAKE} -C build/latex all-pdf > /dev/null mv build/latex/*.pdf docs/ sphinx-man: ${PYTHON} -c 'import mpi4py.MPI' mkdir -p build/doctrees build/man ${SPHINXBUILD} -b man -d build/doctrees ${SPHINXOPTS} \ docs/source/usrman build/man mv build/man/*.[137] docs/ sphinx-info: ${PYTHON} -c 'import mpi4py.MPI' mkdir -p build/doctrees build/texinfo ${SPHINXBUILD} -b texinfo -d build/doctrees ${SPHINXOPTS} \ docs/source/usrman build/texinfo ${MAKE} -C build/texinfo info > /dev/null mv build/texinfo/*.info docs/ PYTHON2 = python2 EPYDOCBUILD = ${PYTHON2} ./conf/epydocify.py EPYDOCOPTS = .PHONY: epydoc epydoc-html epydoc-pdf epydoc: epydoc-html epydoc-pdf epydoc-html: mkdir -p docs/apiref ${PYTHON2} -c 'import epydoc, docutils' env CFLAGS=-O0 ${PYTHON2} setup.py -q build --build-lib build/lib.py2 2> /dev/null env PYTHONPATH=$$PWD/build/lib.py2 ${EPYDOCBUILD} ${EPYDOCOPTS} --html -o docs/apiref epydoc-pdf: .PHONY: docsclean docsclean: -${RM} docs/*.info docs/*.[137] -${RM} docs/*.html docs/*.pdf -${RM} -r docs/usrman docs/apiref # ---- .PHONY: sdist sdist: srcbuild docs ${PYTHON} setup.py sdist $(opt) # ---- mpi4py-3.1.6/misc/000077500000000000000000000000001460670727200137115ustar00rootroot00000000000000mpi4py-3.1.6/misc/THANKS.txt000066400000000000000000000010161460670727200154400ustar00rootroot00000000000000====================== THANKS: MPI for Python ====================== :Author: Lisandro Dalcin :Contact: dalcinl@gmail.com I would like to thank everybody who contributed in any way, with code, hints, testing, bug reports, ideas, moral support, or even complaints... 
-- Lisandro Brian Granger Albert Strasheim Frank Eisenmenger Fernando Perez Matthew Turk Wonseok Shin Sam Skillman Greg Tener Amir Khosrowshahi Eilif Muller Andreas Klöckner Christoph Statz Thomas Spura Yaroslav Halchenko Aron Ahmadia Christoph Deil mpi4py-3.1.6/misc/buildtestall.bat000066400000000000000000000027141460670727200170750ustar00rootroot00000000000000@echo off setlocal ENABLEEXTENSIONS set TEST_PY=27,35,35,37,38 set TEST_CC=msvc,mingw32 set TEST_MPI=msmpi for %%A in (%TEST_PY%) do ( for %%B in (%TEST_CC%) do ( for %%C in (%TEST_MPI%) do ( echo -------------------------------------------------------------------------------- call :Main %%A %%B %%C echo -------------------------------------------------------------------------------- ))) goto :eof :Main set PYVERSION=%1 set COMPILER=%2 set MPICONF=%3 set PYTHONDIR=C:\Python%PYVERSION% set PYTHON="%PYTHONDIR%\python.exe" set MINGWDIR=C:\MinGW set GCC=%MINGWDIR%\bin\gcc.exe set MPIDIR==%ProgramFiles%\MPI if %MPICONF%==msmpi set MPIDIR=%ProgramFiles%\Microsoft MPI set MPIEXEC="%MPIDIR%\bin\mpiexec.exe" echo Py: %PYVERSION% - CC: %COMPILER% - MPI: %MPICONF% if "%PYVERSION%-%COMPILER%"=="25-msvc" goto :eof if not exist %PYTHON% goto :eof if not exist %MPIEXEC% goto :eof if not exist %GCC% if %COMPILER%==mingw32 goto :eof if %COMPILER%==mingw32 set PATH=%MINGWDIR%\bin;%PATH% set INSTALLDIR=%TEMP%\mpi4py-buildtest set PYPATHDIR=%INSTALLDIR%\lib\python %PYTHON% setup.py -q clean --all %PYTHON% setup.py -q build --mpi=%MPICONF% --compiler=%COMPILER% %PYTHON% setup.py -q install --home=%INSTALLDIR% %PYTHON% setup.py -q clean --all set PATH_ORIG=%PATH% set PATH=%MPIDIR%\bin;%PATH% %MPIEXEC% -n 2 %PYTHON% test\runtests.py -q -f --path=%PYPATHDIR% set PATH=%PATH_ORIG% rmdir /S /Q %INSTALLDIR% > NUL goto :eof mpi4py-3.1.6/misc/env-mingw32.bat000066400000000000000000000000471460670727200164560ustar00rootroot00000000000000@echo off set PATH=C:\MinGW\bin;%PATH%mpi4py-3.1.6/misc/env-mingw64.bat000066400000000000000000000001351460670727200164610ustar00rootroot00000000000000@echo off set PATH=C:\Program Files\mingw-builds\x64-4.8.1-win32-seh-rev5\mingw64\bin;%PATH%mpi4py-3.1.6/misc/env-msmpi.bat000066400000000000000000000001201460670727200163050ustar00rootroot00000000000000@echo off set MPIDIR=%ProgramFiles%\Microsoft MPI set PATH=%MPIDIR%\bin;%PATH%mpi4py-3.1.6/misc/env-msvc10.bat000066400000000000000000000000551460670727200163000ustar00rootroot00000000000000@echo off set VS90COMNTOOLS=%VS100COMNTOOLS%mpi4py-3.1.6/misc/env-msvc11.bat000066400000000000000000000000551460670727200163010ustar00rootroot00000000000000@echo off set VS90COMNTOOLS=%VS110COMNTOOLS%mpi4py-3.1.6/misc/env-py27.bat000066400000000000000000000000461460670727200157700ustar00rootroot00000000000000@echo off set PATH=C:\Python27;%PATH%mpi4py-3.1.6/misc/env-py35.bat000066400000000000000000000000461460670727200157670ustar00rootroot00000000000000@echo off set PATH=C:\Python35;%PATH%mpi4py-3.1.6/misc/env-py36.bat000066400000000000000000000000461460670727200157700ustar00rootroot00000000000000@echo off set PATH=C:\Python36;%PATH%mpi4py-3.1.6/misc/env-py37.bat000066400000000000000000000000461460670727200157710ustar00rootroot00000000000000@echo off set PATH=C:\Python37;%PATH%mpi4py-3.1.6/misc/env-py38.bat000066400000000000000000000000461460670727200157720ustar00rootroot00000000000000@echo off set PATH=C:\Python38;%PATH%mpi4py-3.1.6/misc/mca-params.conf000066400000000000000000000006371460670727200166070ustar00rootroot00000000000000# Open MPI MCA configuration file # 
=============================== # # * Copy this file to $HOME/.openmpi/mca-params.conf # # + Get a list of all MCA parameters with: # # $ ompi_info --param all all # ### debug options ### ------------- # mca_verbose = 1 # mpi_param_check = 1 # mpi_show_handle_leaks = 1 ### thread warnings ### --------------- mpi_warn_if_thread_multiple = 0 mpi_warn_if_progress_threads = 0 mpi4py-3.1.6/misc/mingw64-msmpi.bat000066400000000000000000000014701460670727200170210ustar00rootroot00000000000000@echo off setlocal ENABLEEXTENSIONS if %PROCESSOR_ARCHITECTURE%==x86 (set ARCH=i386) else (set ARCH=amd64) rem MinGW-w64 rem http://sourceforge.net/projects/mingw-w64/ set MinGW=%ProgramFiles%\mingw-builds\x64-4.8.1-posix-seh-rev5\mingw64 set PATH=%MinGW%\bin;%PATH% set GENDEF=gendef.exe set DLLTOOL=dlltool.exe rem HPC Pack 2012 R2 MS-MPI Redistributable Package rem http://www.microsoft.com/en-us/download/details.aspx?id=41634 set MPIDIR="%ProgramFiles%\Microsoft MPI" set LIBDIR=%MPIDIR%\lib\%ARCH% set DLLDIR=%WinDir%\System32 set LIBNAME=msmpi set DLLNAME=msmpi %GENDEF% - %DLLDIR%\%DLLNAME%.dll > %TEMP%\%LIBNAME%.def %DLLTOOL% --dllname %DLLNAME%.dll --def %TEMP%\%LIBNAME%.def --output-lib %TEMP%\lib%LIBNAME%.a move /Y %TEMP%\%LIBNAME%.def %LIBDIR% move /Y %TEMP%\lib%LIBNAME%.a %LIBDIR%mpi4py-3.1.6/misc/mingw64-python.bat000066400000000000000000000013021460670727200172070ustar00rootroot00000000000000@echo off setlocal ENABLEEXTENSIONS rem MinGW-w64 rem http://sourceforge.net/projects/mingw-w64/ set MinGW=%ProgramFiles%\mingw-builds\x64-4.8.1-posix-seh-rev5\mingw64 set PATH=%MinGW%\bin;%PATH% set GENDEF=gendef.exe set DLLTOOL=dlltool.exe rem Python 2.7 rem http://www.python.org/download/releases/2.7/ set PYVER=27 set PYDIR=C:\Python%PYVER% set LIBDIR=%PYDIR%\libs set DLLDIR=%WinDir%\System32 set LIBNAME=python%PYVER% set DLLNAME=python%PYVER% %GENDEF% - %DLLDIR%\%DLLNAME%.dll > %TEMP%\%LIBNAME%.def %DLLTOOL% --dllname %DLLNAME%.dll --def %TEMP%\%LIBNAME%.def --output-lib %TEMP%\lib%LIBNAME%.a move /Y %TEMP%\%LIBNAME%.def %LIBDIR% move /Y %TEMP%\lib%LIBNAME%.a %LIBDIR%mpi4py-3.1.6/mpi.cfg000066400000000000000000000122511460670727200142250ustar00rootroot00000000000000# Some Linux distributions have RPM's for some MPI implementations. # In such a case, headers and libraries usually are in default system # locations, and you should not need any special configuration. # If you do not have an MPI distribution in a default location, please # uncomment and fill-in appropriately the following lines. You can use # as examples the [mpich], [openmpi], and [msmpi] sections # below the [mpi] section (which is the one used by default). 
# If you specify multiple locations for includes and libraries, # please separate them with the path separator for your platform, # i.e., ':' on Unix-like systems and ';' on Windows # Default configuration # --------------------- [mpi] ## mpi_dir = /usr ## mpi_dir = /usr/local ## mpi_dir = /usr/local/mpi ## mpi_dir = /opt ## mpi_dir = /opt/mpi ## mpi_dir = $ProgramFiles\MPI ## mpicc = %(mpi_dir)s/bin/mpicc ## mpicxx = %(mpi_dir)s/bin/mpicxx ## define_macros = ## undef_macros = ## include_dirs = %(mpi_dir)s/include ## libraries = mpi ## library_dirs = %(mpi_dir)s/lib ## runtime_library_dirs = %(mpi_dir)s/lib ## extra_compile_args = ## extra_link_args = ## extra_objects = # MPICH example # ------------- [mpich] mpi_dir = /home/devel/mpi/mpich/4.0.0 mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpicxx #include_dirs = %(mpi_dir)s/include #libraries = mpi #library_dirs = %(mpi_dir)s/lib #runtime_library_dirs = %(library_dirs)s # Open MPI example # ---------------- [openmpi] mpi_dir = /home/devel/mpi/openmpi/5.0.0 mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpicxx #include_dirs = %(mpi_dir)s/include #libraries = mpi library_dirs = %(mpi_dir)s/lib runtime_library_dirs = %(library_dirs)s # Sun MPI example # --------------- [sunmpi] #mpi_dir = /opt/SUNWhpc/HPC8.2.1/gnu mpi_dir = /opt/SUNWhpc/HPC8.1/sun mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpicxx #include_dirs = %(mpi_dir)s/include #libraries = mpi open-rte open-pal library_dirs = %(mpi_dir)s/lib runtime_library_dirs = %(library_dirs)s # Platform MPI example # -------------------- [pcmpi-linux-64bit] mpi_dir = /opt/ibm/platform_mpi mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpiCC define_macros = NON_BLOCKING_COLLECTIVES runtime_library_dirs = %(mpi_dir)s/lib/linux_amd64 [pcmpi-linux-32bit] mpi_dir = /opt/ibm/platform_mpi mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpiCC define_macros = NON_BLOCKING_COLLECTIVES runtime_library_dirs = %(mpi_dir)s/lib/linux_ia32 # SGI MPI example # --------------- [sgimpi] define_macros = SGI_MPI=1 mpi_dir = /usr mpicc = icc mpicxx = icpc include_dirs = %(mpi_dir)s/include libraries = mpi library_dirs = %(mpi_dir)s/lib runtime_library_dirs = %(library_dirs)s # IBM POE/MPI example # ------------------- [poempi] mpicc = mpcc_r mpicxx = mpCC_r # Microsoft MPI example # --------------------- [msmpi-32bit] mpi_dir = $ProgramFiles\Microsoft SDKs\MPI include_dirs = %(mpi_dir)s\Include libraries = msmpi library_dirs = %(mpi_dir)s\Lib\x86 [msmpi-64bit] mpi_dir = $ProgramFiles\Microsoft SDKs\MPI include_dirs = %(mpi_dir)s\Include libraries = msmpi library_dirs = %(mpi_dir)s\Lib\x64 # Intel MPI for Windows # --------------------- [intelmpi-windows-32bit] mpi_dir = $I_MPI_ROOT\ia32 include_dirs = %(mpi_dir)s\include libraries = impi library_dirs = %(mpi_dir)s\lib\release [intelmpi-windows-64bit] mpi_dir = $I_MPI_ROOT\intel64 include_dirs = %(mpi_dir)s\include libraries = impi library_dirs = %(mpi_dir)s\lib\release # SiCortex MPI example # -------------------- [sicortex] mpicc = mpicc --gnu mpicxx = mpicxx --gnu # LAM/MPI example # --------------- [lammpi] mpi_dir = /home/devel/mpi/lam-7.1.4 mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpic++ include_dirs = %(mpi_dir)s/include libraries = lammpio mpi lam library_dirs = %(mpi_dir)s/lib runtime_library_dirs = %(library_dirs)s # MPICH1 example # -------------- [mpich1] mpi_dir = /home/devel/mpi/mpich-1.2.7p1 mpicc = %(mpi_dir)s/bin/mpicc mpicxx = %(mpi_dir)s/bin/mpicxx include_dirs = %(mpi_dir)s/include libraries 
= mpich library_dirs = %(mpi_dir)s/lib/shared:%(mpi_dir)s/lib runtime_library_dirs = %(mpi_dir)s/lib/shared # No MPI # ------ [nompi] include_dirs = conf/nompi # MPIUNI (PETSc) # -------------- [mpiuni] include_dirs = conf/mpiuni:$PETSC_DIR/include:$PETSC_DIR/$PETSC_ARCH/include # Fujitsu MPI example # ------------------- [fujitsu-mpi] mpicc = mpifcc mpicxx = mpiFCC define_macros = OPENMPI_DLOPEN_LIBMPI=1 extra_compile_args = -Nclang extra_link_args = -Knolargepage mpi4py-3.1.6/pyproject.toml000066400000000000000000000001441460670727200156710ustar00rootroot00000000000000[build-system] requires = ["setuptools >= 40.9.0", "wheel"] build-backend = "setuptools.build_meta" mpi4py-3.1.6/setup.cfg000066400000000000000000000010331460670727200145740ustar00rootroot00000000000000[config] # mpicc = mpicc # mpicxx = mpicxx # mpifort = mpifort # mpif90 = mpif90 # mpif77 = mpif77 [build] # debug = 0 # compiler = mingw32 [sdist] force_manifest = 1 [nosetests] where = test [tool:pytest] testpaths = test [flake8] ignore = E305,E306,E722,F401 [pycodestyle] ignore = E305,E306,E722 exclude = */futures/_base.py [pydocstyle] match = (?!_[a-z]).*\.py add_ignore = D402 [coverage:run] parallel = True branch = True source = mpi4py omit = */mpi4py/futures/_base.py [coverage:paths] source = src/mpi4py */mpi4py mpi4py-3.1.6/setup.py000066400000000000000000000501101460670727200144650ustar00rootroot00000000000000#!/usr/bin/env python # Author: Lisandro Dalcin # Contact: dalcinl@gmail.com __doc__ = \ """ Python bindings for MPI """ import sys import os import re try: import setuptools except ImportError: setuptools = None pyver = sys.version_info[:2] if pyver < (2, 6) or (3, 0) <= pyver <= (3, 2): raise RuntimeError("Python version 2.6+ or 3.3+ required") if pyver == (2, 6) or (3, 3) <= pyver <= (3, 4): sys.stderr.write( "WARNING: Python %d.%d is not supported.\n" % pyver) if (hasattr(sys, 'pypy_version_info') and sys.pypy_version_info[:2] < (2, 0)): raise RuntimeError("PyPy version >= 2.0 required") topdir = os.path.abspath(os.path.dirname(__file__)) sys.path.insert(0, os.path.join(topdir, 'conf')) # -------------------------------------------------------------------- # Metadata # -------------------------------------------------------------------- def name(): return 'mpi4py' def version(): srcdir = os.path.join(topdir, 'src') with open(os.path.join(srcdir, 'mpi4py', '__init__.py')) as f: m = re.search(r"__version__\s*=\s*'(.*)'", f.read()) public_version = m.groups()[0] local_version = os.environ.get('MPI4PY_LOCAL_VERSION') if local_version: return '{0}+{1}'.format(public_version, local_version) else: return public_version def description(): with open(os.path.join(topdir, 'DESCRIPTION.rst')) as f: return f.read() name = name() version = version() url = 'https://github.com/mpi4py/%(name)s/' % vars() download = url + 'releases/download/%(version)s/' % vars() download = download + '%(name)s-%(version)s.tar.gz' % vars() classifiers = """ Development Status :: 5 - Production/Stable Intended Audience :: Developers Intended Audience :: Science/Research License :: OSI Approved :: BSD License Operating System :: MacOS Operating System :: MacOS :: MacOS X Operating System :: Microsoft :: Windows Operating System :: POSIX Operating System :: POSIX :: BSD Operating System :: POSIX :: Linux Operating System :: Unix Programming Language :: C Programming Language :: Cython Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: 
Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Topic :: Scientific/Engineering Topic :: Software Development :: Libraries :: Python Modules Topic :: System :: Distributed Computing """ keywords = """ scientific computing parallel computing message passing interface MPI """ platforms = """ POSIX Linux macOS FreeBSD Windows """ metadata = { 'name' : name, 'version' : version, 'description' : __doc__.strip(), 'long_description' : description(), 'url' : url, 'download_url' : download, 'classifiers' : [c for c in classifiers.split('\n') if c], 'keywords' : [k for k in keywords.split('\n') if k], 'platforms' : [p for p in platforms.split('\n') if p], 'license' : 'BSD', 'author' : 'Lisandro Dalcin', 'author_email' : 'dalcinl@gmail.com', 'maintainer' : 'Lisandro Dalcin', 'maintainer_email' : 'dalcinl@gmail.com', } metadata['provides'] = ['mpi4py'] # -------------------------------------------------------------------- # Extension modules # -------------------------------------------------------------------- def run_command(exe, args): from distutils.spawn import find_executable from distutils.util import split_quoted cmd = find_executable(exe) if not cmd: return [] if not isinstance(args, str): args = ' '.join(args) try: with os.popen(cmd + ' ' + args) as f: return split_quoted(f.read()) except: return [] linux = sys.platform.startswith('linux') solaris = sys.platform.startswith('sunos') darwin = sys.platform.startswith('darwin') if linux: def whole_archive(compiler, name, library_dirs=[]): return ['-Wl,-whole-archive', '-l' + name, '-Wl,-no-whole-archive', ] elif darwin: def darwin_linker_dirs(compiler): from distutils.util import split_quoted linker_cmd = compiler.linker_so + ['-show'] linker_cmd = run_command(linker_cmd[0], linker_cmd[1:]) library_dirs = compiler.library_dirs[:] library_dirs += [flag[2:] for flag in linker_cmd if flag.startswith('-L')] library_dirs += ['/usr/lib'] library_dirs += ['/usr/local/lib'] return library_dirs def whole_archive(compiler, name, library_dirs=[]): library_dirs = library_dirs[:] library_dirs += darwin_linker_dirs(compiler) for libdir in library_dirs: libpath = os.path.join(libdir, 'lib%s.a' % name) if os.path.isfile(libpath): return ['-force_load', libpath] return ['-l%s' % name] elif solaris: def whole_archive(compiler, name, library_dirs=[]): return ['-Wl,-zallextract', '-l' + name, '-Wl,-zdefaultextract', ] else: whole_archive = None def configure_dl(ext, config_cmd): from distutils import log log.info("checking for dlopen() availability ...") ok = config_cmd.check_header('dlfcn.h') if ok : ext.define_macros += [('HAVE_DLFCN_H', 1)] ok = config_cmd.check_library('dl') if ok: ext.libraries += ['dl'] ok = config_cmd.check_function('dlopen', libraries=['dl'], decl=1, call=1) if ok: ext.define_macros += [('HAVE_DLOPEN', 1)] def configure_mpi(ext, config_cmd): from textwrap import dedent from distutils import log from distutils.errors import DistutilsPlatformError headers = ['stdlib.h', 'mpi.h'] # log.info("checking for MPI compile and link ...") ConfigTest = dedent("""\ int main(int argc, char **argv) { (void)MPI_Init(&argc, &argv); (void)MPI_Finalize(); return 0; } """) errmsg = "Cannot %s MPI programs. 
Check your configuration!!!" ok = config_cmd.try_compile(ConfigTest, headers=headers) if not ok: raise DistutilsPlatformError(errmsg % "compile") ok = config_cmd.try_link(ConfigTest, headers=headers) if not ok: raise DistutilsPlatformError(errmsg % "link") # log.info("checking for missing MPI functions/symbols ...") tests = ["defined(%s)" % macro for macro in ("OPEN_MPI", "MSMPI_VER",)] tests += ["(defined(MPICH_NAME)&&(MPICH_NAME>=3))"] tests += ["(defined(MPICH_NAME)&&(MPICH_NAME==2))"] ConfigTest = dedent("""\ #if !(%s) #error "Unknown MPI implementation" #endif """) % "||".join(tests) ok = config_cmd.try_compile(ConfigTest, headers=headers) if not ok: from mpidistutils import ConfigureMPI configure = ConfigureMPI(config_cmd) results = configure.run() configure.dump(results) ext.define_macros += [('HAVE_CONFIG_H', 1)] else: for function, arglist in ( ('MPI_Type_create_f90_integer', '0,(MPI_Datatype*)0'), ('MPI_Type_create_f90_real', '0,0,(MPI_Datatype*)0'), ('MPI_Type_create_f90_complex', '0,0,(MPI_Datatype*)0'), ('MPI_Status_c2f', '(MPI_Status*)0,(MPI_Fint*)0'), ('MPI_Status_f2c', '(MPI_Fint*)0,(MPI_Status*)0'), ): ok = config_cmd.check_function_call( function, arglist, headers=headers) if not ok: macro = 'PyMPI_MISSING_' + function ext.define_macros += [(macro, 1)] for symbol, stype in ( ('MPI_LB', 'MPI_Datatype'), ('MPI_UB', 'MPI_Datatype'), ): ok = config_cmd.check_symbol( symbol, type=stype, headers=headers) if not ok: macro = 'PyMPI_MISSING_' + symbol ext.define_macros += [(macro, 1)] # if os.name == 'posix': configure_dl(ext, config_cmd) def configure_libmpe(lib, config_cmd): # mpecc = os.environ.get('MPECC') or 'mpecc' command = run_command(mpecc, '-mpilog -show') for arg in command: if arg.startswith('-L'): libdir = arg[2:] lib.library_dirs.append(libdir) lib.runtime_library_dirs.append(libdir) # log_lib = 'lmpe' dep_libs = ('pthread', 'mpe') ok = config_cmd.check_library(log_lib, lib.library_dirs) if not ok: return libraries = [] for libname in dep_libs: if config_cmd.check_library( libname, lib.library_dirs, other_libraries=libraries): libraries.insert(0, libname) if whole_archive: cc = config_cmd.compiler dirs = lib.library_dirs[:] lib.extra_link_args += whole_archive(cc, log_lib, dirs) lib.extra_link_args += ['-l' + libname for libname in libraries] else: lib.libraries += [log_lib] + libraries def configure_libvt(lib, config_cmd): # vtcc = os.environ.get('VTCC') or 'vtcc' command = run_command(vtcc, '-vt:showme') for arg in command: if arg.startswith('-L'): libdir = arg[2:] lib.library_dirs.append(libdir) lib.runtime_library_dirs.append(libdir) # modern VampirTrace if lib.name == 'vt': log_lib = 'vt-mpi' else: log_lib = lib.name ok = config_cmd.check_library(log_lib, lib.library_dirs) if ok: lib.libraries = [log_lib] if ok: return # older VampirTrace, Open MPI <= 1.4 if lib.name == 'vt-hyb': log_lib = 'vt.ompi' else: log_lib = 'vt.mpi' dep_libs = ('dl', 'z', 'otf',) ok = config_cmd.check_library(log_lib, lib.library_dirs) if not ok: return libraries = [] for libname in dep_libs: if config_cmd.check_library( libname, lib.library_dirs, other_libraries=libraries): libraries.insert(0, libname) if whole_archive: cc = config_cmd.compiler dirs = lib.library_dirs[:] lib.extra_link_args += whole_archive(cc, log_lib, dirs) lib.extra_link_args += ['-l' + libname for libname in libraries] else: lib.libraries += [log_lib] + libraries lib.define_macros.append(('LIBVT_LEGACY', 1)) if lib.name == 'vt-hyb': openmp_flag = '-fopenmp' # GCC, Intel lib.extra_compile_args.append(openmp_flag) 
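        # vt-hyb is the hybrid (MPI+OpenMP) tracing flavor, so the OpenMP flag is passed to the linker as well as the compiler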
lib.extra_link_args.append(openmp_flag) def configure_pyexe(exe, config_cmd): from distutils import sysconfig if sys.platform.startswith('win'): return if (sys.platform == 'darwin' and ('Anaconda' in sys.version or 'Continuum Analytics' in sys.version)): py_version = sysconfig.get_python_version() py_abiflags = getattr(sys, 'abiflags', '') exe.libraries += ['python' + py_version + py_abiflags] return # from distutils.util import split_quoted cfg_vars = sysconfig.get_config_vars() libraries = [] library_dirs = [] link_args = [] if pyver >= (3, 8) or not cfg_vars.get('Py_ENABLE_SHARED'): py_version = sysconfig.get_python_version() py_abiflags = getattr(sys, 'abiflags', '') libraries = ['python' + py_version + py_abiflags] if hasattr(sys, 'pypy_version_info'): py_tag = py_version[0].replace('2', '') libraries = ['pypy%s-c' % py_tag] if sys.platform == 'darwin': fwkdir = cfg_vars.get('PYTHONFRAMEWORKDIR') if (fwkdir and fwkdir != 'no-framework' and fwkdir in cfg_vars.get('LINKFORSHARED', '')): del libraries[:] for var in ('LIBDIR', 'LIBPL'): library_dirs += split_quoted(cfg_vars.get(var, '')) for var in ('LDFLAGS', 'LIBS', 'MODLIBS', 'SYSLIBS', 'LDLAST'): link_args += split_quoted(cfg_vars.get(var, '')) exe.libraries += libraries exe.library_dirs += library_dirs exe.extra_link_args += link_args def ext_modules(): modules = [] # custom dl extension module dl = dict( name='mpi4py.dl', optional=True, sources=['src/dynload.c'], depends=['src/dynload.h'], configure=configure_dl, ) if os.name == 'posix': modules.append(dl) # MPI extension module from glob import glob MPI = dict( name='mpi4py.MPI', sources=['src/MPI.c'], depends=( ['src/mpi4py.MPI.c'] + glob('src/*.h') + glob('src/lib-mpi/*.h') + glob('src/lib-mpi/config/*.h') + glob('src/lib-mpi/compat/*.h') ), configure=configure_mpi, ) modules.append(MPI) # return modules def libraries(): # MPE logging pmpi_mpe = dict( name='mpe', kind='dylib', optional=True, package='mpi4py', dest_dir='lib-pmpi', sources=['src/lib-pmpi/mpe.c'], configure=configure_libmpe, ) # VampirTrace logging pmpi_vt = dict( name='vt', kind='dylib', optional=True, package='mpi4py', dest_dir='lib-pmpi', sources=['src/lib-pmpi/vt.c'], configure=configure_libvt, ) pmpi_vt_mpi = dict( name='vt-mpi', kind='dylib', optional=True, package='mpi4py', dest_dir='lib-pmpi', sources=['src/lib-pmpi/vt-mpi.c'], configure=configure_libvt, ) pmpi_vt_hyb = dict( name='vt-hyb', kind='dylib', optional=True, package='mpi4py', dest_dir='lib-pmpi', sources=['src/lib-pmpi/vt-hyb.c'], configure=configure_libvt, ) # return [ pmpi_mpe, pmpi_vt, pmpi_vt_mpi, pmpi_vt_hyb, ] def executables(): # MPI-enabled Python interpreter pyexe = dict(name='python-mpi', optional=True, package='mpi4py', dest_dir='bin', sources=['src/python.c'], configure=configure_pyexe, ) # if hasattr(sys, 'pypy_version_info'): return [] return [pyexe] # -------------------------------------------------------------------- # Setup # -------------------------------------------------------------------- from mpidistutils import setup from mpidistutils import Extension as Ext from mpidistutils import Library as Lib from mpidistutils import Executable as Exe CYTHON = '0.27' def run_setup(): """ Call setup(*args, **kargs) """ setup_args = metadata.copy() if setuptools: setup_args['zip_safe'] = False setup_args['setup_requires'] = [] setup_args['python_requires'] = """ >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* """.strip() if setuptools and pyver < (3, 0): setup_args['setup_requires'] += ['3to2'] if setuptools and not 
os.getenv('CONDA_BUILD'): src = os.path.join('src', 'mpi4py.MPI.c') has_src = os.path.exists(os.path.join(topdir, src)) has_git = os.path.isdir(os.path.join(topdir, '.git')) has_hg = os.path.isdir(os.path.join(topdir, '.hg')) if not has_src or has_git or has_hg: setup_args['setup_requires'] += ['Cython>='+CYTHON+',<3.0.0'] # setup( packages = [ 'mpi4py', 'mpi4py.futures', 'mpi4py.util', ], package_data = { 'mpi4py' : [ '*.pxd', 'include/mpi4py/*.h', 'include/mpi4py/*.i', 'include/mpi4py/*.pxi', ], '' : [ 'py.typed', '*.pyi', ], }, package_dir = {'' : 'src'}, ext_modules = [Ext(**ext) for ext in ext_modules()], executables = [Exe(**exe) for exe in executables()], libraries = [Lib(**lib) for lib in libraries() ], **setup_args ) def chk_cython(VERSION): from distutils import log from distutils.version import LooseVersion from distutils.version import StrictVersion warn = lambda msg='': sys.stderr.write(msg+'\n') # try: import Cython except ImportError: warn("*"*80) warn() warn(" You need to generate C source files with Cython!!") warn(" Download and install Cython ") warn() warn("*"*80) return False # try: CYTHON_VERSION = Cython.__version__ except AttributeError: from Cython.Compiler.Version import version as CYTHON_VERSION REQUIRED = VERSION m = re.match(r"(\d+\.\d+(?:\.\d+)?).*", CYTHON_VERSION) if m: Version = StrictVersion AVAILABLE = m.groups()[0] else: Version = LooseVersion AVAILABLE = CYTHON_VERSION if (REQUIRED is not None and Version(AVAILABLE) < Version(REQUIRED)): warn("*"*80) warn() warn(" You need to install Cython %s (you have version %s)" % (REQUIRED, CYTHON_VERSION)) warn(" Download and install Cython ") warn() warn("*"*80) return False # log.info("using Cython version %s" % CYTHON_VERSION) return True def run_cython(source, target=None, depends=(), includes=(), destdir_c=None, destdir_h=None, wdir=None, force=False, VERSION=None): from glob import glob from distutils import log from distutils import dep_util from distutils.errors import DistutilsError if target is None: target = os.path.splitext(source)[0]+'.c' cwd = os.getcwd() try: if wdir: os.chdir(wdir) alldeps = [source] for dep in depends: alldeps += glob(dep) if not (force or dep_util.newer_group(alldeps, target)): log.debug("skipping '%s' -> '%s' (up-to-date)", source, target) return finally: os.chdir(cwd) if not chk_cython(VERSION): raise DistutilsError("requires Cython>=%s" % VERSION) log.info("cythonizing '%s' -> '%s'", source, target) from cythonize import cythonize err = cythonize(source, target, includes=includes, destdir_c=destdir_c, destdir_h=destdir_h, wdir=wdir) if err: raise DistutilsError( "Cython failure: '%s' -> '%s'" % (source, target)) def build_sources(cmd): from distutils.errors import DistutilsError has_src = os.path.exists(os.path.join( topdir, 'src', 'mpi4py.MPI.c')) has_vcs = (os.path.isdir(os.path.join(topdir, '.git')) or os.path.isdir(os.path.join(topdir, '.hg' ))) if (has_src and not has_vcs and not cmd.force): return # mpi4py.MPI source = 'mpi4py/MPI.pyx' target = 'mpi4py.MPI.c' depends = [ 'mpi4py/*.pyx', 'mpi4py/*.pxd', 'mpi4py/MPI/*.pyx', 'mpi4py/MPI/*.pxd', 'mpi4py/MPI/*.pxi', ] destdir_h = os.path.join('mpi4py', 'include', 'mpi4py') run_cython(source, target, depends, destdir_h=destdir_h, wdir='src', force=cmd.force, VERSION=CYTHON) from mpidistutils import build_src build_src.run = build_sources def run_testsuite(cmd): from distutils.errors import DistutilsError sys.path.insert(0, 'test') try: from runtests import main finally: del sys.path[0] if cmd.dry_run: return args = cmd.args[:] 
or [] if cmd.verbose < 1: args.insert(0,'-q') if cmd.verbose > 1: args.insert(0,'-v') err = main(args) if err: raise DistutilsError("test") from mpidistutils import test test.run = run_testsuite def main(): run_setup() if __name__ == '__main__': main() # -------------------------------------------------------------------- mpi4py-3.1.6/src/000077500000000000000000000000001460670727200135455ustar00rootroot00000000000000mpi4py-3.1.6/src/MPI.c000066400000000000000000000001721460670727200143360ustar00rootroot00000000000000#define MPICH_SKIP_MPICXX 1 #define OMPI_SKIP_MPICXX 1 #define OMPI_WANT_MPI_INTERFACE_WARNING 0 #include "mpi4py.MPI.c" mpi4py-3.1.6/src/atimport.h000066400000000000000000000022771460670727200155650ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ /* ------------------------------------------------------------------------- */ #include "Python.h" #include "mpi.h" /* ------------------------------------------------------------------------- */ #include "lib-mpi/config.h" #include "lib-mpi/missing.h" #include "lib-mpi/fallback.h" #include "lib-mpi/compat.h" #include "pympivendor.h" #include "pympistatus.h" #include "pympicommctx.h" /* ------------------------------------------------------------------------- */ #include "pycompat.h" #ifdef PYPY_VERSION #define PyMPI_RUNTIME_PYPY 1 #define PyMPI_RUNTIME_CPYTHON 0 #else #define PyMPI_RUNTIME_PYPY 0 #define PyMPI_RUNTIME_CPYTHON 1 #endif /* ------------------------------------------------------------------------- */ #if !defined(PyMPI_USE_MATCHED_RECV) #if defined(PyMPI_HAVE_MPI_Mprobe) && \ defined(PyMPI_HAVE_MPI_Mrecv) && \ MPI_VERSION >= 3 #define PyMPI_USE_MATCHED_RECV 1 #else #define PyMPI_USE_MATCHED_RECV 0 #endif #endif /* ------------------------------------------------------------------------- */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/dynload.c000066400000000000000000000077121460670727200153520ustar00rootroot00000000000000/* Author: Lisandro Dalcin * Contact: dalcinl@gmail.com */ #include "Python.h" #include "dynload.h" static PyObject * dl_dlopen(PyObject *self, PyObject *args) { void *handle = NULL; char *filename = NULL; int mode = 0; (void)self; /* unused */ if (!PyArg_ParseTuple(args, (char *)"zi:dlopen", &filename, &mode)) return NULL; handle = dlopen(filename, mode); return PyLong_FromVoidPtr(handle); } static PyObject * dl_dlsym(PyObject *self, PyObject *args) { PyObject *arg0 = NULL; void *handle = NULL; char *symbol = NULL; void *symval = NULL; (void)self; /* unused */ if (!PyArg_ParseTuple(args, (char *)"Os:dlsym", &arg0, &symbol)) return NULL; #ifdef RTLD_DEFAULT handle = (void *)RTLD_DEFAULT; #endif if (arg0 != Py_None) { handle = PyLong_AsVoidPtr(arg0); if (PyErr_Occurred()) return NULL; } symval = dlsym(handle, symbol); return PyLong_FromVoidPtr(symval); } static PyObject * dl_dlclose(PyObject *self, PyObject *arg0) { int err = 0; void *handle = NULL; (void)self; /* unused */ if (arg0 != Py_None) { handle = PyLong_AsVoidPtr(arg0); if (PyErr_Occurred()) return NULL; } if (handle) err = dlclose(handle); return Py_BuildValue((char *)"i", err); } static PyObject * dl_dlerror(PyObject *self, PyObject *args) { char *errmsg = NULL; (void)self; (void)args; /* unused */ errmsg = dlerror(); return Py_BuildValue((char *)"z", errmsg); } static PyMethodDef dl_methods[] = { { (char *)"dlopen", dl_dlopen, METH_VARARGS, NULL }, { (char *)"dlsym", dl_dlsym, METH_VARARGS, NULL }, { (char *)"dlclose", dl_dlclose, METH_O, NULL }, { (char 
*)"dlerror", dl_dlerror, METH_NOARGS, NULL }, { (char *)NULL, NULL, 0, NULL } /* sentinel */ }; PyDoc_STRVAR(dl_doc, "POSIX dynamic linking loader"); #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef dl_module = { PyModuleDef_HEAD_INIT, /* m_base */ (char *)"dl", /* m_name */ dl_doc, /* m_doc */ -1, /* m_size */ dl_methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #if !defined(PyModule_AddIntMacro) #define PyModule_AddIntMacro(m, c) \ PyModule_AddIntConstant(m, (char *)#c, c) #endif #define PyModule_AddPtrMacro(m, c) \ PyModule_AddObject(m, (char *)#c, PyLong_FromVoidPtr((void *)c)) #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit_dl(void); PyMODINIT_FUNC PyInit_dl(void) #else PyMODINIT_FUNC initdl(void); PyMODINIT_FUNC initdl(void) #endif { PyObject *m = NULL; #if PY_MAJOR_VERSION >= 3 m = PyModule_Create(&dl_module); #else m = Py_InitModule3((char *)"dl", dl_methods, (char *)dl_doc); #endif if (!m) goto bad; if (PyModule_AddIntMacro(m, RTLD_LAZY ) < 0) goto bad; if (PyModule_AddIntMacro(m, RTLD_NOW ) < 0) goto bad; if (PyModule_AddIntMacro(m, RTLD_LOCAL ) < 0) goto bad; if (PyModule_AddIntMacro(m, RTLD_GLOBAL ) < 0) goto bad; #ifdef RTLD_NOLOAD if (PyModule_AddIntMacro(m, RTLD_NOLOAD ) < 0) goto bad; #endif #ifdef RTLD_NODELETE if (PyModule_AddIntMacro(m, RTLD_NODELETE ) < 0) goto bad; #endif #ifdef RTLD_DEEPBIND if (PyModule_AddIntMacro(m, RTLD_DEEPBIND ) < 0) goto bad; #endif #ifdef RTLD_FIRST if (PyModule_AddIntMacro(m, RTLD_FIRST ) < 0) goto bad; #endif #ifdef RTLD_DEFAULT if (PyModule_AddPtrMacro(m, RTLD_DEFAULT) < 0) goto bad; #endif #ifdef RTLD_NEXT if (PyModule_AddPtrMacro(m, RTLD_NEXT) < 0) goto bad; #endif #ifdef RTLD_SELF if (PyModule_AddPtrMacro(m, RTLD_SELF) < 0) goto bad; #endif #ifdef RTLD_MAIN_ONLY if (PyModule_AddPtrMacro(m, RTLD_MAIN_ONLY) < 0) goto bad; #endif finally: #if PY_MAJOR_VERSION >= 3 return m; #else return; #endif bad: Py_XDECREF(m); m = NULL; goto finally; } /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/dynload.h000066400000000000000000000032021460670727200153450ustar00rootroot00000000000000/* Author: Lisandro Dalcin * Contact: dalcinl@gmail.com */ #ifndef PyMPI_DYNLOAD_H #define PyMPI_DYNLOAD_H #if HAVE_DLFCN_H #include <dlfcn.h> #else #if defined(__linux) || defined(__linux__) #define RTLD_LAZY 0x00001 #define RTLD_NOW 0x00002 #define RTLD_LOCAL 0x00000 #define RTLD_GLOBAL 0x00100 #define RTLD_NOLOAD 0x00004 #define RTLD_NODELETE 0x01000 #define RTLD_DEEPBIND 0x00008 #elif defined(__sun) || defined(__sun__) #define RTLD_LAZY 0x00001 #define RTLD_NOW 0x00002 #define RTLD_LOCAL 0x00000 #define RTLD_GLOBAL 0x00100 #define RTLD_NOLOAD 0x00004 #define RTLD_NODELETE 0x01000 #define RTLD_FIRST 0x02000 #elif defined(__APPLE__) #define RTLD_LAZY 0x1 #define RTLD_NOW 0x2 #define RTLD_LOCAL 0x4 #define RTLD_GLOBAL 0x8 #define RTLD_NOLOAD 0x10 #define RTLD_NODELETE 0x80 #define RTLD_FIRST 0x100 #elif defined(__CYGWIN__) #define RTLD_LAZY 1 #define RTLD_NOW 2 #define RTLD_LOCAL 0 #define RTLD_GLOBAL 4 #endif #if defined(__cplusplus) extern "C" { #endif extern void *dlopen(const char *, int); extern void *dlsym(void *, const char *); extern int dlclose(void *); extern char *dlerror(void); #if defined(__cplusplus) } #endif #endif #ifndef RTLD_LAZY #define RTLD_LAZY 1 #endif #ifndef RTLD_NOW #define RTLD_NOW RTLD_LAZY #endif #ifndef RTLD_LOCAL #define RTLD_LOCAL 0 #endif #ifndef RTLD_GLOBAL #define RTLD_GLOBAL RTLD_LOCAL #endif #endif /* !PyMPI_DYNLOAD_H */ /* Local 
variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/lib-mpi/000077500000000000000000000000001460670727200150765ustar00rootroot00000000000000mpi4py-3.1.6/src/lib-mpi/compat.h000066400000000000000000000006631460670727200165370ustar00rootroot00000000000000#if defined(MSMPI_VER) #include "compat/msmpi.h" #elif defined(MPICH_NAME) && (MPICH_NAME >= 3) #include "compat/mpich.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 2) #include "compat/mpich2.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 1) #include "compat/mpich1.h" #elif defined(OPEN_MPI) #include "compat/openmpi.h" #elif defined(PLATFORM_MPI) #include "compat/pcmpi.h" #elif defined(LAM_MPI) #include "compat/lammpi.h" #endif mpi4py-3.1.6/src/lib-mpi/compat/000077500000000000000000000000001460670727200163615ustar00rootroot00000000000000mpi4py-3.1.6/src/lib-mpi/compat/lammpi.h000066400000000000000000000271221460670727200200150ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_LAMMPI_H #define PyMPI_COMPAT_LAMMPI_H /* ---------------------------------------------------------------- */ static int PyMPI_LAMMPI_MPI_Info_free(MPI_Info *info) { if (info == 0) return MPI_ERR_ARG; if (*info == MPI_INFO_NULL) return MPI_ERR_ARG; return MPI_Info_free(info); } #undef MPI_Info_free #define MPI_Info_free PyMPI_LAMMPI_MPI_Info_free /* ---------------------------------------------------------------- */ static int PyMPI_LAMMPI_MPI_Cancel(MPI_Request *request) { int ierr = MPI_SUCCESS; ierr = MPI_Cancel(request); if (ierr == MPI_ERR_ARG) { if (request != 0 && *request == MPI_REQUEST_NULL) ierr = MPI_ERR_REQUEST; } return ierr; } #undef MPI_Cancel #define MPI_Cancel PyMPI_LAMMPI_MPI_Cancel static int PyMPI_LAMMPI_MPI_Comm_disconnect(MPI_Comm *comm) { if (comm == 0) return MPI_ERR_ARG; if (*comm == MPI_COMM_NULL) return MPI_ERR_COMM; if (*comm == MPI_COMM_SELF) return MPI_ERR_COMM; if (*comm == MPI_COMM_WORLD) return MPI_ERR_COMM; return MPI_Comm_disconnect(comm); } #undef MPI_Comm_disconnect #define MPI_Comm_disconnect PyMPI_LAMMPI_MPI_Comm_disconnect /* ---------------------------------------------------------------- */ #if defined(__cplusplus) extern "C" { #endif struct _errhdl { void (*eh_func)(void); int eh_refcount; int eh_f77handle; int eh_flags; }; #if defined(__cplusplus) } #endif static int PyMPI_LAMMPI_Errhandler_free(MPI_Errhandler *errhandler) { if (errhandler == 0) return MPI_ERR_ARG; if (*errhandler == MPI_ERRORS_RETURN || *errhandler == MPI_ERRORS_ARE_FATAL) { struct _errhdl *eh = (struct _errhdl *) (*errhandler); eh->eh_refcount--; *errhandler = MPI_ERRHANDLER_NULL; return MPI_SUCCESS; } else { return MPI_Errhandler_free(errhandler); } } #undef MPI_Errhandler_free #define MPI_Errhandler_free PyMPI_LAMMPI_Errhandler_free /* -- */ static int PyMPI_LAMMPI_MPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler *errhandler) { int ierr = MPI_SUCCESS; if (comm == MPI_COMM_NULL) return MPI_ERR_COMM; if (errhandler == 0) return MPI_ERR_ARG; /* get error handler */ ierr = MPI_Errhandler_get(comm, errhandler); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Errhandler_get #define MPI_Errhandler_get PyMPI_LAMMPI_MPI_Comm_get_errhandler #undef MPI_Comm_get_errhandler #define MPI_Comm_get_errhandler PyMPI_LAMMPI_MPI_Comm_get_errhandler static int PyMPI_LAMMPI_MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler) { int ierr = MPI_SUCCESS, ierr2 = MPI_SUCCESS; MPI_Errhandler previous = MPI_ERRHANDLER_NULL; if (comm == MPI_COMM_NULL) return MPI_ERR_COMM; if (errhandler == MPI_ERRHANDLER_NULL) return 
MPI_ERR_ARG; /* get previous error handler*/ ierr2 = MPI_Errhandler_get(comm, &previous); if (ierr2 != MPI_SUCCESS) return ierr2; /* increment reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount++; } /* set error handler */ ierr = MPI_Errhandler_set(comm, errhandler); /* decrement reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount--; } /* free previous error handler*/ if (previous != MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&previous); } if (ierr != MPI_SUCCESS) return ierr; if (ierr2 != MPI_SUCCESS) return ierr2; return MPI_SUCCESS; } #undef MPI_Errhandler_set #define MPI_Errhandler_set PyMPI_LAMMPI_MPI_Comm_set_errhandler #undef MPI_Comm_set_errhandler #define MPI_Comm_set_errhandler PyMPI_LAMMPI_MPI_Comm_set_errhandler /* -- */ static int PyMPI_LAMMPI_MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler) { int ierr = MPI_SUCCESS; if (win == MPI_WIN_NULL) return MPI_ERR_WIN; if (errhandler == 0) return MPI_ERR_ARG; /* get error handler */ ierr = MPI_Win_get_errhandler(win, errhandler); if (ierr != MPI_SUCCESS) return ierr; /* increment reference counter */ if (*errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (*errhandler); eh->eh_refcount++; } return MPI_SUCCESS; } #undef MPI_Win_get_errhandler #define MPI_Win_get_errhandler PyMPI_LAMMPI_MPI_Win_get_errhandler static int PyMPI_LAMMPI_MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler) { int ierr = MPI_SUCCESS, ierr2 = MPI_SUCCESS; MPI_Errhandler previous = MPI_ERRHANDLER_NULL; if (win == MPI_WIN_NULL) return MPI_ERR_WIN; if (errhandler == MPI_ERRHANDLER_NULL) return MPI_ERR_ARG; /* get previous error handler*/ ierr2 = MPI_Win_get_errhandler(win, &previous); if (ierr2 != MPI_SUCCESS) return ierr2; /* increment reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount++; } /* set error handler */ ierr = MPI_Win_set_errhandler(win, errhandler); /* decrement reference counter */ if (errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (errhandler); eh->eh_refcount--; } /* free previous error handler*/ if (previous != MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&previous); } if (ierr != MPI_SUCCESS) return ierr; if (ierr2 != MPI_SUCCESS) return ierr2; return MPI_SUCCESS; } #undef MPI_Win_set_errhandler #define MPI_Win_set_errhandler PyMPI_LAMMPI_MPI_Win_set_errhandler static int PyMPI_LAMMPI_MPI_Win_create(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win) { int ierr = MPI_SUCCESS; MPI_Errhandler errhandler = MPI_ERRHANDLER_NULL; ierr = MPI_Win_create(base, size, disp_unit, info, comm, win); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Win_get_errhandler(*win, &errhandler); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Win_create #define MPI_Win_create PyMPI_LAMMPI_MPI_Win_create static int PyMPI_LAMMPI_MPI_Win_free(MPI_Win *win) { int ierr = MPI_SUCCESS, ierr2 = MPI_SUCCESS; MPI_Errhandler errhandler = MPI_ERRHANDLER_NULL; if (win != 0 && *win != MPI_WIN_NULL ) { MPI_Errhandler previous; ierr2 = MPI_Win_get_errhandler(*win, &previous); if (ierr2 != MPI_SUCCESS) return ierr2; errhandler = previous; if (previous != MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&previous); if (ierr2 != MPI_SUCCESS) return ierr2; } } ierr = MPI_Win_free(win); if (errhandler 
!= MPI_ERRHANDLER_NULL) { ierr2 = MPI_Errhandler_free(&errhandler); if (ierr2 != MPI_SUCCESS) return ierr2; } if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Win_free #define MPI_Win_free PyMPI_LAMMPI_MPI_Win_free /* -- */ #if defined(ROMIO_VERSION) #if defined(__cplusplus) extern "C" { #endif #define ADIOI_FILE_COOKIE 2487376 #define FDTYPE int #define ADIO_Offset MPI_Offset #define ADIOI_Fns struct ADIOI_Fns_struct #define ADIOI_Hints struct ADIOI_Hints_struct extern MPI_Errhandler ADIOI_DFLT_ERR_HANDLER; struct ADIOI_FileD { int cookie; /* for error checking */ FDTYPE fd_sys; /* system file descriptor */ #ifdef XFS int fd_direct; /* On XFS, this is used for direct I/O; fd_sys is used for buffered I/O */ int direct_read; /* flag; 1 means use direct read */ int direct_write; /* flag; 1 means use direct write */ /* direct I/O attributes */ unsigned d_mem; /* data buffer memory alignment */ unsigned d_miniosz; /* min xfer size, xfer size multiple, and file seek offset alignment */ unsigned d_maxiosz; /* max xfer size */ #endif ADIO_Offset fp_ind; /* individual file pointer in MPI-IO (in bytes)*/ ADIO_Offset fp_sys_posn; /* current location of the system file-pointer in bytes */ ADIOI_Fns *fns; /* struct of I/O functions to use */ MPI_Comm comm; /* communicator indicating who called open */ char *filename; int file_system; /* type of file system */ int access_mode; ADIO_Offset disp; /* reqd. for MPI-IO */ MPI_Datatype etype; /* reqd. for MPI-IO */ MPI_Datatype filetype; /* reqd. for MPI-IO */ int etype_size; /* in bytes */ ADIOI_Hints *hints; /* structure containing fs-indep. info values */ MPI_Info info; int split_coll_count; /* count of outstanding split coll. ops. */ char *shared_fp_fname; /* name of file containing shared file pointer */ struct ADIOI_FileD *shared_fp_fd; /* file handle of file containing shared fp */ int async_count; /* count of outstanding nonblocking operations */ int perm; int atomicity; /* true=atomic, false=nonatomic */ int iomode; /* reqd. 
to implement Intel PFS modes */ MPI_Errhandler err_handler; }; #if defined(__cplusplus) } #endif static int PyMPI_LAMMPI_MPI_File_get_errhandler(MPI_File file, MPI_Errhandler *errhandler) { /* check arguments */ if (file != MPI_FILE_NULL) { struct ADIOI_FileD * fh = (struct ADIOI_FileD *) file; if (fh->cookie != ADIOI_FILE_COOKIE) return MPI_ERR_ARG; } if (errhandler == 0) return MPI_ERR_ARG; /* get error handler */ if (file == MPI_FILE_NULL) { *errhandler = ADIOI_DFLT_ERR_HANDLER; } else { struct ADIOI_FileD * fh = (struct ADIOI_FileD *) file; *errhandler = fh->err_handler; } /* increment reference counter */ if (*errhandler != MPI_ERRHANDLER_NULL) { struct _errhdl *eh = (struct _errhdl *) (*errhandler); eh->eh_refcount++; } return MPI_SUCCESS; } #undef MPI_File_get_errhandler #define MPI_File_get_errhandler PyMPI_LAMMPI_MPI_File_get_errhandler static int PyMPI_LAMMPI_MPI_File_set_errhandler(MPI_File file, MPI_Errhandler errhandler) { /* check arguments */ if (file != MPI_FILE_NULL) { struct ADIOI_FileD * fh = (struct ADIOI_FileD *) file; if (fh->cookie != ADIOI_FILE_COOKIE) return MPI_ERR_ARG; } if (errhandler == MPI_ERRHANDLER_NULL) return MPI_ERR_ARG; if (errhandler != MPI_ERRORS_RETURN && errhandler != MPI_ERRORS_ARE_FATAL) return MPI_ERR_ARG; /* increment reference counter */ if (errhandler != MPI_ERRHANDLER_NULL ) { struct _errhdl *eh = (struct _errhdl *) errhandler; eh->eh_refcount++; } /* set error handler */ if (file == MPI_FILE_NULL) { MPI_Errhandler tmp = ADIOI_DFLT_ERR_HANDLER; ADIOI_DFLT_ERR_HANDLER = errhandler; errhandler = tmp; } else { struct ADIOI_FileD *fh = (struct ADIOI_FileD *) file; MPI_Errhandler tmp = fh->err_handler; fh->err_handler = errhandler; errhandler = tmp; } /* decrement reference counter */ if (errhandler != MPI_ERRHANDLER_NULL ) { struct _errhdl *eh = (struct _errhdl *) errhandler; eh->eh_refcount--; } return MPI_SUCCESS; } #undef MPI_File_set_errhandler #define MPI_File_set_errhandler PyMPI_LAMMPI_MPI_File_set_errhandler #endif /* ---------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_LAMMPI_H */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/lib-mpi/compat/mpich.h000066400000000000000000000131331460670727200176330ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPICH_H #define PyMPI_COMPAT_MPICH_H #if defined(MPICH_NUMVERSION) #if (MPICH_NUMVERSION >= 30400000 && MPICH_NUMVERSION < 40000000) static int PyMPI_MPICH_MPI_Win_get_attr(MPI_Win win, int keyval, void *attrval, int *flag) { int ierr; static MPI_Aint zero[1] = {0}; zero[0] = 0; ierr = MPI_Win_get_attr(win, keyval, attrval, flag); if (ierr) return ierr; if (keyval == MPI_WIN_SIZE && flag && *flag && attrval) if (**((MPI_Aint**)attrval) == -1) *((void**)attrval) = zero; return ierr; } #define MPI_Win_get_attr PyMPI_MPICH_MPI_Win_get_attr #endif #if (MPICH_NUMVERSION == 30101300) static int PyMPI_MPICH_MPI_Status_c2f(const MPI_Status *c_status, MPI_Fint *f_status) { if (c_status == MPI_STATUS_IGNORE || c_status == MPI_STATUSES_IGNORE) return MPI_ERR_OTHER; *(MPI_Status *)f_status = *c_status; return MPI_SUCCESS; } #define MPI_Status_c2f PyMPI_MPICH_MPI_Status_c2f #endif #if (MPICH_NUMVERSION < 30100301) static int PyMPI_MPICH_MPI_Add_error_class(int *errorclass) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_class(errorclass); if (ierr) return ierr; return MPI_Add_error_string(*errorclass,errstr); } #undef MPI_Add_error_class #define MPI_Add_error_class PyMPI_MPICH_MPI_Add_error_class static int 
PyMPI_MPICH_MPI_Add_error_code(int errorclass, int *errorcode) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_code(errorclass,errorcode); if (ierr) return ierr; return MPI_Add_error_string(*errorcode,errstr); } #undef MPI_Add_error_code #define MPI_Add_error_code PyMPI_MPICH_MPI_Add_error_code #endif #if (MPICH_NUMVERSION < 30100000) static int PyMPI_MPICH_MPI_Type_size_x(MPI_Datatype datatype, MPI_Count *size) { int ierr = MPI_Type_commit(&datatype); if (ierr) return ierr; return MPI_Type_size_x(datatype,size); } #undef MPI_Type_size_x #define MPI_Type_size_x PyMPI_MPICH_MPI_Type_size_x static int PyMPI_MPICH_MPI_Type_get_extent_x(MPI_Datatype datatype, MPI_Count *lb, MPI_Count *extent) { int ierr = MPI_Type_commit(&datatype); if (ierr) return ierr; return MPI_Type_get_extent_x(datatype,lb,extent); } #undef MPI_Type_get_extent_x #define MPI_Type_get_extent_x PyMPI_MPICH_MPI_Type_get_extent_x static int PyMPI_MPICH_MPI_Type_get_true_extent_x(MPI_Datatype datatype, MPI_Count *lb, MPI_Count *extent) { int ierr = MPI_Type_commit(&datatype); if (ierr) return ierr; return MPI_Type_get_true_extent_x(datatype,lb,extent); } #undef MPI_Type_get_true_extent_x #define MPI_Type_get_true_extent_x PyMPI_MPICH_MPI_Type_get_true_extent_x static int PyMPI_MPICH_MPI_Get_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) { double origin_buf, result_buf; if (!origin_addr && !origin_count) origin_addr = (const void *)&origin_buf; if (!result_addr && !result_count) result_addr = (void *)&result_buf; return MPI_Get_accumulate(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win); } #undef MPI_Get_accumulate #define MPI_Get_accumulate PyMPI_MPICH_MPI_Get_accumulate static int PyMPI_MPICH_MPI_Rget_accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request *request) { double origin_buf, result_buf; if (!origin_addr && !origin_count) origin_addr = (const void *)&origin_buf; if (!result_addr && !result_count) result_addr = (void *)&result_buf; return MPI_Rget_accumulate(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win, request); } #undef MPI_Rget_accumulate #define MPI_Rget_accumulate PyMPI_MPICH_MPI_Rget_accumulate #endif #endif /* !MPICH_NUMVERSION */ #endif /* !PyMPI_COMPAT_MPICH_H */ mpi4py-3.1.6/src/lib-mpi/compat/mpich1.h000066400000000000000000000136161460670727200177220ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPICH1_H #define PyMPI_COMPAT_MPICH1_H /* ---------------------------------------------------------------- */ static int PyMPI_MPICH1_argc = 0; static char *PyMPI_MPICH1_argv[1] = {(char*)0}; static void PyMPI_MPICH1_FixArgs(int **argc, char ****argv) { if (argc[0] && argv[0]) return; argc[0] = (int *) &PyMPI_MPICH1_argc; argv[0] = (char ***) &PyMPI_MPICH1_argv; } static int PyMPI_MPICH1_MPI_Init(int *argc, char ***argv) { PyMPI_MPICH1_FixArgs(&argc, &argv); return MPI_Init(argc, argv); } #undef MPI_Init #define MPI_Init PyMPI_MPICH1_MPI_Init static int 
PyMPI_MPICH1_MPI_Init_thread(int *argc, char ***argv, int required, int *provided) { PyMPI_MPICH1_FixArgs(&argc, &argv); return MPI_Init_thread(argc, argv, required, provided); } #undef MPI_Init_thread #define MPI_Init_thread PyMPI_MPICH1_MPI_Init_thread /* ---------------------------------------------------------------- */ #undef MPI_SIGNED_CHAR #define MPI_SIGNED_CHAR MPI_CHAR /* ---------------------------------------------------------------- */ static int PyMPI_MPICH1_MPI_Status_set_elements(MPI_Status *status, MPI_Datatype datatype, int count) { if (datatype == MPI_DATATYPE_NULL) return MPI_ERR_TYPE; return MPI_Status_set_elements(status, datatype, count); } #undef MPI_Status_set_elements #define MPI_Status_set_elements PyMPI_MPICH1_MPI_Status_set_elements /* ---------------------------------------------------------------- */ static int PyMPI_MPICH1_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag, void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, MPI_Status *status) { MPI_Status dummy; if (status == MPI_STATUS_IGNORE) status = &dummy; return MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype, source, recvtag, comm, status); } #undef MPI_Sendrecv #define MPI_Sendrecv PyMPI_MPICH1_MPI_Sendrecv static int PyMPI_MPICH1_MPI_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype, int dest, int sendtag, int source, int recvtag, MPI_Comm comm, MPI_Status *status) { MPI_Status dummy; if (status == MPI_STATUS_IGNORE) status = &dummy; return MPI_Sendrecv_replace(buf, count, datatype, dest, sendtag, source, recvtag, comm, status); } #undef MPI_Sendrecv_replace #define MPI_Sendrecv_replace PyMPI_MPICH1_MPI_Sendrecv_replace /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Win #undef MPI_Win_c2f #define MPI_Win_c2f(win) ((MPI_Fint)0) #undef MPI_Win_f2c #define MPI_Win_f2c(win) MPI_WIN_NULL #endif /* ---------------------------------------------------------------- */ #if defined(__cplusplus) extern "C" { #endif extern void *MPIR_ToPointer(int); #if defined(__cplusplus) } #endif #if defined(ROMIO_VERSION) #if defined(__cplusplus) extern "C" { #endif struct MPIR_Errhandler { unsigned long cookie; MPI_Handler_function *routine; int ref_count; }; #if defined(__cplusplus) } #endif static int PyMPI_MPICH1_MPI_File_get_errhandler(MPI_File file, MPI_Errhandler *errhandler) { int ierr = MPI_SUCCESS; ierr = MPI_File_get_errhandler(file, errhandler); if (ierr != MPI_SUCCESS) return ierr; if (errhandler == 0) return ierr; /* just in case */ /* manage reference counting */ if (*errhandler != MPI_ERRHANDLER_NULL) { struct MPIR_Errhandler *eh = (struct MPIR_Errhandler *) MPIR_ToPointer(*errhandler); if (eh) eh->ref_count++; } return MPI_SUCCESS; } static int PyMPI_MPICH1_MPI_File_set_errhandler(MPI_File file, MPI_Errhandler errhandler) { int ierr = MPI_SUCCESS; MPI_Errhandler previous = MPI_ERRHANDLER_NULL; ierr = MPI_File_get_errhandler(file, &previous); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_File_set_errhandler(file, errhandler); if (ierr != MPI_SUCCESS) return ierr; /* manage reference counting */ if (previous != MPI_ERRHANDLER_NULL) { struct MPIR_Errhandler *eh = (struct MPIR_Errhandler *) MPIR_ToPointer(previous); if (eh) eh->ref_count--; } if (errhandler != MPI_ERRHANDLER_NULL) { struct MPIR_Errhandler *eh = (struct MPIR_Errhandler *) MPIR_ToPointer(errhandler); if (eh) eh->ref_count++; } return MPI_SUCCESS; } #undef 
MPI_File_get_errhandler #define MPI_File_get_errhandler PyMPI_MPICH1_MPI_File_get_errhandler #undef MPI_File_set_errhandler #define MPI_File_set_errhandler PyMPI_MPICH1_MPI_File_set_errhandler #endif /* !ROMIO_VERSION */ /* ---------------------------------------------------------------- */ #undef MPI_ERR_KEYVAL #define MPI_ERR_KEYVAL MPI_ERR_OTHER #undef MPI_MAX_OBJECT_NAME #define MPI_MAX_OBJECT_NAME MPI_MAX_NAME_STRING /* ---------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_MPICH1_H */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/lib-mpi/compat/mpich2.h000066400000000000000000000014711460670727200177170ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MPICH2_H #define PyMPI_COMPAT_MPICH2_H static int PyMPI_MPICH2_MPI_Add_error_class(int *errorclass) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_class(errorclass); if (ierr) return ierr; return MPI_Add_error_string(*errorclass,errstr); } #undef MPI_Add_error_class #define MPI_Add_error_class PyMPI_MPICH2_MPI_Add_error_class static int PyMPI_MPICH2_MPI_Add_error_code(int errorclass, int *errorcode) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_code(errorclass,errorcode); if (ierr) return ierr; return MPI_Add_error_string(*errorcode,errstr); } #undef MPI_Add_error_code #define MPI_Add_error_code PyMPI_MPICH2_MPI_Add_error_code #if defined(__SICORTEX__) #include "sicortex.h" #endif #endif /* !PyMPI_COMPAT_MPICH2_H */ mpi4py-3.1.6/src/lib-mpi/compat/msmpi.h000066400000000000000000000016121460670727200176570ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_MSMPI_H #define PyMPI_COMPAT_MSMPI_H static int PyMPI_MSMPI_MPI_Add_error_class(int *errorclass) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_class(errorclass); if (ierr) return ierr; return MPI_Add_error_string(*errorclass,errstr); } #undef MPI_Add_error_class #define MPI_Add_error_class PyMPI_MSMPI_MPI_Add_error_class static int PyMPI_MSMPI_MPI_Add_error_code(int errorclass, int *errorcode) { int ierr; char errstr[1] = {0}; ierr = MPI_Add_error_code(errorclass,errorcode); if (ierr) return ierr; return MPI_Add_error_string(*errorcode,errstr); } #undef MPI_Add_error_code #define MPI_Add_error_code PyMPI_MSMPI_MPI_Add_error_code #if defined(MPICH_NAME) #undef MPI_File_c2f #define MPI_File_c2f PMPI_File_c2f #undef MPI_File_f2c #define MPI_File_f2c PMPI_File_f2c #endif #endif /* !PyMPI_COMPAT_MSMPI_H */ mpi4py-3.1.6/src/lib-mpi/compat/openmpi.h000066400000000000000000000254671460670727200202170ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_OPENMPI_H #define PyMPI_COMPAT_OPENMPI_H /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* * The hackery below redefines the actuall calls to 'MPI_Init()' and * 'MPI_Init_thread()' in order to preload the main MPI dynamic * library with appropriate flags to 'dlopen()' ensuring global * availability of library symbols. 
*/ #if !defined(OPENMPI_DLOPEN_LIBMPI) && defined(OMPI_MAJOR_VERSION) #if OMPI_MAJOR_VERSION >= 3 && OMPI_MAJOR_VERSION < 10 #define OPENMPI_DLOPEN_LIBMPI 0 #endif #endif #ifndef OPENMPI_DLOPEN_LIBMPI #define OPENMPI_DLOPEN_LIBMPI 1 #endif #if OPENMPI_DLOPEN_LIBMPI #if HAVE_DLOPEN #include "../../dynload.h" /* static void * my_dlopen(const char *name, int mode) { void *handle; static int called = 0; if (!called) { called = 1; #if HAVE_DLFCN_H printf("HAVE_DLFCN_H: yes\n"); #else printf("HAVE_DLFCN_H: no\n"); #endif printf("\n"); printf("RTLD_LAZY: 0x%X\n", RTLD_LAZY ); printf("RTLD_NOW: 0x%X\n", RTLD_NOW ); printf("RTLD_LOCAL: 0x%X\n", RTLD_LOCAL ); printf("RTLD_GLOBAL: 0x%X\n", RTLD_GLOBAL ); #ifdef RTLD_NOLOAD printf("RTLD_NOLOAD: 0x%X\n", RTLD_NOLOAD ); #endif printf("\n"); } handle = dlopen(name, mode); printf("dlopen(\"%s\",0x%X) -> %p\n", name, mode, handle); printf("dlerror() -> %s\n\n", dlerror()); return handle; } #define dlopen my_dlopen */ static void PyMPI_OPENMPI_dlopen_libmpi(void) { void *handle = 0; int mode = RTLD_NOW | RTLD_GLOBAL; #if defined(__APPLE__) /* macOS */ #ifdef RTLD_NOLOAD mode |= RTLD_NOLOAD; #endif #if defined(OMPI_MAJOR_VERSION) #if OMPI_MAJOR_VERSION >= 4 if (!handle) handle = dlopen("libmpi.40.dylib", mode); #elif OMPI_MAJOR_VERSION == 3 if (!handle) handle = dlopen("libmpi.40.dylib", mode); #elif OMPI_MAJOR_VERSION == 2 if (!handle) handle = dlopen("libmpi.20.dylib", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 10 if (!handle) handle = dlopen("libmpi.12.dylib", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 6 if (!handle) handle = dlopen("libmpi.1.dylib", mode); #elif OMPI_MAJOR_VERSION == 1 if (!handle) handle = dlopen("libmpi.0.dylib", mode); #endif #endif if (!handle) handle = dlopen("libmpi.dylib", mode); #else /* GNU/Linux and others */ #ifdef RTLD_NOLOAD mode |= RTLD_NOLOAD; #endif #if defined(OMPI_MAJOR_VERSION) #if OMPI_MAJOR_VERSION >= 10 /* IBM Spectrum MPI */ if (!handle) handle = dlopen("libmpi_ibm.so.2", mode); if (!handle) handle = dlopen("libmpi_ibm.so.1", mode); if (!handle) handle = dlopen("libmpi_ibm.so", mode); #elif OMPI_MAJOR_VERSION >= 4 if (!handle) handle = dlopen("libmpi.so.40", mode); #elif OMPI_MAJOR_VERSION == 3 if (!handle) handle = dlopen("libmpi.so.40", mode); #elif OMPI_MAJOR_VERSION == 2 if (!handle) handle = dlopen("libmpi.so.20", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 10 if (!handle) handle = dlopen("libmpi.so.12", mode); #elif OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION >= 6 if (!handle) handle = dlopen("libmpi.so.1", mode); #elif OMPI_MAJOR_VERSION == 1 if (!handle) handle = dlopen("libmpi.so.0", mode); #endif #endif if (!handle) handle = dlopen("libmpi.so", mode); #endif } static int PyMPI_OPENMPI_MPI_Init(int *argc, char ***argv) { PyMPI_OPENMPI_dlopen_libmpi(); return MPI_Init(argc, argv); } #undef MPI_Init #define MPI_Init PyMPI_OPENMPI_MPI_Init static int PyMPI_OPENMPI_MPI_Init_thread(int *argc, char ***argv, int required, int *provided) { PyMPI_OPENMPI_dlopen_libmpi(); return MPI_Init_thread(argc, argv, required, provided); } #undef MPI_Init_thread #define MPI_Init_thread PyMPI_OPENMPI_MPI_Init_thread #endif /* !HAVE_DLOPEN */ #endif /* !OPENMPI_DLOPEN_LIBMPI */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ #if (defined(OMPI_MAJOR_VERSION) && \ 
defined(OMPI_MINOR_VERSION) && \ defined(OMPI_RELEASE_VERSION)) #define PyMPI_OPENMPI_VERSION ((OMPI_MAJOR_VERSION * 10000) + \ (OMPI_MINOR_VERSION * 100) + \ (OMPI_RELEASE_VERSION * 1)) #else #define PyMPI_OPENMPI_VERSION 10000 #endif /* ------------------------------------------------------------------------- */ /* * Open MPI < 1.1.3 generates an error when MPI_File_get_errhandler() * is called with the predefined error handlers MPI_ERRORS_RETURN and * MPI_ERRORS_ARE_FATAL. */ #if PyMPI_OPENMPI_VERSION < 10103 static int PyMPI_OPENMPI_Errhandler_free(MPI_Errhandler *errhandler) { if (errhandler && ((*errhandler == MPI_ERRORS_RETURN) || (*errhandler == MPI_ERRORS_ARE_FATAL))) { *errhandler = MPI_ERRHANDLER_NULL; return MPI_SUCCESS; } return MPI_Errhandler_free(errhandler); } #undef MPI_Errhandler_free #define MPI_Errhandler_free PyMPI_OPENMPI_Errhandler_free #endif /* !(PyMPI_OPENMPI_VERSION < 10103) */ /* ------------------------------------------------------------------------- */ /* * Open MPI 1.1 generates an error when MPI_File_get_errhandler() is * called with the MPI_FILE_NULL handle. The code below try to fix * this bug by intercepting the calls to the functions setting and * getting the error handlers for MPI_File's. */ #if PyMPI_OPENMPI_VERSION < 10200 static MPI_Errhandler PyMPI_OPENMPI_FILE_NULL_ERRHANDLER = (MPI_Errhandler)0; static int PyMPI_OPENMPI_File_get_errhandler(MPI_File file, MPI_Errhandler *errhandler) { if (file == MPI_FILE_NULL) { if (PyMPI_OPENMPI_FILE_NULL_ERRHANDLER == (MPI_Errhandler)0) { PyMPI_OPENMPI_FILE_NULL_ERRHANDLER = MPI_ERRORS_RETURN; } *errhandler = PyMPI_OPENMPI_FILE_NULL_ERRHANDLER; return MPI_SUCCESS; } return MPI_File_get_errhandler(file, errhandler); } #undef MPI_File_get_errhandler #define MPI_File_get_errhandler PyMPI_OPENMPI_File_get_errhandler static int PyMPI_OPENMPI_File_set_errhandler(MPI_File file, MPI_Errhandler errhandler) { int ierr = MPI_File_set_errhandler(file, errhandler); if (ierr != MPI_SUCCESS) return ierr; if (file == MPI_FILE_NULL) { PyMPI_OPENMPI_FILE_NULL_ERRHANDLER = errhandler; } return ierr; } #undef MPI_File_set_errhandler #define MPI_File_set_errhandler PyMPI_OPENMPI_File_set_errhandler #endif /* !(PyMPI_OPENMPI_VERSION < 10200) */ /* ---------------------------------------------------------------- */ #if PyMPI_OPENMPI_VERSION < 10301 static MPI_Fint PyMPI_OPENMPI_File_c2f(MPI_File file) { if (file == MPI_FILE_NULL) return (MPI_Fint)0; return MPI_File_c2f(file); } #define MPI_File_c2f PyMPI_OPENMPI_File_c2f #endif /* !(PyMPI_OPENMPI_VERSION < 10301) */ /* ------------------------------------------------------------------------- */ #if PyMPI_OPENMPI_VERSION < 10402 static int PyMPI_OPENMPI_MPI_Cancel(MPI_Request *request) { if (request && *request == MPI_REQUEST_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_REQUEST); return MPI_ERR_REQUEST; } return MPI_Cancel(request); } #undef MPI_Cancel #define MPI_Cancel PyMPI_OPENMPI_MPI_Cancel static int PyMPI_OPENMPI_MPI_Request_free(MPI_Request *request) { if (request && *request == MPI_REQUEST_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_REQUEST); return MPI_ERR_REQUEST; } return MPI_Request_free(request); } #undef MPI_Request_free #define MPI_Request_free PyMPI_OPENMPI_MPI_Request_free static int PyMPI_OPENMPI_MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler) { if (win == MPI_WIN_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_WIN); return MPI_ERR_WIN; } return MPI_Win_get_errhandler(win, errhandler); } #undef MPI_Win_get_errhandler 
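/* The wrappers above and below guard against null handles: rather than letting Open MPI < 1.4.2 crash on MPI_REQUEST_NULL or MPI_WIN_NULL, they report the corresponding error class through the error handler attached to MPI_COMM_WORLD. */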
#define MPI_Win_get_errhandler PyMPI_OPENMPI_MPI_Win_get_errhandler static int PyMPI_OPENMPI_MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler) { if (win == MPI_WIN_NULL) { MPI_Comm_call_errhandler(MPI_COMM_WORLD, MPI_ERR_WIN); return MPI_ERR_WIN; } return MPI_Win_set_errhandler(win, errhandler); } #undef MPI_Win_set_errhandler #define MPI_Win_set_errhandler PyMPI_OPENMPI_MPI_Win_set_errhandler #endif /* !(PyMPI_OPENMPI_VERSION < 10402) */ /* ------------------------------------------------------------------------- */ /* * Open MPI 1.7 tries to set status even in the case of MPI_STATUS_IGNORE. */ #if PyMPI_OPENMPI_VERSION >= 10700 && PyMPI_OPENMPI_VERSION < 10800 static int PyMPI_OPENMPI_MPI_Mrecv(void *buf, int count, MPI_Datatype type, MPI_Message *message, MPI_Status *status) { MPI_Status sts; if (status == MPI_STATUS_IGNORE) status = &sts; return MPI_Mrecv(buf, count, type, message, status); } #undef MPI_Mrecv #define MPI_Mrecv PyMPI_OPENMPI_MPI_Mrecv #endif /* !(PyMPI_OPENMPI_VERSION > 10700) */ /* ------------------------------------------------------------------------- */ /* * Open MPI < 1.10.3 errors with MPI_Get_address(MPI_BOTTOM, &address). */ #if PyMPI_OPENMPI_VERSION < 11003 static int PyMPI_OPENMPI_Get_address(const void *location, MPI_Aint *address) { if (location == MPI_BOTTOM && address) { *address = 0; return MPI_SUCCESS; } return MPI_Get_address(location, address); } #undef MPI_Get_address #define MPI_Get_address PyMPI_OPENMPI_Get_address #endif /* ------------------------------------------------------------------------- */ /* * Open MPI < 2.0.0 matched probes do not return MPI_MESSAGE_NO_PROC * for source=MPI_PROC_NULL if status=MPI_STATUS_IGNORE. */ #if PyMPI_OPENMPI_VERSION < 20000 static int PyMPI_OPENMPI_Mprobe(int source, int tag, MPI_Comm comm, MPI_Message *message, MPI_Status *status) { MPI_Status _pympi_status; if (source == MPI_PROC_NULL && status == MPI_STATUS_IGNORE) status = &_pympi_status; return MPI_Mprobe(source, tag, comm, message, status); } #undef MPI_Mprobe #define MPI_Mprobe PyMPI_OPENMPI_Mprobe static int PyMPI_OPENMPI_Improbe(int source, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status) { MPI_Status _pympi_status; if (source == MPI_PROC_NULL && status == MPI_STATUS_IGNORE) status = &_pympi_status; return MPI_Improbe(source, tag, comm, flag, message, status); } #undef MPI_Improbe #define MPI_Improbe PyMPI_OPENMPI_Improbe #endif /* ------------------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_OPENMPI_H */ /* Local Variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/lib-mpi/compat/pcmpi.h000066400000000000000000000055601460670727200176500ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_PCMPI_H #define PyMPI_COMPAT_PCMPI_H /* ---------------------------------------------------------------- */ static int PyMPI_PCMPI_MPI_Request_get_status(MPI_Request request, int *flag, MPI_Status *status) { MPI_Status sts; if (!status || status == MPI_STATUS_IGNORE || status == MPI_STATUSES_IGNORE) status = &sts; return MPI_Request_get_status(request, flag, status); } #undef MPI_Request_get_status #define MPI_Request_get_status PyMPI_PCMPI_MPI_Request_get_status /* ---------------------------------------------------------------- */ static int PyMPI_PCMPI_MPI_Win_get_attr(MPI_Win win, int keyval, void *attrval, int *flag) { int ierr; ierr = MPI_Win_get_attr(win, keyval, attrval, flag); if (ierr == MPI_SUCCESS && keyval == MPI_WIN_BASE && *flag) *((void **)attrval) = 
**((void ***)attrval); return ierr; } #undef MPI_Win_get_attr #define MPI_Win_get_attr PyMPI_PCMPI_MPI_Win_get_attr /* ---------------------------------------------------------------- */ #ifndef PCMPI_DLOPEN_LIBMPI #define PCMPI_DLOPEN_LIBMPI 1 #endif #if PCMPI_DLOPEN_LIBMPI #if HAVE_DLOPEN #include "../../dynload.h" static void PyMPI_PCMPI_dlopen_libmpi(void) { void *handle1 = (void *)0; void *handle2 = (void *)0; int mode = RTLD_NOW | RTLD_GLOBAL; #ifdef RTLD_NOLOAD mode |= RTLD_NOLOAD; #endif #if defined(__APPLE__) if (!handle1) handle1 = dlopen("libmpi.2.dylib", mode); if (!handle1) handle1 = dlopen("libmpi.1.dylib", mode); if (!handle1) handle1 = dlopen("libmpi.dylib", mode); if (!handle2) handle2 = dlopen("libmpio.2.dylib", mode); if (!handle2) handle2 = dlopen("libmpio.1.dylib", mode); if (!handle2) handle2 = dlopen("libmpio.dylib", mode); #else if (!handle1) handle1 = dlopen("libmpi.so.2", mode); if (!handle1) handle1 = dlopen("libmpi.so.1", mode); if (!handle1) handle1 = dlopen("libmpi.so", mode); if (!handle2) handle2 = dlopen("libmpio.so.2", mode); if (!handle2) handle2 = dlopen("libmpio.so.1", mode); if (!handle2) handle2 = dlopen("libmpio.so", mode); #endif } static int PyMPI_PCMPI_MPI_Init(int *argc, char ***argv) { PyMPI_PCMPI_dlopen_libmpi(); return MPI_Init(argc, argv); } #undef MPI_Init #define MPI_Init PyMPI_PCMPI_MPI_Init static int PyMPI_PCMPI_MPI_Init_thread(int *argc, char ***argv, int required, int *provided) { PyMPI_PCMPI_dlopen_libmpi(); return MPI_Init_thread(argc, argv, required, provided); } #undef MPI_Init_thread #define MPI_Init_thread PyMPI_PCMPI_MPI_Init_thread #endif /* !HAVE_DLOPEN */ #endif /* !PCMPI_DLOPEN_LIBMPI */ /* ---------------------------------------------------------------- */ #endif /* !PyMPI_COMPAT_PCMPI_H */ mpi4py-3.1.6/src/lib-mpi/compat/sicortex.h000066400000000000000000000013571460670727200204000ustar00rootroot00000000000000#ifndef PyMPI_COMPAT_SICORTEX_H #define PyMPI_COMPAT_SICORTEX_H #include "../../dynload.h" static void PyMPI_SCMPI_dlopen_libslurm(void) { (void)dlopen("libslurm.so", RTLD_NOW|RTLD_GLOBAL|RTLD_NOLOAD); (void)dlerror(); } static int PyMPI_SCMPI_MPI_Init(int *argc, char ***argv) { PyMPI_SCMPI_dlopen_libslurm(); return MPI_Init(argc, argv); } #undef MPI_Init #define MPI_Init PyMPI_SCMPI_MPI_Init static int PyMPI_SCMPI_MPI_Init_thread(int *argc, char ***argv, int required, int *provided) { PyMPI_SCMPI_dlopen_libslurm(); return MPI_Init_thread(argc, argv, required, provided); } #undef MPI_Init_thread #define MPI_Init_thread PyMPI_SCMPI_MPI_Init_thread #endif /* !PyMPI_COMPAT_SICORTEX_H */ mpi4py-3.1.6/src/lib-mpi/config.h000066400000000000000000000021621460670727200165150ustar00rootroot00000000000000#if defined(MS_WINDOWS) # if !defined(MSMPI_VER) # if defined(MPICH2) && defined(MPIAPI) # define MSMPI_VER 0x100 # endif # endif #endif #if !defined(MPIAPI) # define MPIAPI #endif #if defined(HAVE_CONFIG_H) #include "config/config.h" #elif defined(MSMPI_VER) #include "config/msmpi.h" #elif defined(MPICH_NAME) && (MPICH_NAME >= 3) #include "config/mpich.h" #elif defined(MPICH_NAME) && (MPICH_NAME == 2) #include "config/mpich2.h" #elif defined(OPEN_MPI) #include "config/openmpi.h" #else /* Unknown MPI*/ #include "config/unknown.h" #endif #ifdef PyMPI_MISSING_MPI_Type_create_f90_integer #undef PyMPI_HAVE_MPI_Type_create_f90_integer #endif #ifdef PyMPI_MISSING_MPI_Type_create_f90_real #undef PyMPI_HAVE_MPI_Type_create_f90_real #endif #ifdef PyMPI_MISSING_MPI_Type_create_f90_complex #undef PyMPI_HAVE_MPI_Type_create_f90_complex 
#endif #ifdef PyMPI_MISSING_MPI_Status_c2f #undef PyMPI_HAVE_MPI_Status_c2f #endif #ifdef PyMPI_MISSING_MPI_Status_f2c #undef PyMPI_HAVE_MPI_Status_f2c #endif #ifdef PyMPI_MISSING_MPI_LB #undef PyMPI_HAVE_MPI_LB #endif #ifdef PyMPI_MISSING_MPI_UB #undef PyMPI_HAVE_MPI_UB #endif mpi4py-3.1.6/src/lib-mpi/config/000077500000000000000000000000001460670727200163435ustar00rootroot00000000000000mpi4py-3.1.6/src/lib-mpi/config/mpi-11.h000066400000000000000000000203451460670727200175240ustar00rootroot00000000000000#define PyMPI_HAVE_MPI_UNDEFINED 1 #define PyMPI_HAVE_MPI_ANY_SOURCE 1 #define PyMPI_HAVE_MPI_ANY_TAG 1 #define PyMPI_HAVE_MPI_PROC_NULL 1 #define PyMPI_HAVE_MPI_Aint 1 #define PyMPI_HAVE_MPI_Datatype 1 #define PyMPI_HAVE_MPI_DATATYPE_NULL 1 #define PyMPI_HAVE_MPI_UB 1 #define PyMPI_HAVE_MPI_LB 1 #define PyMPI_HAVE_MPI_PACKED 1 #define PyMPI_HAVE_MPI_BYTE 1 #define PyMPI_HAVE_MPI_CHAR 1 #define PyMPI_HAVE_MPI_SHORT 1 #define PyMPI_HAVE_MPI_INT 1 #define PyMPI_HAVE_MPI_LONG 1 #define PyMPI_HAVE_MPI_LONG_LONG_INT 1 #define PyMPI_HAVE_MPI_UNSIGNED_CHAR 1 #define PyMPI_HAVE_MPI_UNSIGNED_SHORT 1 #define PyMPI_HAVE_MPI_UNSIGNED 1 #define PyMPI_HAVE_MPI_UNSIGNED_LONG 1 #define PyMPI_HAVE_MPI_FLOAT 1 #define PyMPI_HAVE_MPI_DOUBLE 1 #define PyMPI_HAVE_MPI_LONG_DOUBLE 1 #define PyMPI_HAVE_MPI_SHORT_INT 1 #define PyMPI_HAVE_MPI_2INT 1 #define PyMPI_HAVE_MPI_LONG_INT 1 #define PyMPI_HAVE_MPI_FLOAT_INT 1 #define PyMPI_HAVE_MPI_DOUBLE_INT 1 #define PyMPI_HAVE_MPI_LONG_DOUBLE_INT 1 #define PyMPI_HAVE_MPI_CHARACTER 1 #define PyMPI_HAVE_MPI_LOGICAL 1 #define PyMPI_HAVE_MPI_INTEGER 1 #define PyMPI_HAVE_MPI_REAL 1 #define PyMPI_HAVE_MPI_DOUBLE_PRECISION 1 #define PyMPI_HAVE_MPI_COMPLEX 1 #define PyMPI_HAVE_MPI_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_BOTTOM 1 #define PyMPI_HAVE_MPI_Address 1 #define PyMPI_HAVE_MPI_Type_size 1 #define PyMPI_HAVE_MPI_Type_extent 1 #define PyMPI_HAVE_MPI_Type_lb 1 #define PyMPI_HAVE_MPI_Type_ub 1 #define PyMPI_HAVE_MPI_Type_dup 1 #define PyMPI_HAVE_MPI_Type_contiguous 1 #define PyMPI_HAVE_MPI_Type_vector 1 #define PyMPI_HAVE_MPI_Type_indexed 1 #define PyMPI_HAVE_MPI_Type_hvector 1 #define PyMPI_HAVE_MPI_Type_hindexed 1 #define PyMPI_HAVE_MPI_Type_struct 1 #define PyMPI_HAVE_MPI_Type_commit 1 #define PyMPI_HAVE_MPI_Type_free 1 #define PyMPI_HAVE_MPI_Pack 1 #define PyMPI_HAVE_MPI_Unpack 1 #define PyMPI_HAVE_MPI_Pack_size 1 #define PyMPI_HAVE_MPI_Status 1 #define PyMPI_HAVE_MPI_Get_count 1 #define PyMPI_HAVE_MPI_Get_elements 1 #define PyMPI_HAVE_MPI_Test_cancelled 1 #define PyMPI_HAVE_MPI_Request 1 #define PyMPI_HAVE_MPI_REQUEST_NULL 1 #define PyMPI_HAVE_MPI_Request_free 1 #define PyMPI_HAVE_MPI_Wait 1 #define PyMPI_HAVE_MPI_Test 1 #define PyMPI_HAVE_MPI_Request_get_status 1 #define PyMPI_HAVE_MPI_Cancel 1 #define PyMPI_HAVE_MPI_Waitany 1 #define PyMPI_HAVE_MPI_Testany 1 #define PyMPI_HAVE_MPI_Waitall 1 #define PyMPI_HAVE_MPI_Testall 1 #define PyMPI_HAVE_MPI_Waitsome 1 #define PyMPI_HAVE_MPI_Testsome 1 #define PyMPI_HAVE_MPI_Start 1 #define PyMPI_HAVE_MPI_Startall 1 #define PyMPI_HAVE_MPI_Op 1 #define PyMPI_HAVE_MPI_OP_NULL 1 #define PyMPI_HAVE_MPI_MAX 1 #define PyMPI_HAVE_MPI_MIN 1 #define PyMPI_HAVE_MPI_SUM 1 #define PyMPI_HAVE_MPI_PROD 1 #define PyMPI_HAVE_MPI_LAND 1 #define PyMPI_HAVE_MPI_BAND 1 #define PyMPI_HAVE_MPI_LOR 1 #define PyMPI_HAVE_MPI_BOR 1 #define PyMPI_HAVE_MPI_LXOR 1 #define PyMPI_HAVE_MPI_BXOR 1 #define PyMPI_HAVE_MPI_MAXLOC 1 #define PyMPI_HAVE_MPI_MINLOC 1 #define PyMPI_HAVE_MPI_REPLACE 1 #define PyMPI_HAVE_MPI_Op_free 1 #define PyMPI_HAVE_MPI_User_function 1 
#define PyMPI_HAVE_MPI_Op_create 1 #define PyMPI_HAVE_MPI_Group 1 #define PyMPI_HAVE_MPI_GROUP_NULL 1 #define PyMPI_HAVE_MPI_GROUP_EMPTY 1 #define PyMPI_HAVE_MPI_Group_free 1 #define PyMPI_HAVE_MPI_Group_size 1 #define PyMPI_HAVE_MPI_Group_rank 1 #define PyMPI_HAVE_MPI_Group_translate_ranks 1 #define PyMPI_HAVE_MPI_IDENT 1 #define PyMPI_HAVE_MPI_CONGRUENT 1 #define PyMPI_HAVE_MPI_SIMILAR 1 #define PyMPI_HAVE_MPI_UNEQUAL 1 #define PyMPI_HAVE_MPI_Group_compare 1 #define PyMPI_HAVE_MPI_Group_union 1 #define PyMPI_HAVE_MPI_Group_intersection 1 #define PyMPI_HAVE_MPI_Group_difference 1 #define PyMPI_HAVE_MPI_Group_incl 1 #define PyMPI_HAVE_MPI_Group_excl 1 #define PyMPI_HAVE_MPI_Group_range_incl 1 #define PyMPI_HAVE_MPI_Group_range_excl 1 #define PyMPI_HAVE_MPI_Comm 1 #define PyMPI_HAVE_MPI_COMM_NULL 1 #define PyMPI_HAVE_MPI_COMM_SELF 1 #define PyMPI_HAVE_MPI_COMM_WORLD 1 #define PyMPI_HAVE_MPI_Comm_free 1 #define PyMPI_HAVE_MPI_Comm_group 1 #define PyMPI_HAVE_MPI_Comm_size 1 #define PyMPI_HAVE_MPI_Comm_rank 1 #define PyMPI_HAVE_MPI_Comm_compare 1 #define PyMPI_HAVE_MPI_Topo_test 1 #define PyMPI_HAVE_MPI_Comm_test_inter 1 #define PyMPI_HAVE_MPI_Abort 1 #define PyMPI_HAVE_MPI_Send 1 #define PyMPI_HAVE_MPI_Recv 1 #define PyMPI_HAVE_MPI_Sendrecv 1 #define PyMPI_HAVE_MPI_Sendrecv_replace 1 #define PyMPI_HAVE_MPI_BSEND_OVERHEAD 1 #define PyMPI_HAVE_MPI_Buffer_attach 1 #define PyMPI_HAVE_MPI_Buffer_detach 1 #define PyMPI_HAVE_MPI_Bsend 1 #define PyMPI_HAVE_MPI_Ssend 1 #define PyMPI_HAVE_MPI_Rsend 1 #define PyMPI_HAVE_MPI_Isend 1 #define PyMPI_HAVE_MPI_Ibsend 1 #define PyMPI_HAVE_MPI_Issend 1 #define PyMPI_HAVE_MPI_Irsend 1 #define PyMPI_HAVE_MPI_Irecv 1 #define PyMPI_HAVE_MPI_Send_init 1 #define PyMPI_HAVE_MPI_Bsend_init 1 #define PyMPI_HAVE_MPI_Ssend_init 1 #define PyMPI_HAVE_MPI_Rsend_init 1 #define PyMPI_HAVE_MPI_Recv_init 1 #define PyMPI_HAVE_MPI_Probe 1 #define PyMPI_HAVE_MPI_Iprobe 1 #define PyMPI_HAVE_MPI_Barrier 1 #define PyMPI_HAVE_MPI_Bcast 1 #define PyMPI_HAVE_MPI_Gather 1 #define PyMPI_HAVE_MPI_Gatherv 1 #define PyMPI_HAVE_MPI_Scatter 1 #define PyMPI_HAVE_MPI_Scatterv 1 #define PyMPI_HAVE_MPI_Allgather 1 #define PyMPI_HAVE_MPI_Allgatherv 1 #define PyMPI_HAVE_MPI_Alltoall 1 #define PyMPI_HAVE_MPI_Alltoallv 1 #define PyMPI_HAVE_MPI_Reduce 1 #define PyMPI_HAVE_MPI_Allreduce 1 #define PyMPI_HAVE_MPI_Reduce_scatter 1 #define PyMPI_HAVE_MPI_Scan 1 #define PyMPI_HAVE_MPI_Comm_dup 1 #define PyMPI_HAVE_MPI_Comm_create 1 #define PyMPI_HAVE_MPI_Comm_split 1 #define PyMPI_HAVE_MPI_CART 1 #define PyMPI_HAVE_MPI_Cart_create 1 #define PyMPI_HAVE_MPI_Cartdim_get 1 #define PyMPI_HAVE_MPI_Cart_get 1 #define PyMPI_HAVE_MPI_Cart_rank 1 #define PyMPI_HAVE_MPI_Cart_coords 1 #define PyMPI_HAVE_MPI_Cart_shift 1 #define PyMPI_HAVE_MPI_Cart_sub 1 #define PyMPI_HAVE_MPI_Cart_map 1 #define PyMPI_HAVE_MPI_Dims_create 1 #define PyMPI_HAVE_MPI_GRAPH 1 #define PyMPI_HAVE_MPI_Graph_create 1 #define PyMPI_HAVE_MPI_Graphdims_get 1 #define PyMPI_HAVE_MPI_Graph_get 1 #define PyMPI_HAVE_MPI_Graph_map 1 #define PyMPI_HAVE_MPI_Graph_neighbors_count 1 #define PyMPI_HAVE_MPI_Graph_neighbors 1 #define PyMPI_HAVE_MPI_Intercomm_create 1 #define PyMPI_HAVE_MPI_Comm_remote_group 1 #define PyMPI_HAVE_MPI_Comm_remote_size 1 #define PyMPI_HAVE_MPI_Intercomm_merge 1 #define PyMPI_HAVE_MPI_Errhandler_get 1 #define PyMPI_HAVE_MPI_Errhandler_set 1 #define PyMPI_HAVE_MPI_Handler_function 1 #define PyMPI_HAVE_MPI_Errhandler_create 1 #define PyMPI_HAVE_MPI_Init 1 #define PyMPI_HAVE_MPI_Finalize 1 #define PyMPI_HAVE_MPI_Initialized 1 #define 
PyMPI_HAVE_MPI_Finalized 1 #define PyMPI_HAVE_MPI_MAX_PROCESSOR_NAME 1 #define PyMPI_HAVE_MPI_Get_processor_name 1 #define PyMPI_HAVE_MPI_Wtime 1 #define PyMPI_HAVE_MPI_Wtick 1 #define PyMPI_HAVE_MPI_Pcontrol 1 #define PyMPI_HAVE_MPI_Errhandler 1 #define PyMPI_HAVE_MPI_ERRHANDLER_NULL 1 #define PyMPI_HAVE_MPI_ERRORS_RETURN 1 #define PyMPI_HAVE_MPI_ERRORS_ARE_FATAL 1 #define PyMPI_HAVE_MPI_Errhandler_free 1 #define PyMPI_HAVE_MPI_KEYVAL_INVALID 1 #define PyMPI_HAVE_MPI_TAG_UB 1 #define PyMPI_HAVE_MPI_HOST 1 #define PyMPI_HAVE_MPI_IO 1 #define PyMPI_HAVE_MPI_WTIME_IS_GLOBAL 1 #define PyMPI_HAVE_MPI_Attr_get 1 #define PyMPI_HAVE_MPI_Attr_put 1 #define PyMPI_HAVE_MPI_Attr_delete 1 #define PyMPI_HAVE_MPI_Copy_function 1 #define PyMPI_HAVE_MPI_Delete_function 1 #define PyMPI_HAVE_MPI_DUP_FN 1 #define PyMPI_HAVE_MPI_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Keyval_create 1 #define PyMPI_HAVE_MPI_Keyval_free 1 #define PyMPI_HAVE_MPI_SUCCESS 1 #define PyMPI_HAVE_MPI_ERR_LASTCODE 1 #define PyMPI_HAVE_MPI_ERR_COMM 1 #define PyMPI_HAVE_MPI_ERR_GROUP 1 #define PyMPI_HAVE_MPI_ERR_TYPE 1 #define PyMPI_HAVE_MPI_ERR_REQUEST 1 #define PyMPI_HAVE_MPI_ERR_OP 1 #define PyMPI_HAVE_MPI_ERR_BUFFER 1 #define PyMPI_HAVE_MPI_ERR_COUNT 1 #define PyMPI_HAVE_MPI_ERR_TAG 1 #define PyMPI_HAVE_MPI_ERR_RANK 1 #define PyMPI_HAVE_MPI_ERR_ROOT 1 #define PyMPI_HAVE_MPI_ERR_TRUNCATE 1 #define PyMPI_HAVE_MPI_ERR_IN_STATUS 1 #define PyMPI_HAVE_MPI_ERR_PENDING 1 #define PyMPI_HAVE_MPI_ERR_TOPOLOGY 1 #define PyMPI_HAVE_MPI_ERR_DIMS 1 #define PyMPI_HAVE_MPI_ERR_ARG 1 #define PyMPI_HAVE_MPI_ERR_OTHER 1 #define PyMPI_HAVE_MPI_ERR_UNKNOWN 1 #define PyMPI_HAVE_MPI_ERR_INTERN 1 #define PyMPI_HAVE_MPI_MAX_ERROR_STRING 1 #define PyMPI_HAVE_MPI_Error_class 1 #define PyMPI_HAVE_MPI_Error_string 1 mpi4py-3.1.6/src/lib-mpi/config/mpi-12.h000066400000000000000000000003261460670727200175220ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION > 1) || (MPI_VERSION == 1 && MPI_SUBVERSION >= 2) #define PyMPI_HAVE_MPI_VERSION 1 #define PyMPI_HAVE_MPI_SUBVERSION 1 #define PyMPI_HAVE_MPI_Get_version 1 #endif #endif mpi4py-3.1.6/src/lib-mpi/config/mpi-20.h000066400000000000000000000315501460670727200175240ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 2) #define PyMPI_HAVE_MPI_ERR_KEYVAL 1 #define PyMPI_HAVE_MPI_MAX_OBJECT_NAME 1 #define PyMPI_HAVE_MPI_WCHAR 1 #define PyMPI_HAVE_MPI_SIGNED_CHAR 1 #define PyMPI_HAVE_MPI_LONG_LONG 1 #define PyMPI_HAVE_MPI_UNSIGNED_LONG_LONG 1 #define PyMPI_HAVE_MPI_INTEGER1 1 #define PyMPI_HAVE_MPI_INTEGER2 1 #define PyMPI_HAVE_MPI_INTEGER4 1 #define PyMPI_HAVE_MPI_INTEGER8 1 #define PyMPI_HAVE_MPI_INTEGER16 1 #define PyMPI_HAVE_MPI_REAL4 1 #define PyMPI_HAVE_MPI_REAL8 1 #define PyMPI_HAVE_MPI_REAL16 1 #define PyMPI_HAVE_MPI_COMPLEX8 1 #define PyMPI_HAVE_MPI_COMPLEX16 1 #define PyMPI_HAVE_MPI_COMPLEX32 1 #define PyMPI_HAVE_MPI_Type_dup 1 #define PyMPI_HAVE_MPI_Type_create_indexed_block 1 #define PyMPI_HAVE_MPI_ORDER_C 1 #define PyMPI_HAVE_MPI_ORDER_FORTRAN 1 #define PyMPI_HAVE_MPI_Type_create_subarray 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_NONE 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_BLOCK 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_CYCLIC 1 #define PyMPI_HAVE_MPI_DISTRIBUTE_DFLT_DARG 1 #define PyMPI_HAVE_MPI_Type_create_darray 1 #define PyMPI_HAVE_MPI_Get_address 1 #define PyMPI_HAVE_MPI_Type_create_hvector 1 #define PyMPI_HAVE_MPI_Type_create_hindexed 1 #define PyMPI_HAVE_MPI_Type_create_struct 1 #define PyMPI_HAVE_MPI_Type_get_extent 1 #define 
PyMPI_HAVE_MPI_Type_create_resized 1 #define PyMPI_HAVE_MPI_Type_get_true_extent 1 #define PyMPI_HAVE_MPI_Type_create_f90_integer 1 #define PyMPI_HAVE_MPI_Type_create_f90_real 1 #define PyMPI_HAVE_MPI_Type_create_f90_complex 1 #define PyMPI_HAVE_MPI_TYPECLASS_INTEGER 1 #define PyMPI_HAVE_MPI_TYPECLASS_REAL 1 #define PyMPI_HAVE_MPI_TYPECLASS_COMPLEX 1 #define PyMPI_HAVE_MPI_Type_match_size 1 #define PyMPI_HAVE_MPI_Pack_external 1 #define PyMPI_HAVE_MPI_Unpack_external 1 #define PyMPI_HAVE_MPI_Pack_external_size 1 #define PyMPI_HAVE_MPI_COMBINER_NAMED 1 #define PyMPI_HAVE_MPI_COMBINER_DUP 1 #define PyMPI_HAVE_MPI_COMBINER_CONTIGUOUS 1 #define PyMPI_HAVE_MPI_COMBINER_VECTOR 1 #define PyMPI_HAVE_MPI_COMBINER_HVECTOR_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_HVECTOR 1 #define PyMPI_HAVE_MPI_COMBINER_INDEXED 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED 1 #define PyMPI_HAVE_MPI_COMBINER_INDEXED_BLOCK 1 #define PyMPI_HAVE_MPI_COMBINER_STRUCT_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_STRUCT 1 #define PyMPI_HAVE_MPI_COMBINER_SUBARRAY 1 #define PyMPI_HAVE_MPI_COMBINER_DARRAY 1 #define PyMPI_HAVE_MPI_COMBINER_F90_REAL 1 #define PyMPI_HAVE_MPI_COMBINER_F90_COMPLEX 1 #define PyMPI_HAVE_MPI_COMBINER_F90_INTEGER 1 #define PyMPI_HAVE_MPI_COMBINER_RESIZED 1 #define PyMPI_HAVE_MPI_Type_get_envelope 1 #define PyMPI_HAVE_MPI_Type_get_contents 1 #define PyMPI_HAVE_MPI_Type_get_name 1 #define PyMPI_HAVE_MPI_Type_set_name 1 #define PyMPI_HAVE_MPI_Type_get_attr 1 #define PyMPI_HAVE_MPI_Type_set_attr 1 #define PyMPI_HAVE_MPI_Type_delete_attr 1 #define PyMPI_HAVE_MPI_Type_copy_attr_function 1 #define PyMPI_HAVE_MPI_Type_delete_attr_function 1 #define PyMPI_HAVE_MPI_TYPE_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_TYPE_DUP_FN 1 #define PyMPI_HAVE_MPI_TYPE_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Type_create_keyval 1 #define PyMPI_HAVE_MPI_Type_free_keyval 1 #define PyMPI_HAVE_MPI_STATUS_IGNORE 1 #define PyMPI_HAVE_MPI_STATUSES_IGNORE 1 #define PyMPI_HAVE_MPI_Status_set_elements 1 #define PyMPI_HAVE_MPI_Status_set_cancelled 1 #define PyMPI_HAVE_MPI_Request_get_status 1 #define PyMPI_HAVE_MPI_Grequest_cancel_function 1 #define PyMPI_HAVE_MPI_Grequest_free_function 1 #define PyMPI_HAVE_MPI_Grequest_query_function 1 #define PyMPI_HAVE_MPI_Grequest_start 1 #define PyMPI_HAVE_MPI_Grequest_complete 1 #define PyMPI_HAVE_MPI_ROOT 1 #define PyMPI_HAVE_MPI_IN_PLACE 1 #define PyMPI_HAVE_MPI_Alltoallw 1 #define PyMPI_HAVE_MPI_Exscan 1 #define PyMPI_HAVE_MPI_Comm_get_errhandler 1 #define PyMPI_HAVE_MPI_Comm_set_errhandler 1 #define PyMPI_HAVE_MPI_Comm_errhandler_fn 1 #define PyMPI_HAVE_MPI_Comm_create_errhandler 1 #define PyMPI_HAVE_MPI_Comm_call_errhandler 1 #define PyMPI_HAVE_MPI_Comm_get_name 1 #define PyMPI_HAVE_MPI_Comm_set_name 1 #define PyMPI_HAVE_MPI_Comm_get_attr 1 #define PyMPI_HAVE_MPI_Comm_set_attr 1 #define PyMPI_HAVE_MPI_Comm_delete_attr 1 #define PyMPI_HAVE_MPI_Comm_copy_attr_function 1 #define PyMPI_HAVE_MPI_Comm_delete_attr_function 1 #define PyMPI_HAVE_MPI_COMM_DUP_FN 1 #define PyMPI_HAVE_MPI_COMM_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_COMM_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Comm_create_keyval 1 #define PyMPI_HAVE_MPI_Comm_free_keyval 1 #define PyMPI_HAVE_MPI_MAX_PORT_NAME 1 #define PyMPI_HAVE_MPI_Open_port 1 #define PyMPI_HAVE_MPI_Close_port 1 #define PyMPI_HAVE_MPI_Publish_name 1 #define PyMPI_HAVE_MPI_Unpublish_name 1 #define PyMPI_HAVE_MPI_Lookup_name 1 #define PyMPI_HAVE_MPI_Comm_accept 1 #define PyMPI_HAVE_MPI_Comm_connect 1 #define PyMPI_HAVE_MPI_Comm_join 1 
#define PyMPI_HAVE_MPI_Comm_disconnect 1 #define PyMPI_HAVE_MPI_ARGV_NULL 1 #define PyMPI_HAVE_MPI_ARGVS_NULL 1 #define PyMPI_HAVE_MPI_ERRCODES_IGNORE 1 #define PyMPI_HAVE_MPI_Comm_spawn 1 #define PyMPI_HAVE_MPI_Comm_spawn_multiple 1 #define PyMPI_HAVE_MPI_Comm_get_parent 1 #define PyMPI_HAVE_MPI_UNIVERSE_SIZE 1 #define PyMPI_HAVE_MPI_APPNUM 1 #define PyMPI_HAVE_MPI_ERR_SPAWN 1 #define PyMPI_HAVE_MPI_ERR_PORT 1 #define PyMPI_HAVE_MPI_ERR_SERVICE 1 #define PyMPI_HAVE_MPI_ERR_NAME 1 #define PyMPI_HAVE_MPI_Alloc_mem 1 #define PyMPI_HAVE_MPI_Free_mem 1 #define PyMPI_HAVE_MPI_ERR_NO_MEM 1 #define PyMPI_HAVE_MPI_Info 1 #define PyMPI_HAVE_MPI_INFO_NULL 1 #define PyMPI_HAVE_MPI_Info_free 1 #define PyMPI_HAVE_MPI_Info_create 1 #define PyMPI_HAVE_MPI_Info_dup 1 #define PyMPI_HAVE_MPI_MAX_INFO_KEY 1 #define PyMPI_HAVE_MPI_MAX_INFO_VAL 1 #define PyMPI_HAVE_MPI_Info_get 1 #define PyMPI_HAVE_MPI_Info_set 1 #define PyMPI_HAVE_MPI_Info_delete 1 #define PyMPI_HAVE_MPI_Info_get_nkeys 1 #define PyMPI_HAVE_MPI_Info_get_nthkey 1 #define PyMPI_HAVE_MPI_Info_get_valuelen 1 #define PyMPI_HAVE_MPI_ERR_INFO 1 #define PyMPI_HAVE_MPI_ERR_INFO_KEY 1 #define PyMPI_HAVE_MPI_ERR_INFO_VALUE 1 #define PyMPI_HAVE_MPI_ERR_INFO_NOKEY 1 #define PyMPI_HAVE_MPI_Win 1 #define PyMPI_HAVE_MPI_WIN_NULL 1 #define PyMPI_HAVE_MPI_Win_free 1 #define PyMPI_HAVE_MPI_Win_create 1 #define PyMPI_HAVE_MPI_Win_get_group 1 #define PyMPI_HAVE_MPI_Get 1 #define PyMPI_HAVE_MPI_Put 1 #define PyMPI_HAVE_MPI_REPLACE 1 #define PyMPI_HAVE_MPI_Accumulate 1 #define PyMPI_HAVE_MPI_MODE_NOCHECK 1 #define PyMPI_HAVE_MPI_MODE_NOSTORE 1 #define PyMPI_HAVE_MPI_MODE_NOPUT 1 #define PyMPI_HAVE_MPI_MODE_NOPRECEDE 1 #define PyMPI_HAVE_MPI_MODE_NOSUCCEED 1 #define PyMPI_HAVE_MPI_Win_fence 1 #define PyMPI_HAVE_MPI_Win_post 1 #define PyMPI_HAVE_MPI_Win_start 1 #define PyMPI_HAVE_MPI_Win_complete 1 #define PyMPI_HAVE_MPI_Win_wait 1 #define PyMPI_HAVE_MPI_Win_test 1 #define PyMPI_HAVE_MPI_LOCK_EXCLUSIVE 1 #define PyMPI_HAVE_MPI_LOCK_SHARED 1 #define PyMPI_HAVE_MPI_Win_lock 1 #define PyMPI_HAVE_MPI_Win_unlock 1 #define PyMPI_HAVE_MPI_Win_get_errhandler 1 #define PyMPI_HAVE_MPI_Win_set_errhandler 1 #define PyMPI_HAVE_MPI_Win_errhandler_fn 1 #define PyMPI_HAVE_MPI_Win_create_errhandler 1 #define PyMPI_HAVE_MPI_Win_call_errhandler 1 #define PyMPI_HAVE_MPI_Win_get_name 1 #define PyMPI_HAVE_MPI_Win_set_name 1 #define PyMPI_HAVE_MPI_WIN_BASE 1 #define PyMPI_HAVE_MPI_WIN_SIZE 1 #define PyMPI_HAVE_MPI_WIN_DISP_UNIT 1 #define PyMPI_HAVE_MPI_Win_get_attr 1 #define PyMPI_HAVE_MPI_Win_set_attr 1 #define PyMPI_HAVE_MPI_Win_delete_attr 1 #define PyMPI_HAVE_MPI_Win_copy_attr_function 1 #define PyMPI_HAVE_MPI_Win_delete_attr_function 1 #define PyMPI_HAVE_MPI_WIN_DUP_FN 1 #define PyMPI_HAVE_MPI_WIN_NULL_COPY_FN 1 #define PyMPI_HAVE_MPI_WIN_NULL_DELETE_FN 1 #define PyMPI_HAVE_MPI_Win_create_keyval 1 #define PyMPI_HAVE_MPI_Win_free_keyval 1 #define PyMPI_HAVE_MPI_ERR_WIN 1 #define PyMPI_HAVE_MPI_ERR_BASE 1 #define PyMPI_HAVE_MPI_ERR_SIZE 1 #define PyMPI_HAVE_MPI_ERR_DISP 1 #define PyMPI_HAVE_MPI_ERR_ASSERT 1 #define PyMPI_HAVE_MPI_ERR_LOCKTYPE 1 #define PyMPI_HAVE_MPI_ERR_RMA_CONFLICT 1 #define PyMPI_HAVE_MPI_ERR_RMA_SYNC 1 #define PyMPI_HAVE_MPI_Offset 1 #define PyMPI_HAVE_MPI_File 1 #define PyMPI_HAVE_MPI_FILE_NULL 1 #define PyMPI_HAVE_MPI_MODE_RDONLY 1 #define PyMPI_HAVE_MPI_MODE_RDWR 1 #define PyMPI_HAVE_MPI_MODE_WRONLY 1 #define PyMPI_HAVE_MPI_MODE_CREATE 1 #define PyMPI_HAVE_MPI_MODE_EXCL 1 #define PyMPI_HAVE_MPI_MODE_DELETE_ON_CLOSE 1 #define PyMPI_HAVE_MPI_MODE_UNIQUE_OPEN 1 
#define PyMPI_HAVE_MPI_MODE_APPEND 1 #define PyMPI_HAVE_MPI_MODE_SEQUENTIAL 1 #define PyMPI_HAVE_MPI_File_open 1 #define PyMPI_HAVE_MPI_File_close 1 #define PyMPI_HAVE_MPI_File_delete 1 #define PyMPI_HAVE_MPI_File_set_size 1 #define PyMPI_HAVE_MPI_File_preallocate 1 #define PyMPI_HAVE_MPI_File_get_size 1 #define PyMPI_HAVE_MPI_File_get_group 1 #define PyMPI_HAVE_MPI_File_get_amode 1 #define PyMPI_HAVE_MPI_File_set_info 1 #define PyMPI_HAVE_MPI_File_get_info 1 #define PyMPI_HAVE_MPI_MAX_DATAREP_STRING 1 #define PyMPI_HAVE_MPI_File_get_view 1 #define PyMPI_HAVE_MPI_File_set_view 1 #define PyMPI_HAVE_MPI_File_read_at 1 #define PyMPI_HAVE_MPI_File_read_at_all 1 #define PyMPI_HAVE_MPI_File_write_at 1 #define PyMPI_HAVE_MPI_File_write_at_all 1 #define PyMPI_HAVE_MPI_File_iread_at 1 #define PyMPI_HAVE_MPI_File_iwrite_at 1 #define PyMPI_HAVE_MPI_SEEK_SET 1 #define PyMPI_HAVE_MPI_SEEK_CUR 1 #define PyMPI_HAVE_MPI_SEEK_END 1 #define PyMPI_HAVE_MPI_DISPLACEMENT_CURRENT 1 #define PyMPI_HAVE_MPI_File_seek 1 #define PyMPI_HAVE_MPI_File_get_position 1 #define PyMPI_HAVE_MPI_File_get_byte_offset 1 #define PyMPI_HAVE_MPI_File_read 1 #define PyMPI_HAVE_MPI_File_read_all 1 #define PyMPI_HAVE_MPI_File_write 1 #define PyMPI_HAVE_MPI_File_write_all 1 #define PyMPI_HAVE_MPI_File_iread 1 #define PyMPI_HAVE_MPI_File_iwrite 1 #define PyMPI_HAVE_MPI_File_read_shared 1 #define PyMPI_HAVE_MPI_File_write_shared 1 #define PyMPI_HAVE_MPI_File_iread_shared 1 #define PyMPI_HAVE_MPI_File_iwrite_shared 1 #define PyMPI_HAVE_MPI_File_read_ordered 1 #define PyMPI_HAVE_MPI_File_write_ordered 1 #define PyMPI_HAVE_MPI_File_seek_shared 1 #define PyMPI_HAVE_MPI_File_get_position_shared 1 #define PyMPI_HAVE_MPI_File_read_at_all_begin 1 #define PyMPI_HAVE_MPI_File_read_at_all_end 1 #define PyMPI_HAVE_MPI_File_write_at_all_begin 1 #define PyMPI_HAVE_MPI_File_write_at_all_end 1 #define PyMPI_HAVE_MPI_File_read_all_begin 1 #define PyMPI_HAVE_MPI_File_read_all_end 1 #define PyMPI_HAVE_MPI_File_write_all_begin 1 #define PyMPI_HAVE_MPI_File_write_all_end 1 #define PyMPI_HAVE_MPI_File_read_ordered_begin 1 #define PyMPI_HAVE_MPI_File_read_ordered_end 1 #define PyMPI_HAVE_MPI_File_write_ordered_begin 1 #define PyMPI_HAVE_MPI_File_write_ordered_end 1 #define PyMPI_HAVE_MPI_File_get_type_extent 1 #define PyMPI_HAVE_MPI_File_set_atomicity 1 #define PyMPI_HAVE_MPI_File_get_atomicity 1 #define PyMPI_HAVE_MPI_File_sync 1 #define PyMPI_HAVE_MPI_File_get_errhandler 1 #define PyMPI_HAVE_MPI_File_set_errhandler 1 #define PyMPI_HAVE_MPI_File_errhandler_fn 1 #define PyMPI_HAVE_MPI_File_create_errhandler 1 #define PyMPI_HAVE_MPI_File_call_errhandler 1 #define PyMPI_HAVE_MPI_Datarep_conversion_function 1 #define PyMPI_HAVE_MPI_Datarep_extent_function 1 #define PyMPI_HAVE_MPI_CONVERSION_FN_NULL 1 #define PyMPI_HAVE_MPI_MAX_DATAREP_STRING 1 #define PyMPI_HAVE_MPI_Register_datarep 1 #define PyMPI_HAVE_MPI_ERR_FILE 1 #define PyMPI_HAVE_MPI_ERR_NOT_SAME 1 #define PyMPI_HAVE_MPI_ERR_BAD_FILE 1 #define PyMPI_HAVE_MPI_ERR_NO_SUCH_FILE 1 #define PyMPI_HAVE_MPI_ERR_FILE_EXISTS 1 #define PyMPI_HAVE_MPI_ERR_FILE_IN_USE 1 #define PyMPI_HAVE_MPI_ERR_AMODE 1 #define PyMPI_HAVE_MPI_ERR_ACCESS 1 #define PyMPI_HAVE_MPI_ERR_READ_ONLY 1 #define PyMPI_HAVE_MPI_ERR_NO_SPACE 1 #define PyMPI_HAVE_MPI_ERR_QUOTA 1 #define PyMPI_HAVE_MPI_ERR_UNSUPPORTED_DATAREP 1 #define PyMPI_HAVE_MPI_ERR_UNSUPPORTED_OPERATION 1 #define PyMPI_HAVE_MPI_ERR_CONVERSION 1 #define PyMPI_HAVE_MPI_ERR_DUP_DATAREP 1 #define PyMPI_HAVE_MPI_ERR_IO 1 #define PyMPI_HAVE_MPI_LASTUSEDCODE 1 #define 
PyMPI_HAVE_MPI_Add_error_class 1 #define PyMPI_HAVE_MPI_Add_error_code 1 #define PyMPI_HAVE_MPI_Add_error_string 1 #define PyMPI_HAVE_MPI_THREAD_SINGLE 1 #define PyMPI_HAVE_MPI_THREAD_FUNNELED 1 #define PyMPI_HAVE_MPI_THREAD_SERIALIZED 1 #define PyMPI_HAVE_MPI_THREAD_MULTIPLE 1 #define PyMPI_HAVE_MPI_Init_thread 1 #define PyMPI_HAVE_MPI_Query_thread 1 #define PyMPI_HAVE_MPI_Is_thread_main 1 #define PyMPI_HAVE_MPI_Fint 1 #define PyMPI_HAVE_MPI_F_STATUS_IGNORE 1 #define PyMPI_HAVE_MPI_F_STATUSES_IGNORE 1 #define PyMPI_HAVE_MPI_Status_c2f 1 #define PyMPI_HAVE_MPI_Status_f2c 1 #define PyMPI_HAVE_MPI_Type_c2f 1 #define PyMPI_HAVE_MPI_Request_c2f 1 #define PyMPI_HAVE_MPI_Op_c2f 1 #define PyMPI_HAVE_MPI_Info_c2f 1 #define PyMPI_HAVE_MPI_Group_c2f 1 #define PyMPI_HAVE_MPI_Comm_c2f 1 #define PyMPI_HAVE_MPI_Win_c2f 1 #define PyMPI_HAVE_MPI_File_c2f 1 #define PyMPI_HAVE_MPI_Errhandler_c2f 1 #define PyMPI_HAVE_MPI_Type_f2c 1 #define PyMPI_HAVE_MPI_Request_f2c 1 #define PyMPI_HAVE_MPI_Op_f2c 1 #define PyMPI_HAVE_MPI_Info_f2c 1 #define PyMPI_HAVE_MPI_Group_f2c 1 #define PyMPI_HAVE_MPI_Comm_f2c 1 #define PyMPI_HAVE_MPI_Win_f2c 1 #define PyMPI_HAVE_MPI_File_f2c 1 #define PyMPI_HAVE_MPI_Errhandler_f2c 1 #endif #endif mpi4py-3.1.6/src/lib-mpi/config/mpi-22.h000066400000000000000000000023311460670727200175210ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION > 2) || (MPI_VERSION == 2 && MPI_SUBVERSION >= 2) #define PyMPI_HAVE_MPI_AINT 1 #define PyMPI_HAVE_MPI_OFFSET 1 #define PyMPI_HAVE_MPI_C_BOOL 1 #define PyMPI_HAVE_MPI_INT8_T 1 #define PyMPI_HAVE_MPI_INT16_T 1 #define PyMPI_HAVE_MPI_INT32_T 1 #define PyMPI_HAVE_MPI_INT64_T 1 #define PyMPI_HAVE_MPI_UINT8_T 1 #define PyMPI_HAVE_MPI_UINT16_T 1 #define PyMPI_HAVE_MPI_UINT32_T 1 #define PyMPI_HAVE_MPI_UINT64_T 1 #define PyMPI_HAVE_MPI_C_COMPLEX 1 #define PyMPI_HAVE_MPI_C_FLOAT_COMPLEX 1 #define PyMPI_HAVE_MPI_C_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_C_LONG_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_REAL2 1 #define PyMPI_HAVE_MPI_COMPLEX4 1 #define PyMPI_HAVE_MPI_Op_commutative 1 #define PyMPI_HAVE_MPI_Reduce_local 1 #define PyMPI_HAVE_MPI_Reduce_scatter_block 1 #define PyMPI_HAVE_MPI_DIST_GRAPH 1 #define PyMPI_HAVE_MPI_UNWEIGHTED 1 #define PyMPI_HAVE_MPI_Dist_graph_create_adjacent 1 #define PyMPI_HAVE_MPI_Dist_graph_create 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors_count 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors 1 #define PyMPI_HAVE_MPI_Comm_errhandler_function 1 #define PyMPI_HAVE_MPI_Win_errhandler_function 1 #define PyMPI_HAVE_MPI_File_errhandler_function 1 #endif #endif mpi4py-3.1.6/src/lib-mpi/config/mpi-30.h000066400000000000000000000077731460670727200175370ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 3) #define PyMPI_HAVE_MPI_Count 1 #define PyMPI_HAVE_MPI_COUNT 1 #define PyMPI_HAVE_MPI_CXX_BOOL 1 #define PyMPI_HAVE_MPI_CXX_FLOAT_COMPLEX 1 #define PyMPI_HAVE_MPI_CXX_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_CXX_LONG_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_Type_size_x 1 #define PyMPI_HAVE_MPI_Type_get_extent_x 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_x 1 #define PyMPI_HAVE_MPI_Get_elements_x 1 #define PyMPI_HAVE_MPI_Status_set_elements_x 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK #define PyMPI_HAVE_MPI_Type_create_hindexed_block 1 #define PyMPI_HAVE_MPI_NO_OP 1 #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define 
PyMPI_HAVE_MPI_Improbe 1 #define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define PyMPI_HAVE_MPI_Neighbor_allgather 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw 1 #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Ireduce 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #define PyMPI_HAVE_MPI_Ineighbor_allgather 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallw 1 #define PyMPI_HAVE_MPI_WEIGHTS_EMPTY 1 #define PyMPI_HAVE_MPI_Comm_dup_with_info 1 #define PyMPI_HAVE_MPI_Comm_idup 1 #define PyMPI_HAVE_MPI_Comm_create_group 1 #define PyMPI_HAVE_MPI_COMM_TYPE_SHARED 1 #define PyMPI_HAVE_MPI_Comm_split_type 1 #define PyMPI_HAVE_MPI_Comm_set_info 1 #define PyMPI_HAVE_MPI_Comm_get_info 1 #define PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED 1 #define PyMPI_HAVE_MPI_WIN_MODEL 1 #define PyMPI_HAVE_MPI_WIN_SEPARATE 1 #define PyMPI_HAVE_MPI_WIN_UNIFIED 1 #define PyMPI_HAVE_MPI_Win_allocate 1 #define PyMPI_HAVE_MPI_Win_allocate_shared 1 #define PyMPI_HAVE_MPI_Win_shared_query 1 #define PyMPI_HAVE_MPI_Win_create_dynamic 1 #define PyMPI_HAVE_MPI_Win_attach 1 #define PyMPI_HAVE_MPI_Win_detach 1 #define PyMPI_HAVE_MPI_Win_set_info 1 #define PyMPI_HAVE_MPI_Win_get_info 1 #define PyMPI_HAVE_MPI_Get_accumulate 1 #define PyMPI_HAVE_MPI_Fetch_and_op 1 #define PyMPI_HAVE_MPI_Compare_and_swap 1 #define PyMPI_HAVE_MPI_Rget 1 #define PyMPI_HAVE_MPI_Rput 1 #define PyMPI_HAVE_MPI_Raccumulate 1 #define PyMPI_HAVE_MPI_Rget_accumulate 1 #define PyMPI_HAVE_MPI_Win_lock_all 1 #define PyMPI_HAVE_MPI_Win_unlock_all 1 #define PyMPI_HAVE_MPI_Win_flush 1 #define PyMPI_HAVE_MPI_Win_flush_all 1 #define PyMPI_HAVE_MPI_Win_flush_local 1 #define PyMPI_HAVE_MPI_Win_flush_local_all 1 #define PyMPI_HAVE_MPI_Win_sync 1 #define PyMPI_HAVE_MPI_ERR_RMA_RANGE 1 #define PyMPI_HAVE_MPI_ERR_RMA_ATTACH 1 #define PyMPI_HAVE_MPI_ERR_RMA_SHARED 1 #define PyMPI_HAVE_MPI_ERR_RMA_FLAVOR 1 #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define PyMPI_HAVE_MPI_Get_library_version 1 #define PyMPI_HAVE_MPI_INFO_ENV 1 /* #define PyMPI_HAVE_MPI_F08_status 1 #define PyMPI_HAVE_MPI_F08_STATUS_IGNORE 1 #define PyMPI_HAVE_MPI_F08_STATUSES_IGNORE 1 #define PyMPI_HAVE_MPI_Status_c2f08 1 #define PyMPI_HAVE_MPI_Status_f082c 1 #define PyMPI_HAVE_MPI_Status_f2f08 1 #define PyMPI_HAVE_MPI_Status_f082f 1 */ #endif #endif mpi4py-3.1.6/src/lib-mpi/config/mpi-31.h000066400000000000000000000005341460670727200175240ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION > 3) || (MPI_VERSION == 3 && MPI_SUBVERSION >= 1) #define PyMPI_HAVE_MPI_Aint_add 1 #define PyMPI_HAVE_MPI_Aint_diff 1 #define PyMPI_HAVE_MPI_File_iread_at_all 1 #define 
PyMPI_HAVE_MPI_File_iwrite_at_all 1 #define PyMPI_HAVE_MPI_File_iread_all 1 #define PyMPI_HAVE_MPI_File_iwrite_all 1 #endif #endif mpi4py-3.1.6/src/lib-mpi/config/mpi-40.h000066400000000000000000000000771460670727200175260ustar00rootroot00000000000000#if defined(MPI_VERSION) #if (MPI_VERSION >= 4) #endif #endif mpi4py-3.1.6/src/lib-mpi/config/mpi-io.h000066400000000000000000000073421460670727200177140ustar00rootroot00000000000000#undef PyMPI_HAVE_MPI_File #undef PyMPI_HAVE_MPI_FILE_NULL #undef PyMPI_HAVE_MPI_MODE_RDONLY #undef PyMPI_HAVE_MPI_MODE_RDWR #undef PyMPI_HAVE_MPI_MODE_WRONLY #undef PyMPI_HAVE_MPI_MODE_CREATE #undef PyMPI_HAVE_MPI_MODE_EXCL #undef PyMPI_HAVE_MPI_MODE_DELETE_ON_CLOSE #undef PyMPI_HAVE_MPI_MODE_UNIQUE_OPEN #undef PyMPI_HAVE_MPI_MODE_APPEND #undef PyMPI_HAVE_MPI_MODE_SEQUENTIAL #undef PyMPI_HAVE_MPI_File_open #undef PyMPI_HAVE_MPI_File_close #undef PyMPI_HAVE_MPI_File_delete #undef PyMPI_HAVE_MPI_File_set_size #undef PyMPI_HAVE_MPI_File_preallocate #undef PyMPI_HAVE_MPI_File_get_size #undef PyMPI_HAVE_MPI_File_get_group #undef PyMPI_HAVE_MPI_File_get_amode #undef PyMPI_HAVE_MPI_File_set_info #undef PyMPI_HAVE_MPI_File_get_info #undef PyMPI_HAVE_MPI_File_get_view #undef PyMPI_HAVE_MPI_File_set_view #undef PyMPI_HAVE_MPI_File_read_at #undef PyMPI_HAVE_MPI_File_read_at_all #undef PyMPI_HAVE_MPI_File_write_at #undef PyMPI_HAVE_MPI_File_write_at_all #undef PyMPI_HAVE_MPI_File_iread_at #undef PyMPI_HAVE_MPI_File_iread_at_all #undef PyMPI_HAVE_MPI_File_iwrite_at #undef PyMPI_HAVE_MPI_File_iwrite_at_all #undef PyMPI_HAVE_MPI_SEEK_SET #undef PyMPI_HAVE_MPI_SEEK_CUR #undef PyMPI_HAVE_MPI_SEEK_END #undef PyMPI_HAVE_MPI_DISPLACEMENT_CURRENT #undef PyMPI_HAVE_MPI_File_seek #undef PyMPI_HAVE_MPI_File_get_position #undef PyMPI_HAVE_MPI_File_get_byte_offset #undef PyMPI_HAVE_MPI_File_read #undef PyMPI_HAVE_MPI_File_read_all #undef PyMPI_HAVE_MPI_File_write #undef PyMPI_HAVE_MPI_File_write_all #undef PyMPI_HAVE_MPI_File_iread #undef PyMPI_HAVE_MPI_File_iread_all #undef PyMPI_HAVE_MPI_File_iwrite #undef PyMPI_HAVE_MPI_File_iwrite_all #undef PyMPI_HAVE_MPI_File_read_shared #undef PyMPI_HAVE_MPI_File_write_shared #undef PyMPI_HAVE_MPI_File_iread_shared #undef PyMPI_HAVE_MPI_File_iwrite_shared #undef PyMPI_HAVE_MPI_File_read_ordered #undef PyMPI_HAVE_MPI_File_write_ordered #undef PyMPI_HAVE_MPI_File_seek_shared #undef PyMPI_HAVE_MPI_File_get_position_shared #undef PyMPI_HAVE_MPI_File_read_at_all_begin #undef PyMPI_HAVE_MPI_File_read_at_all_end #undef PyMPI_HAVE_MPI_File_write_at_all_begin #undef PyMPI_HAVE_MPI_File_write_at_all_end #undef PyMPI_HAVE_MPI_File_read_all_begin #undef PyMPI_HAVE_MPI_File_read_all_end #undef PyMPI_HAVE_MPI_File_write_all_begin #undef PyMPI_HAVE_MPI_File_write_all_end #undef PyMPI_HAVE_MPI_File_read_ordered_begin #undef PyMPI_HAVE_MPI_File_read_ordered_end #undef PyMPI_HAVE_MPI_File_write_ordered_begin #undef PyMPI_HAVE_MPI_File_write_ordered_end #undef PyMPI_HAVE_MPI_File_get_type_extent #undef PyMPI_HAVE_MPI_File_set_atomicity #undef PyMPI_HAVE_MPI_File_get_atomicity #undef PyMPI_HAVE_MPI_File_sync #undef PyMPI_HAVE_MPI_File_get_errhandler #undef PyMPI_HAVE_MPI_File_set_errhandler #undef PyMPI_HAVE_MPI_File_errhandler_fn #undef PyMPI_HAVE_MPI_File_errhandler_function #undef PyMPI_HAVE_MPI_File_create_errhandler #undef PyMPI_HAVE_MPI_File_call_errhandler #undef PyMPI_HAVE_MPI_Datarep_conversion_function #undef PyMPI_HAVE_MPI_Datarep_extent_function #undef PyMPI_HAVE_MPI_CONVERSION_FN_NULL #undef PyMPI_HAVE_MPI_MAX_DATAREP_STRING #undef 
PyMPI_HAVE_MPI_Register_datarep #undef PyMPI_HAVE_MPI_File_c2f #undef PyMPI_HAVE_MPI_File_f2c #if !defined(MPI_ERR_FILE) #undef PyMPI_HAVE_MPI_ERR_FILE #undef PyMPI_HAVE_MPI_ERR_NOT_SAME #undef PyMPI_HAVE_MPI_ERR_BAD_FILE #undef PyMPI_HAVE_MPI_ERR_NO_SUCH_FILE #undef PyMPI_HAVE_MPI_ERR_FILE_EXISTS #undef PyMPI_HAVE_MPI_ERR_FILE_IN_USE #undef PyMPI_HAVE_MPI_ERR_AMODE #undef PyMPI_HAVE_MPI_ERR_ACCESS #undef PyMPI_HAVE_MPI_ERR_READ_ONLY #undef PyMPI_HAVE_MPI_ERR_NO_SPACE #undef PyMPI_HAVE_MPI_ERR_QUOTA #undef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_DATAREP #undef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_OPERATION #undef PyMPI_HAVE_MPI_ERR_CONVERSION #undef PyMPI_HAVE_MPI_ERR_DUP_DATAREP #undef PyMPI_HAVE_MPI_ERR_IO #endif mpi4py-3.1.6/src/lib-mpi/config/mpich.h000066400000000000000000000006171460670727200176200ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MPICH_H #define PyMPI_CONFIG_MPICH_H #include "mpi-11.h" #include "mpi-12.h" #include "mpi-20.h" #include "mpi-22.h" #include "mpi-30.h" #include "mpi-31.h" #include "mpi-40.h" /* These types are difficult to implement portably */ #undef PyMPI_HAVE_MPI_REAL2 #undef PyMPI_HAVE_MPI_COMPLEX4 #ifndef ROMIO_VERSION #include "mpi-io.h" #endif #endif /* !PyMPI_CONFIG_MPICH_H */ mpi4py-3.1.6/src/lib-mpi/config/mpich2.h000066400000000000000000000215721460670727200177050ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MPICH2_H #define PyMPI_CONFIG_MPICH2_H #include "mpi-11.h" #include "mpi-12.h" #include "mpi-20.h" #include "mpi-22.h" #include "mpi-30.h" #include "mpi-31.h" #include "mpi-40.h" /* These types are difficult to implement portably */ #undef PyMPI_HAVE_MPI_REAL2 #undef PyMPI_HAVE_MPI_COMPLEX4 #if defined(MPI_UNWEIGHTED) && (MPICH2_NUMVERSION < 10300000) #undef MPI_UNWEIGHTED #define MPI_UNWEIGHTED ((int *)0) #endif /* MPICH2 < 1.3.0 */ #if !defined(MPICH2_NUMVERSION) || (MPICH2_NUMVERSION < 10100000) #undef PyMPI_HAVE_MPI_Type_create_f90_integer #undef PyMPI_HAVE_MPI_Type_create_f90_real #undef PyMPI_HAVE_MPI_Type_create_f90_complex #endif /* MPICH2 < 1.1.0 */ #ifndef ROMIO_VERSION #include "mpi-io.h" #endif #if MPI_VERSION < 3 && defined(MPICH2_NUMVERSION) #if MPICH2_NUMVERSION >= 10500000 && \ MPICH2_NUMVERSION < 20000000 /* #define PyMPI_HAVE_MPI_Count 1 #define PyMPI_HAVE_MPI_COUNT 1 #define PyMPI_HAVE_MPI_Type_size_x 1 #define PyMPI_HAVE_MPI_Type_get_extent_x 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_x 1 #define PyMPI_HAVE_MPI_Get_elements_x 1 #define PyMPI_HAVE_MPI_Status_set_elements_x 1 #define MPI_Count MPIX_Count #define MPI_COUNT MPIX_COUNT #define MPI_Type_size_x MPIX_Type_size_x #define MPI_Type_get_extent_x MPIX_Type_get_extent_x #define MPI_Type_get_true_extent_x MPIX_Type_get_true_extent_x #define MPI_Get_elements_x MPIX_Get_elements_x #define MPI_Status_set_elements_x MPIX_Status_set_elements_x */ #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_block 1 #define MPI_COMBINER_HINDEXED_BLOCK MPIX_COMBINER_HINDEXED_BLOCK #define MPI_Type_create_hindexed_block MPIX_Type_create_hindexed_block #define PyMPI_HAVE_MPI_NO_OP 1 #define MPI_NO_OP MPIX_NO_OP #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define PyMPI_HAVE_MPI_Improbe 1 #define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define MPI_Message MPIX_Message #define MPI_MESSAGE_NULL MPIX_MESSAGE_NULL #define MPI_MESSAGE_NO_PROC MPIX_MESSAGE_NO_PROC #define 
MPI_Message_c2f MPIX_Message_c2f #define MPI_Message_f2c MPIX_Message_f2c #define MPI_Mprobe MPIX_Mprobe #define MPI_Improbe MPIX_Improbe #define MPI_Mrecv MPIX_Mrecv #define MPI_Imrecv MPIX_Imrecv #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Ireduce 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #define MPI_Ibarrier MPIX_Ibarrier #define MPI_Ibcast MPIX_Ibcast #define MPI_Igather MPIX_Igather #define MPI_Igatherv MPIX_Igatherv #define MPI_Iscatter MPIX_Iscatter #define MPI_Iscatterv MPIX_Iscatterv #define MPI_Iallgather MPIX_Iallgather #define MPI_Iallgatherv MPIX_Iallgatherv #define MPI_Ialltoall MPIX_Ialltoall #define MPI_Ialltoallv MPIX_Ialltoallv #define MPI_Ialltoallw MPIX_Ialltoallw #define MPI_Ireduce MPIX_Ireduce #define MPI_Iallreduce MPIX_Iallreduce #define MPI_Ireduce_scatter_block MPIX_Ireduce_scatter_block #define MPI_Ireduce_scatter MPIX_Ireduce_scatter #define MPI_Iscan MPIX_Iscan #define MPI_Iexscan MPIX_Iexscan #define PyMPI_HAVE_MPI_Neighbor_allgather 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw 1 #define MPI_Neighbor_allgather MPIX_Neighbor_allgather #define MPI_Neighbor_allgatherv MPIX_Neighbor_allgatherv #define MPI_Neighbor_alltoall MPIX_Neighbor_alltoall #define MPI_Neighbor_alltoallv MPIX_Neighbor_alltoallv #define MPI_Neighbor_alltoallw MPIX_Neighbor_alltoallw #define PyMPI_HAVE_MPI_Ineighbor_allgather 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallw 1 #define MPI_Ineighbor_allgather MPIX_Ineighbor_allgather #define MPI_Ineighbor_allgatherv MPIX_Ineighbor_allgatherv #define MPI_Ineighbor_alltoall MPIX_Ineighbor_alltoall #define MPI_Ineighbor_alltoallv MPIX_Ineighbor_alltoallv #define MPI_Ineighbor_alltoallw MPIX_Ineighbor_alltoallw #define PyMPI_HAVE_MPI_Comm_idup 1 #define PyMPI_HAVE_MPI_Comm_create_group 1 #define PyMPI_HAVE_MPI_COMM_TYPE_SHARED 1 #define PyMPI_HAVE_MPI_Comm_split_type 1 #define MPI_Comm_idup MPIX_Comm_idup #define MPI_Comm_create_group MPIX_Comm_create_group #define MPI_COMM_TYPE_SHARED MPIX_COMM_TYPE_SHARED #define MPI_Comm_split_type MPIX_Comm_split_type /* #define PyMPI_HAVE_MPI_Comm_dup_with_info 1 #define PyMPI_HAVE_MPI_Comm_set_info 1 #define PyMPI_HAVE_MPI_Comm_get_info 1 #define MPI_Comm_dup_with_info MPIX_Comm_dup_with_info #define MPI_Comm_set_info MPIX_Comm_set_info #define MPI_Comm_get_info MPIX_Comm_get_info */ #define PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED 1 #define MPI_WIN_CREATE_FLAVOR MPIX_WIN_CREATE_FLAVOR #define MPI_WIN_FLAVOR_CREATE MPIX_WIN_FLAVOR_CREATE #define MPI_WIN_FLAVOR_ALLOCATE MPIX_WIN_FLAVOR_ALLOCATE #define MPI_WIN_FLAVOR_DYNAMIC MPIX_WIN_FLAVOR_DYNAMIC #define MPI_WIN_FLAVOR_SHARED 
MPIX_WIN_FLAVOR_SHARED #define PyMPI_HAVE_MPI_WIN_MODEL 1 #define PyMPI_HAVE_MPI_WIN_SEPARATE 1 #define PyMPI_HAVE_MPI_WIN_UNIFIED 1 #define MPI_WIN_MODEL MPIX_WIN_MODEL #define MPI_WIN_SEPARATE MPIX_WIN_SEPARATE #define MPI_WIN_UNIFIED MPIX_WIN_UNIFIED #define PyMPI_HAVE_MPI_Win_allocate 1 #define MPI_Win_allocate MPIX_Win_allocate #define PyMPI_HAVE_MPI_Win_allocate_shared 1 #define PyMPI_HAVE_MPI_Win_shared_query 1 #define MPI_Win_allocate_shared MPIX_Win_allocate_shared #define MPI_Win_shared_query MPIX_Win_shared_query #define PyMPI_HAVE_MPI_Win_create_dynamic 1 #define PyMPI_HAVE_MPI_Win_attach 1 #define PyMPI_HAVE_MPI_Win_detach 1 #define MPI_Win_create_dynamic MPIX_Win_create_dynamic #define MPI_Win_attach MPIX_Win_attach #define MPI_Win_detach MPIX_Win_detach /* #define PyMPI_HAVE_MPI_Win_set_info 1 #define PyMPI_HAVE_MPI_Win_get_info 1 #define MPI_Win_set_info MPIX_Win_set_info #define MPI_Win_get_info MPIX_Win_get_info */ #define PyMPI_HAVE_MPI_Get_accumulate 1 #define PyMPI_HAVE_MPI_Fetch_and_op 1 #define PyMPI_HAVE_MPI_Compare_and_swap 1 #define MPI_Get_accumulate MPIX_Get_accumulate #define MPI_Fetch_and_op MPIX_Fetch_and_op #define MPI_Compare_and_swap MPIX_Compare_and_swap #define PyMPI_HAVE_MPI_Rget 1 #define PyMPI_HAVE_MPI_Rput 1 #define PyMPI_HAVE_MPI_Raccumulate 1 #define PyMPI_HAVE_MPI_Rget_accumulate 1 #define MPI_Rget MPIX_Rget #define MPI_Rput MPIX_Rput #define MPI_Raccumulate MPIX_Raccumulate #define MPI_Rget_accumulate MPIX_Rget_accumulate #define PyMPI_HAVE_MPI_Win_lock_all 1 #define PyMPI_HAVE_MPI_Win_unlock_all 1 #define PyMPI_HAVE_MPI_Win_flush 1 #define PyMPI_HAVE_MPI_Win_flush_all 1 #define PyMPI_HAVE_MPI_Win_flush_local 1 #define PyMPI_HAVE_MPI_Win_flush_local_all 1 #define PyMPI_HAVE_MPI_Win_sync #define MPI_Win_lock_all MPIX_Win_lock_all #define MPI_Win_unlock_all MPIX_Win_unlock_all #define MPI_Win_flush MPIX_Win_flush #define MPI_Win_flush_all MPIX_Win_flush_all #define MPI_Win_flush_local MPIX_Win_flush_local #define MPI_Win_flush_local_all MPIX_Win_flush_local_all #define MPI_Win_sync MPIX_Win_sync #define PyMPI_HAVE_MPI_ERR_RMA_RANGE 1 #define PyMPI_HAVE_MPI_ERR_RMA_ATTACH 1 #define PyMPI_HAVE_MPI_ERR_RMA_SHARED 1 #define PyMPI_HAVE_MPI_ERR_RMA_FLAVOR 1 #define MPI_ERR_RMA_RANGE MPIX_ERR_RMA_RANGE #define MPI_ERR_RMA_ATTACH MPIX_ERR_RMA_ATTACH #define MPI_ERR_RMA_SHARED MPIX_ERR_RMA_SHARED #define MPI_ERR_RMA_FLAVOR MPIX_ERR_RMA_WRONG_FLAVOR /* #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define PyMPI_HAVE_MPI_Get_library_version 1 #define PyMPI_HAVE_MPI_INFO_ENV 1 #define MPI_MAX_LIBRARY_VERSION_STRING MPIX_MAX_LIBRARY_VERSION_STRING #define MPI_Get_library_version MPIX_Get_library_version #define MPI_INFO_ENV MPIX_INFO_ENV */ #endif /* MPICH2 < 1.5*/ #endif /* MPI < 3.0*/ #endif /* !PyMPI_CONFIG_MPICH2_H */ mpi4py-3.1.6/src/lib-mpi/config/msmpi.h000066400000000000000000000077731460670727200176570ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_MSMPI_H #define PyMPI_CONFIG_MSMPI_H #include "mpi-11.h" #include "mpi-12.h" #include "mpi-20.h" #include "mpi-22.h" #include "mpi-30.h" #include "mpi-31.h" #include "mpi-40.h" #if MSMPI_VER >= 0x402 #define PyMPI_HAVE_MPI_AINT 1 #define PyMPI_HAVE_MPI_OFFSET 1 #define PyMPI_HAVE_MPI_C_BOOL 1 #define PyMPI_HAVE_MPI_INT8_T 1 #define PyMPI_HAVE_MPI_INT16_T 1 #define PyMPI_HAVE_MPI_INT32_T 1 #define PyMPI_HAVE_MPI_INT64_T 1 #define PyMPI_HAVE_MPI_UINT8_T 1 #define PyMPI_HAVE_MPI_UINT16_T 1 #define PyMPI_HAVE_MPI_UINT32_T 1 #define PyMPI_HAVE_MPI_UINT64_T 1 #define PyMPI_HAVE_MPI_C_COMPLEX 1 #define 
PyMPI_HAVE_MPI_C_FLOAT_COMPLEX 1 #define PyMPI_HAVE_MPI_C_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_C_LONG_DOUBLE_COMPLEX 1 #define PyMPI_HAVE_MPI_REAL2 1 #define PyMPI_HAVE_MPI_COMPLEX4 1 #define PyMPI_HAVE_MPI_Reduce_local 1 #endif #if MSMPI_VER >= 0x500 #define PyMPI_HAVE_MPI_COMM_TYPE_SHARED 1 #define PyMPI_HAVE_MPI_Comm_split_type 1 #define PyMPI_HAVE_MPI_Win_allocate_shared 1 #define PyMPI_HAVE_MPI_Win_shared_query 1 #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define PyMPI_HAVE_MPI_Get_library_version 1 #endif #if MSMPI_VER >= 0x600 #define PyMPI_HAVE_MPI_Count 1 #define PyMPI_HAVE_MPI_COUNT 1 #define PyMPI_HAVE_MPI_Type_create_hindexed_block 1 #define PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK 1 #define PyMPI_HAVE_MPI_Type_size_x 1 #define PyMPI_HAVE_MPI_Type_get_extent_x 1 #define PyMPI_HAVE_MPI_Type_get_true_extent_x 1 #define PyMPI_HAVE_MPI_Get_elements_x 1 #define PyMPI_HAVE_MPI_Status_set_elements_x 1 #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define PyMPI_HAVE_MPI_Improbe 1 #define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Op_commutative 1 #define PyMPI_HAVE_MPI_DIST_GRAPH 1 #define PyMPI_HAVE_MPI_UNWEIGHTED 1 #define PyMPI_HAVE_MPI_WEIGHTS_EMPTY 1 #define PyMPI_HAVE_MPI_Dist_graph_create_adjacent 1 #define PyMPI_HAVE_MPI_Dist_graph_create 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors_count 1 #define PyMPI_HAVE_MPI_Dist_graph_neighbors 1 #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Ireduce 1 #endif #if MSMPI_VER >= 0x700 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #endif #if MSMPI_VER >= 0x800 #define PyMPI_HAVE_MPI_Reduce_scatter_block 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #endif #if MSMPI_VER >= 0x900 #define PyMPI_HAVE_MPI_NO_OP 1 #define PyMPI_HAVE_MPI_Win_allocate 1 #define PyMPI_HAVE_MPI_Win_create_dynamic 1 #define PyMPI_HAVE_MPI_Win_attach 1 #define PyMPI_HAVE_MPI_Win_detach 1 #define PyMPI_HAVE_MPI_Rget 1 #define PyMPI_HAVE_MPI_Rput 1 #define PyMPI_HAVE_MPI_Raccumulate 1 #define PyMPI_HAVE_MPI_Win_flush 1 #define PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR 1 #define PyMPI_HAVE_MPI_WIN_MODEL 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC 1 #define PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED 1 #define PyMPI_HAVE_MPI_WIN_SEPARATE 1 #define PyMPI_HAVE_MPI_WIN_UNIFIED 1 #endif #if MSMPI_VER >= 0xA00 #define PyMPI_HAVE_MPI_Get_accumulate #define PyMPI_HAVE_MPI_Rget_accumulate #define PyMPI_HAVE_MPI_Fetch_and_op #define PyMPI_HAVE_MPI_Compare_and_swap #define PyMPI_HAVE_MPI_Win_lock_all #define PyMPI_HAVE_MPI_Win_unlock_all #define PyMPI_HAVE_MPI_Win_flush_all #define PyMPI_HAVE_MPI_Win_flush_local #define PyMPI_HAVE_MPI_Win_flush_local_all #define PyMPI_HAVE_MPI_Win_sync #endif #endif /* !PyMPI_CONFIG_MSMPI_H */ 
mpi4py-3.1.6/src/lib-mpi/config/openmpi.h000066400000000000000000000075571460670727200202010ustar00rootroot00000000000000#ifndef PyMPI_CONFIG_OPENMPI_H #define PyMPI_CONFIG_OPENMPI_H #include "mpi-11.h" #include "mpi-12.h" #include "mpi-20.h" #include "mpi-22.h" #include "mpi-30.h" #include "mpi-31.h" #include "mpi-40.h" #ifndef OMPI_HAVE_FORTRAN_LOGICAL1 #define OMPI_HAVE_FORTRAN_LOGICAL1 0 #endif #ifndef OMPI_HAVE_FORTRAN_LOGICAL2 #define OMPI_HAVE_FORTRAN_LOGICAL2 0 #endif #ifndef OMPI_HAVE_FORTRAN_LOGICAL4 #define OMPI_HAVE_FORTRAN_LOGICAL4 0 #endif #ifndef OMPI_HAVE_FORTRAN_LOGICAL8 #define OMPI_HAVE_FORTRAN_LOGICAL8 0 #endif #if OMPI_HAVE_FORTRAN_LOGICAL1 #define PyMPI_HAVE_MPI_LOGICAL1 1 #endif #if OMPI_HAVE_FORTRAN_LOGICAL2 #define PyMPI_HAVE_MPI_LOGICAL2 1 #endif #if OMPI_HAVE_FORTRAN_LOGICAL4 #define PyMPI_HAVE_MPI_LOGICAL4 1 #endif #if OMPI_HAVE_FORTRAN_LOGICAL8 #define PyMPI_HAVE_MPI_LOGICAL8 1 #endif #if !OMPI_HAVE_FORTRAN_INTEGER1 #undef PyMPI_HAVE_MPI_INTEGER1 #endif #if !OMPI_HAVE_FORTRAN_INTEGER2 #undef PyMPI_HAVE_MPI_INTEGER2 #endif #if !OMPI_HAVE_FORTRAN_INTEGER4 #undef PyMPI_HAVE_MPI_INTEGER4 #endif #if !OMPI_HAVE_FORTRAN_INTEGER8 #undef PyMPI_HAVE_MPI_INTEGER8 #endif #if !OMPI_HAVE_FORTRAN_INTEGER16 #undef PyMPI_HAVE_MPI_INTEGER16 #endif #if !OMPI_HAVE_FORTRAN_REAL2 #undef PyMPI_HAVE_MPI_REAL2 #undef PyMPI_HAVE_MPI_COMPLEX4 #endif #if !OMPI_HAVE_FORTRAN_REAL4 #undef PyMPI_HAVE_MPI_REAL4 #undef PyMPI_HAVE_MPI_COMPLEX8 #endif #if !OMPI_HAVE_FORTRAN_REAL8 #undef PyMPI_HAVE_MPI_REAL8 #undef PyMPI_HAVE_MPI_COMPLEX16 #endif #if !OMPI_HAVE_FORTRAN_REAL16 #undef PyMPI_HAVE_MPI_REAL16 #undef PyMPI_HAVE_MPI_COMPLEX32 #endif #ifdef OMPI_PROVIDE_MPI_FILE_INTERFACE #if OMPI_PROVIDE_MPI_FILE_INTERFACE == 0 #include "mpi-io.h" #endif #endif #if (defined(OMPI_MAJOR_VERSION) && \ defined(OMPI_MINOR_VERSION) && \ defined(OMPI_RELEASE_VERSION)) #define OMPI_NUMVERSION (OMPI_MAJOR_VERSION*10000 + \ OMPI_MINOR_VERSION*100 + \ OMPI_RELEASE_VERSION) #else #define OMPI_NUMVERSION (10100) #endif #if MPI_VERSION < 3 #if OMPI_NUMVERSION >= 10700 #define PyMPI_HAVE_MPI_Message 1 #define PyMPI_HAVE_MPI_MESSAGE_NULL 1 #define PyMPI_HAVE_MPI_MESSAGE_NO_PROC 1 #define PyMPI_HAVE_MPI_Message_c2f 1 #define PyMPI_HAVE_MPI_Message_f2c 1 #define PyMPI_HAVE_MPI_Mprobe 1 #define PyMPI_HAVE_MPI_Improbe 1 #define PyMPI_HAVE_MPI_Mrecv 1 #define PyMPI_HAVE_MPI_Imrecv 1 #define PyMPI_HAVE_MPI_Ibarrier 1 #define PyMPI_HAVE_MPI_Ibcast 1 #define PyMPI_HAVE_MPI_Igather 1 #define PyMPI_HAVE_MPI_Igatherv 1 #define PyMPI_HAVE_MPI_Iscatter 1 #define PyMPI_HAVE_MPI_Iscatterv 1 #define PyMPI_HAVE_MPI_Iallgather 1 #define PyMPI_HAVE_MPI_Iallgatherv 1 #define PyMPI_HAVE_MPI_Ialltoall 1 #define PyMPI_HAVE_MPI_Ialltoallv 1 #define PyMPI_HAVE_MPI_Ialltoallw 1 #define PyMPI_HAVE_MPI_Ireduce 1 #define PyMPI_HAVE_MPI_Iallreduce 1 #define PyMPI_HAVE_MPI_Ireduce_scatter_block 1 #define PyMPI_HAVE_MPI_Ireduce_scatter 1 #define PyMPI_HAVE_MPI_Iscan 1 #define PyMPI_HAVE_MPI_Iexscan 1 #define PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING 1 #define PyMPI_HAVE_MPI_Get_library_version 1 #endif /* OMPI >= 1.7.0 */ #if OMPI_NUMVERSION >= 10704 #define PyMPI_HAVE_MPI_Neighbor_allgather 1 #define PyMPI_HAVE_MPI_Neighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoall 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallv 1 #define PyMPI_HAVE_MPI_Neighbor_alltoallw 1 #define PyMPI_HAVE_MPI_Ineighbor_allgather 1 #define PyMPI_HAVE_MPI_Ineighbor_allgatherv 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoall 1 #define PyMPI_HAVE_MPI_Ineighbor_alltoallv 1 #define 
PyMPI_HAVE_MPI_Ineighbor_alltoallw 1
#endif /* OMPI >= 1.7.4 */
#endif

#if MPI_VERSION == 3
#if OMPI_NUMVERSION <= 10705
#undef PyMPI_HAVE_MPI_Comm_set_info
#undef PyMPI_HAVE_MPI_Comm_get_info
#undef PyMPI_HAVE_MPI_WEIGHTS_EMPTY
#undef PyMPI_HAVE_MPI_ERR_RMA_SHARED
#endif /* OMPI <= 1.7.5 */
#endif

#if OMPI_NUMVERSION >= 40000
#undef PyMPI_HAVE_MPI_LB
#undef PyMPI_HAVE_MPI_UB
#endif /* OMPI >= 4.0.0 */

#endif /* !PyMPI_CONFIG_OPENMPI_H */
mpi4py-3.1.6/src/lib-mpi/config/unknown.h000066400000000000000000000006031460670727200202120ustar00rootroot00000000000000
#ifndef PyMPI_CONFIG_UNKNOWN_H
#define PyMPI_CONFIG_UNKNOWN_H

#include "mpi-11.h"
#include "mpi-12.h"
#include "mpi-20.h"
#include "mpi-22.h"
#include "mpi-30.h"
#include "mpi-31.h"
#include "mpi-40.h"

/* These types are difficult to implement portably */
#undef PyMPI_HAVE_MPI_INTEGER16
#undef PyMPI_HAVE_MPI_REAL2
#undef PyMPI_HAVE_MPI_COMPLEX4

#endif /* !PyMPI_CONFIG_UNKNOWN_H */
mpi4py-3.1.6/src/lib-mpi/fallback.h000066400000000000000000000752011460670727200170130ustar00rootroot00000000000000
#ifndef PyMPI_FALLBACK_H
#define PyMPI_FALLBACK_H

/* ---------------------------------------------------------------- */

#include <stdlib.h> /* for malloc() and free() */

#ifndef PyMPI_MALLOC
#define PyMPI_MALLOC malloc
#endif
#ifndef PyMPI_FREE
#define PyMPI_FREE free
#endif

/* ---------------------------------------------------------------- */

/* Version Number */

#ifndef PyMPI_HAVE_MPI_VERSION
#if !defined(MPI_VERSION)
#define MPI_VERSION 1
#endif
#endif

#ifndef PyMPI_HAVE_MPI_SUBVERSION
#if !defined(MPI_SUBVERSION)
#define MPI_SUBVERSION 0
#endif
#endif

#ifndef PyMPI_HAVE_MPI_Get_version
static int PyMPI_Get_version(int *version, int* subversion)
{
  if (!version) return MPI_ERR_ARG;
  if (!subversion) return MPI_ERR_ARG;
  *version = MPI_VERSION;
  *subversion = MPI_SUBVERSION;
  return MPI_SUCCESS;
}
#undef MPI_Get_version
#define MPI_Get_version PyMPI_Get_version
#endif

#ifndef PyMPI_HAVE_MPI_Get_library_version
#define PyMPI_MAX_LIBRARY_VERSION_STRING 8
static int PyMPI_Get_library_version(char version[], int *rlen)
{
  if (!version) return MPI_ERR_ARG;
  if (!rlen) return MPI_ERR_ARG;
  version[0] = 'M';
  version[1] = 'P';
  version[2] = 'I';
  version[3] = ' ';
  version[4] = '0' + (char) MPI_VERSION;
  version[5] = '.';
  version[6] = '0' + (char) MPI_SUBVERSION;
  version[7] = 0;
  *rlen = 7;
  return MPI_SUCCESS;
}
#undef MPI_MAX_LIBRARY_VERSION_STRING
#define MPI_MAX_LIBRARY_VERSION_STRING \
        PyMPI_MAX_LIBRARY_VERSION_STRING
#undef MPI_Get_library_version
#define MPI_Get_library_version \
        PyMPI_Get_library_version
#endif

/* ---------------------------------------------------------------- */

/* Threading Support */

#ifndef PyMPI_HAVE_MPI_Init_thread
static int PyMPI_Init_thread(int *argc, char ***argv,
                             int required, int *provided)
{
  int ierr = MPI_SUCCESS;
  if (!provided) return MPI_ERR_ARG;
  ierr = MPI_Init(argc, argv);
  if (ierr != MPI_SUCCESS) return ierr;
  *provided = MPI_THREAD_SINGLE;
  return MPI_SUCCESS;
}
#undef MPI_Init_thread
#define MPI_Init_thread PyMPI_Init_thread
#endif

#ifndef PyMPI_HAVE_MPI_Query_thread
static int PyMPI_Query_thread(int *provided)
{
  if (!provided) return MPI_ERR_ARG;
  *provided = MPI_THREAD_SINGLE;
  return MPI_SUCCESS;
}
#undef MPI_Query_thread
#define MPI_Query_thread PyMPI_Query_thread
#endif

#ifndef PyMPI_HAVE_MPI_Is_thread_main
static int PyMPI_Is_thread_main(int *flag)
{
  if (!flag) return MPI_ERR_ARG;
  *flag = 1; /* XXX this is completely broken !!
*/ return MPI_SUCCESS; } #undef MPI_Is_thread_main #define MPI_Is_thread_main PyMPI_Is_thread_main #endif /* ---------------------------------------------------------------- */ /* Status */ #ifndef PyMPI_HAVE_MPI_STATUS_IGNORE static MPI_Status PyMPI_STATUS_IGNORE; #undef MPI_STATUS_IGNORE #define MPI_STATUS_IGNORE ((MPI_Status*)(&PyMPI_STATUS_IGNORE)) #endif #ifndef PyMPI_HAVE_MPI_STATUSES_IGNORE #ifndef PyMPI_MPI_STATUSES_IGNORE_SIZE #define PyMPI_MPI_STATUSES_IGNORE_SIZE 4096 #endif static MPI_Status PyMPI_STATUSES_IGNORE[PyMPI_MPI_STATUSES_IGNORE_SIZE]; #undef MPI_STATUSES_IGNORE #define MPI_STATUSES_IGNORE ((MPI_Status*)(PyMPI_STATUSES_IGNORE)) #endif /* ---------------------------------------------------------------- */ /* Datatypes */ #ifndef PyMPI_HAVE_MPI_LONG_LONG #undef MPI_LONG_LONG #define MPI_LONG_LONG MPI_LONG_LONG_INT #endif #ifndef PyMPI_HAVE_MPI_Type_get_extent static int PyMPI_Type_get_extent(MPI_Datatype datatype, MPI_Aint *lb, MPI_Aint *extent) { int ierr = MPI_SUCCESS; ierr = MPI_Type_lb(datatype, lb); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Type_extent(datatype, extent); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Type_get_extent #define MPI_Type_get_extent PyMPI_Type_get_extent #endif #ifndef PyMPI_HAVE_MPI_Type_dup static int PyMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype) { int ierr = MPI_SUCCESS; ierr = MPI_Type_contiguous(1, datatype, newtype); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Type_commit(newtype); /* the safe way ... */ if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } #undef MPI_Type_dup #define MPI_Type_dup PyMPI_Type_dup #endif #ifndef PyMPI_HAVE_MPI_Type_create_indexed_block static int PyMPI_Type_create_indexed_block(int count, int blocklength, int displacements[], MPI_Datatype oldtype, MPI_Datatype *newtype) { int i, *blocklengths = 0, ierr = MPI_SUCCESS; if (count > 0) { blocklengths = (int *) PyMPI_MALLOC((size_t)count*sizeof(int)); if (!blocklengths) return MPI_ERR_INTERN; } for (i=0; i 0) { blocklengths = (int *) PyMPI_MALLOC((size_t)count*sizeof(int)); if (!blocklengths) return MPI_ERR_INTERN; } for (i=0; i 0); PyMPI_CHKARG(sizes); PyMPI_CHKARG(subsizes); PyMPI_CHKARG(starts); PyMPI_CHKARG(newtype); for (i=0; i 0); PyMPI_CHKARG(subsizes[i] > 0); PyMPI_CHKARG(starts[i] >= 0); PyMPI_CHKARG(sizes[i] >= subsizes[i]); PyMPI_CHKARG(starts[i] <= (sizes[i] - subsizes[i])); } PyMPI_CHKARG((order==MPI_ORDER_C) || (order==MPI_ORDER_FORTRAN)); ierr = MPI_Type_extent(oldtype, &extent); if (ierr != MPI_SUCCESS) return ierr; if (order == MPI_ORDER_FORTRAN) { if (ndims == 1) { ierr = MPI_Type_contiguous(subsizes[0], oldtype, &tmp1); if (ierr != MPI_SUCCESS) return ierr; } else { ierr = MPI_Type_vector(subsizes[1], subsizes[0], sizes[0], oldtype, &tmp1); if (ierr != MPI_SUCCESS) return ierr; size = sizes[0]*extent; for (i=2; i=0; i--) { size *= sizes[i+1]; ierr = MPI_Type_hvector(subsizes[i], 1, size, tmp1, &tmp2); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Type_free(&tmp1); if (ierr != MPI_SUCCESS) return ierr; tmp1 = tmp2; } } /* add displacement and UB */ disps[1] = starts[ndims-1]; size = 1; for (i=ndims-2; i>=0; i--) { size *= sizes[i+1]; disps[1] += size*starts[i]; } } disps[1] *= extent; disps[2] = extent; for (i=0; i 0); PyMPI_CHKARG(blksize * nprocs >= global_size); } j = global_size - blksize*rank; mysize = (blksize < j) ? 
blksize : j; if (mysize < 0) mysize = 0; stride = orig_extent; if (order == MPI_ORDER_FORTRAN) { if (dim == 0) { ierr = MPI_Type_contiguous(mysize, type_old, type_new); if (ierr != MPI_SUCCESS) goto fn_exit; } else { for (i=0; idim; i--) stride *= gsizes[i]; ierr = MPI_Type_hvector(mysize, 1, stride, type_old, type_new); if (ierr != MPI_SUCCESS) goto fn_exit; } } *offset = blksize * rank; if (mysize == 0) *offset = 0; ierr = MPI_SUCCESS; fn_exit: return ierr; } static int PyMPI_Type_cyclic(int *gsizes, int dim, int ndims, int nprocs, int rank, int darg, int order, MPI_Aint orig_extent, MPI_Datatype type_old, MPI_Datatype *type_new, MPI_Aint *offset) { int ierr, blksize, i, blklens[3], st_index, end_index, local_size, rem, count; MPI_Aint stride, disps[3]; MPI_Datatype type_tmp, types[3]; type_tmp = MPI_DATATYPE_NULL; types[0] = types[1] = types[2] = MPI_DATATYPE_NULL; if (darg == MPI_DISTRIBUTE_DFLT_DARG) blksize = 1; else blksize = darg; PyMPI_CHKARG(blksize > 0); st_index = rank*blksize; end_index = gsizes[dim] - 1; if (end_index < st_index) local_size = 0; else { local_size = ((end_index - st_index + 1)/(nprocs*blksize))*blksize; rem = (end_index - st_index + 1) % (nprocs*blksize); local_size += (rem < blksize) ? rem : blksize; } count = local_size/blksize; rem = local_size % blksize; stride = nprocs*blksize*orig_extent; if (order == MPI_ORDER_FORTRAN) for (i=0; idim; i--) stride *= gsizes[i]; ierr = MPI_Type_hvector(count, blksize, stride, type_old, type_new); if (ierr != MPI_SUCCESS) goto fn_exit; /* if the last block is of size less than blksize, include it separately using MPI_Type_struct */ if (rem) { types[0] = *type_new; types[1] = type_old; disps[0] = 0; disps[1] = count*stride; blklens[0] = 1; blklens[1] = rem; ierr = MPI_Type_struct(2, blklens, disps, types, &type_tmp); if (ierr != MPI_SUCCESS) goto fn_exit; ierr = MPI_Type_free(type_new); if (ierr != MPI_SUCCESS) goto fn_exit; *type_new = type_tmp; } /* In the first iteration, we need to set the displacement in that dimension correctly. 
*/ if ( ((order == MPI_ORDER_FORTRAN) && (dim == 0)) || ((order == MPI_ORDER_C) && (dim == ndims-1)) ) { types[0] = MPI_LB; disps[0] = 0; types[1] = *type_new; disps[1] = rank * blksize * orig_extent; types[2] = MPI_UB; disps[2] = orig_extent * gsizes[dim]; blklens[0] = blklens[1] = blklens[2] = 1; ierr = MPI_Type_struct(3, blklens, disps, types, &type_tmp); if (ierr != MPI_SUCCESS) goto fn_exit; ierr = MPI_Type_free(type_new); if (ierr != MPI_SUCCESS) goto fn_exit; *type_new = type_tmp; *offset = 0; } else { *offset = rank * blksize; } if (local_size == 0) *offset = 0; ierr = MPI_SUCCESS; fn_exit: return ierr; } static int PyMPI_Type_create_darray(int size, int rank, int ndims, int gsizes[], int distribs[], int dargs[], int psizes[], int order, MPI_Datatype oldtype, MPI_Datatype *newtype) { int ierr = MPI_SUCCESS, i; int procs, tmp_rank, tmp_size, blklens[3]; MPI_Aint orig_extent, disps[3]; MPI_Datatype type_old, type_new, types[3]; int *coords = 0; MPI_Aint *offsets = 0; orig_extent=0; type_old = type_new = MPI_DATATYPE_NULL; types[0] = types[1] = types[2] = MPI_DATATYPE_NULL; ierr = MPI_Type_extent(oldtype, &orig_extent); if (ierr != MPI_SUCCESS) goto fn_exit; PyMPI_CHKARG(rank >= 0); PyMPI_CHKARG(size > 0); PyMPI_CHKARG(ndims > 0); PyMPI_CHKARG(gsizes); PyMPI_CHKARG(distribs); PyMPI_CHKARG(dargs); PyMPI_CHKARG(psizes); PyMPI_CHKARG((order==MPI_ORDER_C) || (order==MPI_ORDER_FORTRAN) ); for (i=0; i < ndims; i++) { PyMPI_CHKARG(gsizes[1] > 0); PyMPI_CHKARG(psizes[1] > 0); PyMPI_CHKARG((distribs[i] == MPI_DISTRIBUTE_NONE) || (distribs[i] == MPI_DISTRIBUTE_BLOCK) || (distribs[i] == MPI_DISTRIBUTE_CYCLIC)); PyMPI_CHKARG((dargs[i] == MPI_DISTRIBUTE_DFLT_DARG) || (dargs[i] > 0)); PyMPI_CHKARG(!((distribs[i] == MPI_DISTRIBUTE_NONE) && (psizes[i] != 1))); } /* calculate position in Cartesian grid as MPI would (row-major ordering) */ coords = (int *) PyMPI_MALLOC((size_t)ndims*sizeof(int)); if (!coords) { ierr = MPI_ERR_INTERN; goto fn_exit; } offsets = (MPI_Aint *) PyMPI_MALLOC((size_t)ndims*sizeof(MPI_Aint)); if (!offsets) { ierr = MPI_ERR_INTERN; goto fn_exit; } procs = size; tmp_rank = rank; for (i=0; i=0; i--) { if (distribs[i] == MPI_DISTRIBUTE_BLOCK) { ierr = PyMPI_Type_block(gsizes, i, ndims, psizes[i], coords[i], dargs[i], order, orig_extent, type_old, &type_new, offsets+i); if (ierr != MPI_SUCCESS) goto fn_exit; } else if (distribs[i] == MPI_DISTRIBUTE_CYCLIC) { ierr = PyMPI_Type_cyclic(gsizes, i, ndims, psizes[i], coords[i], dargs[i], order, orig_extent, type_old, &type_new, offsets+i); if (ierr != MPI_SUCCESS) goto fn_exit; } else if (distribs[i] == MPI_DISTRIBUTE_NONE) { /* treat it as a block distribution on 1 process */ ierr = PyMPI_Type_block(gsizes, i, ndims, psizes[i], coords[i], MPI_DISTRIBUTE_DFLT_DARG, order, orig_extent, type_old, &type_new, offsets+i); if (ierr != MPI_SUCCESS) goto fn_exit; } if (i != ndims-1) { ierr = MPI_Type_free(&type_old); if (ierr != MPI_SUCCESS) goto fn_exit; } type_old = type_new; } /* add displacement and UB */ disps[1] = offsets[ndims-1]; tmp_size = 1; for (i=ndims-2; i>=0; i--) { tmp_size *= gsizes[i+1]; disps[1] += tmp_size*offsets[i]; } /* rest done below for both Fortran and C order */ } disps[0] = 0; disps[1] *= orig_extent; disps[2] = orig_extent; for (i=0; i 0) p[n] = 0; #endif status->MPI_SOURCE = MPI_ANY_SOURCE; status->MPI_TAG = MPI_ANY_TAG; status->MPI_ERROR = MPI_SUCCESS; #ifdef PyMPI_HAVE_MPI_Status_set_elements (void)MPI_Status_set_elements(status, MPI_BYTE, 0); #endif #ifdef PyMPI_HAVE_MPI_Status_set_cancelled 
(void)MPI_Status_set_cancelled(status, 0); #endif } return MPI_SUCCESS; } #undef MPI_Request_get_status #define MPI_Request_get_status PyMPI_Request_get_status #endif #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block static int PyMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) { int ierr = MPI_SUCCESS; int n = 1, *recvcounts = 0; ierr = MPI_Comm_size(comm, &n); if (ierr != MPI_SUCCESS) return ierr; recvcounts = (int *) PyMPI_MALLOC((size_t)n*sizeof(int)); if (!recvcounts) return MPI_ERR_INTERN; while (n-- > 0) recvcounts[n] = recvcount; ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op, comm); PyMPI_FREE(recvcounts); return ierr; } #undef MPI_Reduce_scatter_block #define MPI_Reduce_scatter_block PyMPI_Reduce_scatter_block #endif /* ---------------------------------------------------------------- */ /* Communicator Info */ #ifndef PyMPI_HAVE_MPI_Comm_dup_with_info static int PyMPI_Comm_dup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm *newcomm) { int dummy, ierr; if (info != MPI_INFO_NULL) { ierr = MPI_Info_get_nkeys(info, &dummy); if (ierr != MPI_SUCCESS) return ierr; } return MPI_Comm_dup(comm, newcomm); } #undef MPI_Comm_dup_with_info #define MPI_Comm_dup_with_info PyMPI_Comm_dup_with_info #endif #ifndef PyMPI_HAVE_MPI_Comm_set_info static int PyMPI_Comm_set_info(MPI_Comm comm, MPI_Info info) { int dummy, ierr; ierr = MPI_Comm_size(comm, &dummy); if (ierr != MPI_SUCCESS) return ierr; if (info != MPI_INFO_NULL) { ierr = MPI_Info_get_nkeys(info, &dummy); if (ierr != MPI_SUCCESS) return ierr; } return MPI_SUCCESS; } #undef MPI_Comm_set_info #define MPI_Comm_set_info PyMPI_Comm_set_info #endif #ifndef PyMPI_HAVE_MPI_Comm_get_info static int PyMPI_Comm_get_info(MPI_Comm comm, MPI_Info *info) { int dummy, ierr; ierr = MPI_Comm_size(comm, &dummy); if (ierr != MPI_SUCCESS) return ierr; return MPI_Info_create(info); } #undef MPI_Comm_get_info #define MPI_Comm_get_info PyMPI_Comm_get_info #endif /* ---------------------------------------------------------------- */ #if !defined(PyMPI_HAVE_MPI_WEIGHTS_EMPTY) static const int PyMPI_WEIGHTS_EMPTY_ARRAY[1] = {MPI_UNDEFINED}; static int * const PyMPI_WEIGHTS_EMPTY = (int*)PyMPI_WEIGHTS_EMPTY_ARRAY; #undef MPI_WEIGHTS_EMPTY #define MPI_WEIGHTS_EMPTY PyMPI_WEIGHTS_EMPTY #endif /* ---------------------------------------------------------------- */ /* Memory Allocation */ #if !defined(PyMPI_HAVE_MPI_Alloc_mem) || \ !defined(PyMPI_HAVE_MPI_Free_mem) static int PyMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr) { char *buf = 0, **basebuf = 0; if (size < 0) return MPI_ERR_ARG; if (!baseptr) return MPI_ERR_ARG; if (size == 0) size = 1; buf = (char *) PyMPI_MALLOC((size_t)size); if (!buf) return MPI_ERR_NO_MEM; basebuf = (char **) baseptr; *basebuf = buf; return MPI_SUCCESS; } #undef MPI_Alloc_mem #define MPI_Alloc_mem PyMPI_Alloc_mem static int PyMPI_Free_mem(void *baseptr) { if (!baseptr) return MPI_ERR_ARG; PyMPI_FREE(baseptr); return MPI_SUCCESS; } #undef MPI_Free_mem #define MPI_Free_mem PyMPI_Free_mem #endif /* ---------------------------------------------------------------- */ #ifndef PyMPI_HAVE_MPI_Win_allocate #ifdef PyMPI_HAVE_MPI_Win_create static int PyMPI_WIN_KEYVAL_MPIMEM = MPI_KEYVAL_INVALID; static int MPIAPI PyMPI_win_free_mpimem(MPI_Win win, int k, void *v, void *xs) { (void)win; (void)k; (void)xs; /* unused */ return MPI_Free_mem(v); } static int MPIAPI 
PyMPI_win_free_keyval(MPI_Comm comm, int k, void *v, void *xs) { int ierr = MPI_SUCCESS; (void)comm; (void)xs; /* unused */ ierr = MPI_Win_free_keyval((int *)v); if (ierr != MPI_SUCCESS) return ierr; ierr = MPI_Comm_free_keyval(&k); if (ierr != MPI_SUCCESS) return ierr; return MPI_SUCCESS; } static int PyMPI_Win_allocate(MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *baseptr_, MPI_Win *win_) { int ierr = MPI_SUCCESS; void *baseptr = MPI_BOTTOM; MPI_Win win = MPI_WIN_NULL; if (!baseptr_) return MPI_ERR_ARG; if (!win_) return MPI_ERR_ARG; ierr = MPI_Alloc_mem(size?size:1, info, &baseptr); if (ierr != MPI_SUCCESS) goto error; ierr = MPI_Win_create(baseptr, size, disp_unit, info, comm, &win); if (ierr != MPI_SUCCESS) goto error; #if defined(PyMPI_HAVE_MPI_Win_create_keyval) && \ defined(PyMPI_HAVE_MPI_Win_set_attr) if (PyMPI_WIN_KEYVAL_MPIMEM == MPI_KEYVAL_INVALID) { int comm_keyval = MPI_KEYVAL_INVALID; ierr = MPI_Win_create_keyval(MPI_WIN_NULL_COPY_FN, PyMPI_win_free_mpimem, &PyMPI_WIN_KEYVAL_MPIMEM, NULL); if (ierr != MPI_SUCCESS) goto error; ierr = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, PyMPI_win_free_keyval, &comm_keyval, NULL); if (ierr == MPI_SUCCESS) (void)MPI_Comm_set_attr(MPI_COMM_SELF, comm_keyval, &PyMPI_WIN_KEYVAL_MPIMEM); } ierr = MPI_Win_set_attr(win, PyMPI_WIN_KEYVAL_MPIMEM, baseptr); if (ierr != MPI_SUCCESS) goto error; #endif *((void**)baseptr_) = baseptr; *win_ = win; return MPI_SUCCESS; error: if (baseptr != MPI_BOTTOM) (void)MPI_Free_mem(baseptr); if (win != MPI_WIN_NULL) (void)MPI_Win_free(&win); return ierr; } #undef MPI_Win_allocate #define MPI_Win_allocate PyMPI_Win_allocate #endif #endif #ifndef PyMPI_HAVE_MPI_Win_set_info static int PyMPI_Win_set_info(MPI_Win win, MPI_Info info) { int dummy, ierr; if (win == MPI_WIN_NULL) return MPI_ERR_WIN; if (info != MPI_INFO_NULL) { ierr = MPI_Info_get_nkeys(info, &dummy); if (ierr != MPI_SUCCESS) return ierr; } return MPI_SUCCESS; } #undef MPI_Win_set_info #define MPI_Win_set_info PyMPI_Win_set_info #endif #ifndef PyMPI_HAVE_MPI_Win_get_info static int PyMPI_Win_get_info(MPI_Win win, MPI_Info *info) { if (win == MPI_WIN_NULL) return MPI_ERR_WIN; return MPI_Info_create(info); } #undef MPI_Win_get_info #define MPI_Win_get_info PyMPI_Win_get_info #endif /* ---------------------------------------------------------------- */ #endif /* !PyMPI_FALLBACK_H */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/lib-mpi/missing.h000066400000000000000000003001471460670727200167250ustar00rootroot00000000000000#ifndef PyMPI_MISSING_H #define PyMPI_MISSING_H #ifndef PyMPI_UNUSED # if defined(__GNUC__) # if !defined(__cplusplus) || (__GNUC__>3||(__GNUC__==3&&__GNUC_MINOR__>=4)) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif # elif defined(__INTEL_COMPILER) || defined(__ICC) # define PyMPI_UNUSED __attribute__ ((__unused__)) # else # define PyMPI_UNUSED # endif #endif #define PyMPI_ERR_UNAVAILABLE (-1431655766) /*0xaaaaaaaa*/ static PyMPI_UNUSED int PyMPI_UNAVAILABLE(const char *name,...) 
{ (void)name; return PyMPI_ERR_UNAVAILABLE; } #ifndef PyMPI_HAVE_MPI_Aint #undef MPI_Aint typedef long PyMPI_MPI_Aint; #define MPI_Aint PyMPI_MPI_Aint #endif #ifndef PyMPI_HAVE_MPI_Offset #undef MPI_Offset typedef long PyMPI_MPI_Offset; #define MPI_Offset PyMPI_MPI_Offset #endif #ifndef PyMPI_HAVE_MPI_Count #undef MPI_Count typedef MPI_Offset PyMPI_MPI_Count; #define MPI_Count PyMPI_MPI_Count #endif #ifndef PyMPI_HAVE_MPI_Status #undef MPI_Status typedef struct PyMPI_MPI_Status { int MPI_SOURCE; int MPI_TAG; int MPI_ERROR; } PyMPI_MPI_Status; #define MPI_Status PyMPI_MPI_Status #endif #ifndef PyMPI_HAVE_MPI_Datatype #undef MPI_Datatype typedef void *PyMPI_MPI_Datatype; #define MPI_Datatype PyMPI_MPI_Datatype #endif #ifndef PyMPI_HAVE_MPI_Request #undef MPI_Request typedef void *PyMPI_MPI_Request; #define MPI_Request PyMPI_MPI_Request #endif #ifndef PyMPI_HAVE_MPI_Message #undef MPI_Message typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #ifndef PyMPI_HAVE_MPI_Op #undef MPI_Op typedef void *PyMPI_MPI_Op; #define MPI_Op PyMPI_MPI_Op #endif #ifndef PyMPI_HAVE_MPI_Group #undef MPI_Group typedef void *PyMPI_MPI_Group; #define MPI_Group PyMPI_MPI_Group #endif #ifndef PyMPI_HAVE_MPI_Info #undef MPI_Info typedef void *PyMPI_MPI_Info; #define MPI_Info PyMPI_MPI_Info #endif #ifndef PyMPI_HAVE_MPI_Comm #undef MPI_Comm typedef void *PyMPI_MPI_Comm; #define MPI_Comm PyMPI_MPI_Comm #endif #ifndef PyMPI_HAVE_MPI_Win #undef MPI_Win typedef void *PyMPI_MPI_Win; #define MPI_Win PyMPI_MPI_Win #endif #ifndef PyMPI_HAVE_MPI_File #undef MPI_File typedef void *PyMPI_MPI_File; #define MPI_File PyMPI_MPI_File #endif #ifndef PyMPI_HAVE_MPI_Errhandler #undef MPI_Errhandler typedef void *PyMPI_MPI_Errhandler; #define MPI_Errhandler PyMPI_MPI_Errhandler #endif #ifndef PyMPI_HAVE_MPI_UNDEFINED #undef MPI_UNDEFINED #define MPI_UNDEFINED (-32766) #endif #ifndef PyMPI_HAVE_MPI_ANY_SOURCE #undef MPI_ANY_SOURCE #define MPI_ANY_SOURCE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_ANY_TAG #undef MPI_ANY_TAG #define MPI_ANY_TAG (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_PROC_NULL #undef MPI_PROC_NULL #define MPI_PROC_NULL (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_ROOT #undef MPI_ROOT #define MPI_ROOT (MPI_PROC_NULL) #endif #ifndef PyMPI_HAVE_MPI_IDENT #undef MPI_IDENT #define MPI_IDENT (1) #endif #ifndef PyMPI_HAVE_MPI_CONGRUENT #undef MPI_CONGRUENT #define MPI_CONGRUENT (2) #endif #ifndef PyMPI_HAVE_MPI_SIMILAR #undef MPI_SIMILAR #define MPI_SIMILAR (3) #endif #ifndef PyMPI_HAVE_MPI_UNEQUAL #undef MPI_UNEQUAL #define MPI_UNEQUAL (4) #endif #ifndef PyMPI_HAVE_MPI_BOTTOM #undef MPI_BOTTOM #define MPI_BOTTOM ((void*)0) #endif #ifndef PyMPI_HAVE_MPI_IN_PLACE #undef MPI_IN_PLACE #define MPI_IN_PLACE ((void*)0) #endif #ifndef PyMPI_HAVE_MPI_KEYVAL_INVALID #undef MPI_KEYVAL_INVALID #define MPI_KEYVAL_INVALID (0) #endif #ifndef PyMPI_HAVE_MPI_MAX_OBJECT_NAME #undef MPI_MAX_OBJECT_NAME #define MPI_MAX_OBJECT_NAME (1) #endif #ifndef PyMPI_HAVE_MPI_DATATYPE_NULL #undef MPI_DATATYPE_NULL #define MPI_DATATYPE_NULL ((MPI_Datatype)0) #endif #ifndef PyMPI_HAVE_MPI_PACKED #undef MPI_PACKED #define MPI_PACKED ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_BYTE #undef MPI_BYTE #define MPI_BYTE ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_AINT #undef MPI_AINT #define MPI_AINT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_OFFSET #undef MPI_OFFSET #define MPI_OFFSET ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COUNT #undef MPI_COUNT 
#define MPI_COUNT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CHAR #undef MPI_CHAR #define MPI_CHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_WCHAR #undef MPI_WCHAR #define MPI_WCHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_SIGNED_CHAR #undef MPI_SIGNED_CHAR #define MPI_SIGNED_CHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_SHORT #undef MPI_SHORT #define MPI_SHORT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT #undef MPI_INT #define MPI_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG #undef MPI_LONG #define MPI_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_LONG #undef MPI_LONG_LONG #define MPI_LONG_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_LONG_INT #undef MPI_LONG_LONG_INT #define MPI_LONG_LONG_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_CHAR #undef MPI_UNSIGNED_CHAR #define MPI_UNSIGNED_CHAR ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_SHORT #undef MPI_UNSIGNED_SHORT #define MPI_UNSIGNED_SHORT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED #undef MPI_UNSIGNED #define MPI_UNSIGNED ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_LONG #undef MPI_UNSIGNED_LONG #define MPI_UNSIGNED_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UNSIGNED_LONG_LONG #undef MPI_UNSIGNED_LONG_LONG #define MPI_UNSIGNED_LONG_LONG ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_FLOAT #undef MPI_FLOAT #define MPI_FLOAT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE #undef MPI_DOUBLE #define MPI_DOUBLE ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_DOUBLE #undef MPI_LONG_DOUBLE #define MPI_LONG_DOUBLE ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_BOOL #undef MPI_C_BOOL #define MPI_C_BOOL ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT8_T #undef MPI_INT8_T #define MPI_INT8_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT16_T #undef MPI_INT16_T #define MPI_INT16_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT32_T #undef MPI_INT32_T #define MPI_INT32_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INT64_T #undef MPI_INT64_T #define MPI_INT64_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT8_T #undef MPI_UINT8_T #define MPI_UINT8_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT16_T #undef MPI_UINT16_T #define MPI_UINT16_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT32_T #undef MPI_UINT32_T #define MPI_UINT32_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UINT64_T #undef MPI_UINT64_T #define MPI_UINT64_T ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_COMPLEX #undef MPI_C_COMPLEX #define MPI_C_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_FLOAT_COMPLEX #undef MPI_C_FLOAT_COMPLEX #define MPI_C_FLOAT_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_DOUBLE_COMPLEX #undef MPI_C_DOUBLE_COMPLEX #define MPI_C_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_C_LONG_DOUBLE_COMPLEX #undef MPI_C_LONG_DOUBLE_COMPLEX #define MPI_C_LONG_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_BOOL #undef MPI_CXX_BOOL #define MPI_CXX_BOOL 
((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_FLOAT_COMPLEX #undef MPI_CXX_FLOAT_COMPLEX #define MPI_CXX_FLOAT_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_DOUBLE_COMPLEX #undef MPI_CXX_DOUBLE_COMPLEX #define MPI_CXX_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CXX_LONG_DOUBLE_COMPLEX #undef MPI_CXX_LONG_DOUBLE_COMPLEX #define MPI_CXX_LONG_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_SHORT_INT #undef MPI_SHORT_INT #define MPI_SHORT_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_2INT #undef MPI_2INT #define MPI_2INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_INT #undef MPI_LONG_INT #define MPI_LONG_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_FLOAT_INT #undef MPI_FLOAT_INT #define MPI_FLOAT_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE_INT #undef MPI_DOUBLE_INT #define MPI_DOUBLE_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LONG_DOUBLE_INT #undef MPI_LONG_DOUBLE_INT #define MPI_LONG_DOUBLE_INT ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_CHARACTER #undef MPI_CHARACTER #define MPI_CHARACTER ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL #undef MPI_LOGICAL #define MPI_LOGICAL ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER #undef MPI_INTEGER #define MPI_INTEGER ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL #undef MPI_REAL #define MPI_REAL ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE_PRECISION #undef MPI_DOUBLE_PRECISION #define MPI_DOUBLE_PRECISION ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX #undef MPI_COMPLEX #define MPI_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_DOUBLE_COMPLEX #undef MPI_DOUBLE_COMPLEX #define MPI_DOUBLE_COMPLEX ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL1 #undef MPI_LOGICAL1 #define MPI_LOGICAL1 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL2 #undef MPI_LOGICAL2 #define MPI_LOGICAL2 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL4 #undef MPI_LOGICAL4 #define MPI_LOGICAL4 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOGICAL8 #undef MPI_LOGICAL8 #define MPI_LOGICAL8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER1 #undef MPI_INTEGER1 #define MPI_INTEGER1 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER2 #undef MPI_INTEGER2 #define MPI_INTEGER2 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER4 #undef MPI_INTEGER4 #define MPI_INTEGER4 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER8 #undef MPI_INTEGER8 #define MPI_INTEGER8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_INTEGER16 #undef MPI_INTEGER16 #define MPI_INTEGER16 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL2 #undef MPI_REAL2 #define MPI_REAL2 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL4 #undef MPI_REAL4 #define MPI_REAL4 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL8 #undef MPI_REAL8 #define MPI_REAL8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_REAL16 #undef MPI_REAL16 #define MPI_REAL16 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX4 #undef MPI_COMPLEX4 #define MPI_COMPLEX4 
((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX8 #undef MPI_COMPLEX8 #define MPI_COMPLEX8 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX16 #undef MPI_COMPLEX16 #define MPI_COMPLEX16 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMPLEX32 #undef MPI_COMPLEX32 #define MPI_COMPLEX32 ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_UB #undef MPI_UB #define MPI_UB ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_LB #undef MPI_LB #define MPI_LB ((MPI_Datatype)MPI_DATATYPE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Type_lb #undef MPI_Type_lb #define MPI_Type_lb(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_lb",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_ub #undef MPI_Type_ub #define MPI_Type_ub(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_ub",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_extent #undef MPI_Type_extent #define MPI_Type_extent(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_extent",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Address #undef MPI_Address #define MPI_Address(a1,a2) PyMPI_UNAVAILABLE("MPI_Address",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_hvector #undef MPI_Type_hvector #define MPI_Type_hvector(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_hvector",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_hindexed #undef MPI_Type_hindexed #define MPI_Type_hindexed(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_hindexed",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_struct #undef MPI_Type_struct #define MPI_Type_struct(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_struct",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HVECTOR_INTEGER #undef MPI_COMBINER_HVECTOR_INTEGER #define MPI_COMBINER_HVECTOR_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HINDEXED_INTEGER #undef MPI_COMBINER_HINDEXED_INTEGER #define MPI_COMBINER_HINDEXED_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_STRUCT_INTEGER #undef MPI_COMBINER_STRUCT_INTEGER #define MPI_COMBINER_STRUCT_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Type_dup #undef MPI_Type_dup #define MPI_Type_dup(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_dup",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_contiguous #undef MPI_Type_contiguous #define MPI_Type_contiguous(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_contiguous",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_vector #undef MPI_Type_vector #define MPI_Type_vector(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_vector",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_indexed #undef MPI_Type_indexed #define MPI_Type_indexed(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_indexed",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_indexed_block #undef MPI_Type_create_indexed_block #define MPI_Type_create_indexed_block(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_indexed_block",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_ORDER_C #undef MPI_ORDER_C #define MPI_ORDER_C (0) #endif #ifndef PyMPI_HAVE_MPI_ORDER_FORTRAN #undef MPI_ORDER_FORTRAN #define MPI_ORDER_FORTRAN (1) #endif #ifndef PyMPI_HAVE_MPI_Type_create_subarray #undef MPI_Type_create_subarray #define MPI_Type_create_subarray(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Type_create_subarray",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_DISTRIBUTE_NONE #undef MPI_DISTRIBUTE_NONE #define MPI_DISTRIBUTE_NONE (0) #endif #ifndef PyMPI_HAVE_MPI_DISTRIBUTE_BLOCK #undef MPI_DISTRIBUTE_BLOCK #define MPI_DISTRIBUTE_BLOCK (1) #endif #ifndef PyMPI_HAVE_MPI_DISTRIBUTE_CYCLIC #undef MPI_DISTRIBUTE_CYCLIC #define MPI_DISTRIBUTE_CYCLIC (2) #endif #ifndef 
PyMPI_HAVE_MPI_DISTRIBUTE_DFLT_DARG #undef MPI_DISTRIBUTE_DFLT_DARG #define MPI_DISTRIBUTE_DFLT_DARG (4) #endif #ifndef PyMPI_HAVE_MPI_Type_create_darray #undef MPI_Type_create_darray #define MPI_Type_create_darray(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Type_create_darray",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Get_address #undef MPI_Get_address #define MPI_Get_address MPI_Address #endif #ifndef PyMPI_HAVE_MPI_Aint_add #undef MPI_Aint_add #define MPI_Aint_add(a1,a2) PyMPI_UNAVAILABLE("MPI_Aint_add",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Aint_diff #undef MPI_Aint_diff #define MPI_Aint_diff(a1,a2) PyMPI_UNAVAILABLE("MPI_Aint_diff",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_create_hvector #undef MPI_Type_create_hvector #define MPI_Type_create_hvector MPI_Type_hvector #endif #ifndef PyMPI_HAVE_MPI_Type_create_hindexed #undef MPI_Type_create_hindexed #define MPI_Type_create_hindexed MPI_Type_hindexed #endif #ifndef PyMPI_HAVE_MPI_Type_create_hindexed_block #undef MPI_Type_create_hindexed_block #define MPI_Type_create_hindexed_block(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_create_hindexed_block",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_create_struct #undef MPI_Type_create_struct #define MPI_Type_create_struct MPI_Type_struct #endif #ifndef PyMPI_HAVE_MPI_Type_create_resized #undef MPI_Type_create_resized #define MPI_Type_create_resized(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Type_create_resized",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_size #undef MPI_Type_size #define MPI_Type_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_size_x #undef MPI_Type_size_x #define MPI_Type_size_x(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_size_x",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_get_extent #undef MPI_Type_get_extent #define MPI_Type_get_extent(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_extent",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_get_extent_x #undef MPI_Type_get_extent_x #define MPI_Type_get_extent_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_extent_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_get_true_extent #undef MPI_Type_get_true_extent #define MPI_Type_get_true_extent(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_true_extent",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_get_true_extent_x #undef MPI_Type_get_true_extent_x #define MPI_Type_get_true_extent_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_true_extent_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_create_f90_integer #undef MPI_Type_create_f90_integer #define MPI_Type_create_f90_integer(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_create_f90_integer",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_create_f90_real #undef MPI_Type_create_f90_real #define MPI_Type_create_f90_real(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_create_f90_real",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_create_f90_complex #undef MPI_Type_create_f90_complex #define MPI_Type_create_f90_complex(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_create_f90_complex",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_TYPECLASS_INTEGER #undef MPI_TYPECLASS_INTEGER #define MPI_TYPECLASS_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_TYPECLASS_REAL #undef MPI_TYPECLASS_REAL #define MPI_TYPECLASS_REAL (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_TYPECLASS_COMPLEX #undef MPI_TYPECLASS_COMPLEX #define MPI_TYPECLASS_COMPLEX (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Type_match_size #undef MPI_Type_match_size #define MPI_Type_match_size(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_match_size",a1,a2,a3) #endif #ifndef 
PyMPI_HAVE_MPI_Type_commit #undef MPI_Type_commit #define MPI_Type_commit(a1) PyMPI_UNAVAILABLE("MPI_Type_commit",a1) #endif #ifndef PyMPI_HAVE_MPI_Type_free #undef MPI_Type_free #define MPI_Type_free(a1) PyMPI_UNAVAILABLE("MPI_Type_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Pack #undef MPI_Pack #define MPI_Pack(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Pack",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Unpack #undef MPI_Unpack #define MPI_Unpack(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Unpack",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Pack_size #undef MPI_Pack_size #define MPI_Pack_size(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Pack_size",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Pack_external #undef MPI_Pack_external #define MPI_Pack_external(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Pack_external",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Unpack_external #undef MPI_Unpack_external #define MPI_Unpack_external(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Unpack_external",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Pack_external_size #undef MPI_Pack_external_size #define MPI_Pack_external_size(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Pack_external_size",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_NAMED #undef MPI_COMBINER_NAMED #define MPI_COMBINER_NAMED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_DUP #undef MPI_COMBINER_DUP #define MPI_COMBINER_DUP (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_CONTIGUOUS #undef MPI_COMBINER_CONTIGUOUS #define MPI_COMBINER_CONTIGUOUS (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_VECTOR #undef MPI_COMBINER_VECTOR #define MPI_COMBINER_VECTOR (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HVECTOR #undef MPI_COMBINER_HVECTOR #define MPI_COMBINER_HVECTOR (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_INDEXED #undef MPI_COMBINER_INDEXED #define MPI_COMBINER_INDEXED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HINDEXED #undef MPI_COMBINER_HINDEXED #define MPI_COMBINER_HINDEXED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_INDEXED_BLOCK #undef MPI_COMBINER_INDEXED_BLOCK #define MPI_COMBINER_INDEXED_BLOCK (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_HINDEXED_BLOCK #undef MPI_COMBINER_HINDEXED_BLOCK #define MPI_COMBINER_HINDEXED_BLOCK (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_STRUCT #undef MPI_COMBINER_STRUCT #define MPI_COMBINER_STRUCT (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_SUBARRAY #undef MPI_COMBINER_SUBARRAY #define MPI_COMBINER_SUBARRAY (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_DARRAY #undef MPI_COMBINER_DARRAY #define MPI_COMBINER_DARRAY (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_F90_REAL #undef MPI_COMBINER_F90_REAL #define MPI_COMBINER_F90_REAL (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_F90_COMPLEX #undef MPI_COMBINER_F90_COMPLEX #define MPI_COMBINER_F90_COMPLEX (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_F90_INTEGER #undef MPI_COMBINER_F90_INTEGER #define MPI_COMBINER_F90_INTEGER (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_COMBINER_RESIZED #undef MPI_COMBINER_RESIZED #define MPI_COMBINER_RESIZED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Type_get_envelope #undef MPI_Type_get_envelope #define MPI_Type_get_envelope(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Type_get_envelope",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Type_get_contents #undef MPI_Type_get_contents #define MPI_Type_get_contents(a1,a2,a3,a4,a5,a6,a7) 
PyMPI_UNAVAILABLE("MPI_Type_get_contents",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Type_get_name #undef MPI_Type_get_name #define MPI_Type_get_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_get_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_set_name #undef MPI_Type_set_name #define MPI_Type_set_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_set_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_get_attr #undef MPI_Type_get_attr #define MPI_Type_get_attr(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Type_get_attr",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_set_attr #undef MPI_Type_set_attr #define MPI_Type_set_attr(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Type_set_attr",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Type_delete_attr #undef MPI_Type_delete_attr #define MPI_Type_delete_attr(a1,a2) PyMPI_UNAVAILABLE("MPI_Type_delete_attr",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_copy_attr_function #undef MPI_Type_copy_attr_function typedef int (MPIAPI PyMPI_MPI_Type_copy_attr_function)(MPI_Datatype,int,void*,void*,void*,int*); #define MPI_Type_copy_attr_function PyMPI_MPI_Type_copy_attr_function #endif #ifndef PyMPI_HAVE_MPI_Type_delete_attr_function #undef MPI_Type_delete_attr_function typedef int (MPIAPI PyMPI_MPI_Type_delete_attr_function)(MPI_Datatype,int,void*,void*); #define MPI_Type_delete_attr_function PyMPI_MPI_Type_delete_attr_function #endif #ifndef PyMPI_HAVE_MPI_TYPE_NULL_COPY_FN #undef MPI_TYPE_NULL_COPY_FN #define MPI_TYPE_NULL_COPY_FN ((MPI_Type_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_TYPE_DUP_FN #undef MPI_TYPE_DUP_FN #define MPI_TYPE_DUP_FN ((MPI_Type_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_TYPE_NULL_DELETE_FN #undef MPI_TYPE_NULL_DELETE_FN #define MPI_TYPE_NULL_DELETE_FN ((MPI_Type_delete_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_Type_create_keyval #undef MPI_Type_create_keyval #define MPI_Type_create_keyval(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Type_create_keyval",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Type_free_keyval #undef MPI_Type_free_keyval #define MPI_Type_free_keyval(a1) PyMPI_UNAVAILABLE("MPI_Type_free_keyval",a1) #endif #ifndef PyMPI_HAVE_MPI_STATUS_IGNORE #undef MPI_STATUS_IGNORE #define MPI_STATUS_IGNORE ((MPI_Status*)0) #endif #ifndef PyMPI_HAVE_MPI_STATUSES_IGNORE #undef MPI_STATUSES_IGNORE #define MPI_STATUSES_IGNORE ((MPI_Status*)0) #endif #ifndef PyMPI_HAVE_MPI_Get_count #undef MPI_Get_count #define MPI_Get_count(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Get_count",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Get_elements #undef MPI_Get_elements #define MPI_Get_elements(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Get_elements",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Get_elements_x #undef MPI_Get_elements_x #define MPI_Get_elements_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Get_elements_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Status_set_elements #undef MPI_Status_set_elements #define MPI_Status_set_elements(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Status_set_elements",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Status_set_elements_x #undef MPI_Status_set_elements_x #define MPI_Status_set_elements_x(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Status_set_elements_x",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Test_cancelled #undef MPI_Test_cancelled #define MPI_Test_cancelled(a1,a2) PyMPI_UNAVAILABLE("MPI_Test_cancelled",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_set_cancelled #undef MPI_Status_set_cancelled #define MPI_Status_set_cancelled(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_set_cancelled",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_REQUEST_NULL #undef MPI_REQUEST_NULL #define MPI_REQUEST_NULL 
((MPI_Request)0) #endif #ifndef PyMPI_HAVE_MPI_Request_free #undef MPI_Request_free #define MPI_Request_free(a1) PyMPI_UNAVAILABLE("MPI_Request_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Wait #undef MPI_Wait #define MPI_Wait(a1,a2) PyMPI_UNAVAILABLE("MPI_Wait",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Test #undef MPI_Test #define MPI_Test(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Test",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Request_get_status #undef MPI_Request_get_status #define MPI_Request_get_status(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Request_get_status",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Cancel #undef MPI_Cancel #define MPI_Cancel(a1) PyMPI_UNAVAILABLE("MPI_Cancel",a1) #endif #ifndef PyMPI_HAVE_MPI_Waitany #undef MPI_Waitany #define MPI_Waitany(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Waitany",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Testany #undef MPI_Testany #define MPI_Testany(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Testany",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Waitall #undef MPI_Waitall #define MPI_Waitall(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Waitall",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Testall #undef MPI_Testall #define MPI_Testall(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Testall",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Waitsome #undef MPI_Waitsome #define MPI_Waitsome(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Waitsome",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Testsome #undef MPI_Testsome #define MPI_Testsome(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Testsome",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Start #undef MPI_Start #define MPI_Start(a1) PyMPI_UNAVAILABLE("MPI_Start",a1) #endif #ifndef PyMPI_HAVE_MPI_Startall #undef MPI_Startall #define MPI_Startall(a1,a2) PyMPI_UNAVAILABLE("MPI_Startall",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Grequest_cancel_function #undef MPI_Grequest_cancel_function typedef int (MPIAPI PyMPI_MPI_Grequest_cancel_function)(void*,int); #define MPI_Grequest_cancel_function PyMPI_MPI_Grequest_cancel_function #endif #ifndef PyMPI_HAVE_MPI_Grequest_free_function #undef MPI_Grequest_free_function typedef int (MPIAPI PyMPI_MPI_Grequest_free_function)(void*); #define MPI_Grequest_free_function PyMPI_MPI_Grequest_free_function #endif #ifndef PyMPI_HAVE_MPI_Grequest_query_function #undef MPI_Grequest_query_function typedef int (MPIAPI PyMPI_MPI_Grequest_query_function)(void*,MPI_Status*); #define MPI_Grequest_query_function PyMPI_MPI_Grequest_query_function #endif #ifndef PyMPI_HAVE_MPI_Grequest_start #undef MPI_Grequest_start #define MPI_Grequest_start(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Grequest_start",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Grequest_complete #undef MPI_Grequest_complete #define MPI_Grequest_complete(a1) PyMPI_UNAVAILABLE("MPI_Grequest_complete",a1) #endif #ifndef PyMPI_HAVE_MPI_OP_NULL #undef MPI_OP_NULL #define MPI_OP_NULL ((MPI_Op)0) #endif #ifndef PyMPI_HAVE_MPI_MAX #undef MPI_MAX #define MPI_MAX ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_MIN #undef MPI_MIN #define MPI_MIN ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_SUM #undef MPI_SUM #define MPI_SUM ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_PROD #undef MPI_PROD #define MPI_PROD ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_LAND #undef MPI_LAND #define MPI_LAND ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_BAND #undef MPI_BAND #define MPI_BAND ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_LOR #undef MPI_LOR #define MPI_LOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_BOR #undef MPI_BOR #define MPI_BOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef 
PyMPI_HAVE_MPI_LXOR #undef MPI_LXOR #define MPI_LXOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_BXOR #undef MPI_BXOR #define MPI_BXOR ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_MAXLOC #undef MPI_MAXLOC #define MPI_MAXLOC ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_MINLOC #undef MPI_MINLOC #define MPI_MINLOC ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_REPLACE #undef MPI_REPLACE #define MPI_REPLACE ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_NO_OP #undef MPI_NO_OP #define MPI_NO_OP ((MPI_Op)MPI_OP_NULL) #endif #ifndef PyMPI_HAVE_MPI_Op_free #undef MPI_Op_free #define MPI_Op_free(a1) PyMPI_UNAVAILABLE("MPI_Op_free",a1) #endif #ifndef PyMPI_HAVE_MPI_User_function #undef MPI_User_function typedef void (MPIAPI PyMPI_MPI_User_function)(void*,void*,int*,MPI_Datatype*); #define MPI_User_function PyMPI_MPI_User_function #endif #ifndef PyMPI_HAVE_MPI_Op_create #undef MPI_Op_create #define MPI_Op_create(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Op_create",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Op_commutative #undef MPI_Op_commutative #define MPI_Op_commutative(a1,a2) PyMPI_UNAVAILABLE("MPI_Op_commutative",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_INFO_NULL #undef MPI_INFO_NULL #define MPI_INFO_NULL ((MPI_Info)0) #endif #ifndef PyMPI_HAVE_MPI_INFO_ENV #undef MPI_INFO_ENV #define MPI_INFO_ENV ((MPI_Info)MPI_INFO_NULL) #endif #ifndef PyMPI_HAVE_MPI_Info_free #undef MPI_Info_free #define MPI_Info_free(a1) PyMPI_UNAVAILABLE("MPI_Info_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Info_create #undef MPI_Info_create #define MPI_Info_create(a1) PyMPI_UNAVAILABLE("MPI_Info_create",a1) #endif #ifndef PyMPI_HAVE_MPI_Info_dup #undef MPI_Info_dup #define MPI_Info_dup(a1,a2) PyMPI_UNAVAILABLE("MPI_Info_dup",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_MAX_INFO_KEY #undef MPI_MAX_INFO_KEY #define MPI_MAX_INFO_KEY (1) #endif #ifndef PyMPI_HAVE_MPI_MAX_INFO_VAL #undef MPI_MAX_INFO_VAL #define MPI_MAX_INFO_VAL (1) #endif #ifndef PyMPI_HAVE_MPI_Info_get #undef MPI_Info_get #define MPI_Info_get(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Info_get",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Info_set #undef MPI_Info_set #define MPI_Info_set(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Info_set",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Info_delete #undef MPI_Info_delete #define MPI_Info_delete(a1,a2) PyMPI_UNAVAILABLE("MPI_Info_delete",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Info_get_nkeys #undef MPI_Info_get_nkeys #define MPI_Info_get_nkeys(a1,a2) PyMPI_UNAVAILABLE("MPI_Info_get_nkeys",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Info_get_nthkey #undef MPI_Info_get_nthkey #define MPI_Info_get_nthkey(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Info_get_nthkey",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Info_get_valuelen #undef MPI_Info_get_valuelen #define MPI_Info_get_valuelen(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Info_get_valuelen",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_GROUP_NULL #undef MPI_GROUP_NULL #define MPI_GROUP_NULL ((MPI_Group)0) #endif #ifndef PyMPI_HAVE_MPI_GROUP_EMPTY #undef MPI_GROUP_EMPTY #define MPI_GROUP_EMPTY ((MPI_Group)1) #endif #ifndef PyMPI_HAVE_MPI_Group_free #undef MPI_Group_free #define MPI_Group_free(a1) PyMPI_UNAVAILABLE("MPI_Group_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Group_size #undef MPI_Group_size #define MPI_Group_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Group_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Group_rank #undef MPI_Group_rank #define MPI_Group_rank(a1,a2) PyMPI_UNAVAILABLE("MPI_Group_rank",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Group_translate_ranks #undef MPI_Group_translate_ranks #define 
MPI_Group_translate_ranks(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Group_translate_ranks",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Group_compare #undef MPI_Group_compare #define MPI_Group_compare(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_compare",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_union #undef MPI_Group_union #define MPI_Group_union(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_union",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_intersection #undef MPI_Group_intersection #define MPI_Group_intersection(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_intersection",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_difference #undef MPI_Group_difference #define MPI_Group_difference(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Group_difference",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Group_incl #undef MPI_Group_incl #define MPI_Group_incl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_incl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Group_excl #undef MPI_Group_excl #define MPI_Group_excl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_excl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Group_range_incl #undef MPI_Group_range_incl #define MPI_Group_range_incl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_range_incl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Group_range_excl #undef MPI_Group_range_excl #define MPI_Group_range_excl(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Group_range_excl",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_COMM_NULL #undef MPI_COMM_NULL #define MPI_COMM_NULL ((MPI_Comm)0) #endif #ifndef PyMPI_HAVE_MPI_COMM_SELF #undef MPI_COMM_SELF #define MPI_COMM_SELF ((MPI_Comm)MPI_COMM_NULL) #endif #ifndef PyMPI_HAVE_MPI_COMM_WORLD #undef MPI_COMM_WORLD #define MPI_COMM_WORLD ((MPI_Comm)MPI_COMM_NULL) #endif #ifndef PyMPI_HAVE_MPI_Comm_free #undef MPI_Comm_free #define MPI_Comm_free(a1) PyMPI_UNAVAILABLE("MPI_Comm_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Comm_group #undef MPI_Comm_group #define MPI_Comm_group(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_size #undef MPI_Comm_size #define MPI_Comm_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_rank #undef MPI_Comm_rank #define MPI_Comm_rank(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_rank",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_compare #undef MPI_Comm_compare #define MPI_Comm_compare(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_compare",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Topo_test #undef MPI_Topo_test #define MPI_Topo_test(a1,a2) PyMPI_UNAVAILABLE("MPI_Topo_test",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_test_inter #undef MPI_Comm_test_inter #define MPI_Comm_test_inter(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_test_inter",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Abort #undef MPI_Abort #define MPI_Abort(a1,a2) PyMPI_UNAVAILABLE("MPI_Abort",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Send #undef MPI_Send #define MPI_Send(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Send",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Recv #undef MPI_Recv #define MPI_Recv(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Recv",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Sendrecv #undef MPI_Sendrecv #define MPI_Sendrecv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Sendrecv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif #ifndef PyMPI_HAVE_MPI_Sendrecv_replace #undef MPI_Sendrecv_replace #define MPI_Sendrecv_replace(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Sendrecv_replace",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_BSEND_OVERHEAD #undef MPI_BSEND_OVERHEAD #define MPI_BSEND_OVERHEAD (0) #endif #ifndef 
PyMPI_HAVE_MPI_Buffer_attach #undef MPI_Buffer_attach #define MPI_Buffer_attach(a1,a2) PyMPI_UNAVAILABLE("MPI_Buffer_attach",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Buffer_detach #undef MPI_Buffer_detach #define MPI_Buffer_detach(a1,a2) PyMPI_UNAVAILABLE("MPI_Buffer_detach",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Bsend #undef MPI_Bsend #define MPI_Bsend(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Bsend",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Ssend #undef MPI_Ssend #define MPI_Ssend(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Ssend",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Rsend #undef MPI_Rsend #define MPI_Rsend(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Rsend",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Isend #undef MPI_Isend #define MPI_Isend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Isend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ibsend #undef MPI_Ibsend #define MPI_Ibsend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ibsend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Issend #undef MPI_Issend #define MPI_Issend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Issend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Irsend #undef MPI_Irsend #define MPI_Irsend(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Irsend",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Irecv #undef MPI_Irecv #define MPI_Irecv(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Irecv",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Send_init #undef MPI_Send_init #define MPI_Send_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Send_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Bsend_init #undef MPI_Bsend_init #define MPI_Bsend_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Bsend_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ssend_init #undef MPI_Ssend_init #define MPI_Ssend_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ssend_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Rsend_init #undef MPI_Rsend_init #define MPI_Rsend_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Rsend_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Recv_init #undef MPI_Recv_init #define MPI_Recv_init(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Recv_init",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Probe #undef MPI_Probe #define MPI_Probe(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Probe",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Iprobe #undef MPI_Iprobe #define MPI_Iprobe(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Iprobe",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_MESSAGE_NULL #undef MPI_MESSAGE_NULL #define MPI_MESSAGE_NULL ((MPI_Message)0) #endif #ifndef PyMPI_HAVE_MPI_MESSAGE_NO_PROC #undef MPI_MESSAGE_NO_PROC #define MPI_MESSAGE_NO_PROC ((MPI_Message)MPI_MESSAGE_NULL) #endif #ifndef PyMPI_HAVE_MPI_Mprobe #undef MPI_Mprobe #define MPI_Mprobe(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Mprobe",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Improbe #undef MPI_Improbe #define MPI_Improbe(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Improbe",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Mrecv #undef MPI_Mrecv #define MPI_Mrecv(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Mrecv",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Imrecv #undef MPI_Imrecv #define MPI_Imrecv(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Imrecv",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Barrier #undef MPI_Barrier #define MPI_Barrier(a1) PyMPI_UNAVAILABLE("MPI_Barrier",a1) #endif #ifndef PyMPI_HAVE_MPI_Bcast #undef MPI_Bcast #define MPI_Bcast(a1,a2,a3,a4,a5) 
PyMPI_UNAVAILABLE("MPI_Bcast",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Gather #undef MPI_Gather #define MPI_Gather(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Gather",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Gatherv #undef MPI_Gatherv #define MPI_Gatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Gatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Scatter #undef MPI_Scatter #define MPI_Scatter(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Scatter",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Scatterv #undef MPI_Scatterv #define MPI_Scatterv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Scatterv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Allgather #undef MPI_Allgather #define MPI_Allgather(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Allgather",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Allgatherv #undef MPI_Allgatherv #define MPI_Allgatherv(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Allgatherv",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Alltoall #undef MPI_Alltoall #define MPI_Alltoall(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Alltoall",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Alltoallv #undef MPI_Alltoallv #define MPI_Alltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Alltoallw #undef MPI_Alltoallw #define MPI_Alltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Alltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Reduce #undef MPI_Reduce #define MPI_Reduce(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Reduce",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Allreduce #undef MPI_Allreduce #define MPI_Allreduce(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Allreduce",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Reduce_local #undef MPI_Reduce_local #define MPI_Reduce_local(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Reduce_local",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter_block #undef MPI_Reduce_scatter_block #define MPI_Reduce_scatter_block(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Reduce_scatter_block",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Reduce_scatter #undef MPI_Reduce_scatter #define MPI_Reduce_scatter(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Reduce_scatter",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Scan #undef MPI_Scan #define MPI_Scan(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Scan",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Exscan #undef MPI_Exscan #define MPI_Exscan(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Exscan",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgather #undef MPI_Neighbor_allgather #define MPI_Neighbor_allgather(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Neighbor_allgather",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_allgatherv #undef MPI_Neighbor_allgatherv #define MPI_Neighbor_allgatherv(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Neighbor_allgatherv",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoall #undef MPI_Neighbor_alltoall #define MPI_Neighbor_alltoall(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoall",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallv #undef MPI_Neighbor_alltoallv #define MPI_Neighbor_alltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Neighbor_alltoallw #undef MPI_Neighbor_alltoallw #define 
MPI_Neighbor_alltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Neighbor_alltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ibarrier #undef MPI_Ibarrier #define MPI_Ibarrier(a1,a2) PyMPI_UNAVAILABLE("MPI_Ibarrier",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Ibcast #undef MPI_Ibcast #define MPI_Ibcast(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Ibcast",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Igather #undef MPI_Igather #define MPI_Igather(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Igather",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Igatherv #undef MPI_Igatherv #define MPI_Igatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Igatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Iscatter #undef MPI_Iscatter #define MPI_Iscatter(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Iscatter",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Iscatterv #undef MPI_Iscatterv #define MPI_Iscatterv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Iscatterv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Iallgather #undef MPI_Iallgather #define MPI_Iallgather(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Iallgather",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Iallgatherv #undef MPI_Iallgatherv #define MPI_Iallgatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Iallgatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ialltoall #undef MPI_Ialltoall #define MPI_Ialltoall(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ialltoall",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ialltoallv #undef MPI_Ialltoallv #define MPI_Ialltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ialltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ialltoallw #undef MPI_Ialltoallw #define MPI_Ialltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ialltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ireduce #undef MPI_Ireduce #define MPI_Ireduce(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ireduce",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Iallreduce #undef MPI_Iallreduce #define MPI_Iallreduce(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iallreduce",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter_block #undef MPI_Ireduce_scatter_block #define MPI_Ireduce_scatter_block(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ireduce_scatter_block",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ireduce_scatter #undef MPI_Ireduce_scatter #define MPI_Ireduce_scatter(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Ireduce_scatter",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Iscan #undef MPI_Iscan #define MPI_Iscan(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iscan",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Iexscan #undef MPI_Iexscan #define MPI_Iexscan(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Iexscan",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgather #undef MPI_Ineighbor_allgather #define MPI_Ineighbor_allgather(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Ineighbor_allgather",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_allgatherv #undef MPI_Ineighbor_allgatherv #define MPI_Ineighbor_allgatherv(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Ineighbor_allgatherv",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoall #undef MPI_Ineighbor_alltoall #define MPI_Ineighbor_alltoall(a1,a2,a3,a4,a5,a6,a7,a8) 
PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoall",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallv #undef MPI_Ineighbor_alltoallv #define MPI_Ineighbor_alltoallv(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoallv",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Ineighbor_alltoallw #undef MPI_Ineighbor_alltoallw #define MPI_Ineighbor_alltoallw(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Ineighbor_alltoallw",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Comm_dup #undef MPI_Comm_dup #define MPI_Comm_dup(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_dup",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_dup_with_info #undef MPI_Comm_dup_with_info #define MPI_Comm_dup_with_info(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_dup_with_info",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_idup #undef MPI_Comm_idup #define MPI_Comm_idup(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_idup",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_create #undef MPI_Comm_create #define MPI_Comm_create(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_create",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_create_group #undef MPI_Comm_create_group #define MPI_Comm_create_group(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Comm_create_group",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Comm_split #undef MPI_Comm_split #define MPI_Comm_split(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Comm_split",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_COMM_TYPE_SHARED #undef MPI_COMM_TYPE_SHARED #define MPI_COMM_TYPE_SHARED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Comm_split_type #undef MPI_Comm_split_type #define MPI_Comm_split_type(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Comm_split_type",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Comm_set_info #undef MPI_Comm_set_info #define MPI_Comm_set_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_set_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_info #undef MPI_Comm_get_info #define MPI_Comm_get_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_get_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_CART #undef MPI_CART #define MPI_CART (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Cart_create #undef MPI_Cart_create #define MPI_Cart_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Cart_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Cartdim_get #undef MPI_Cartdim_get #define MPI_Cartdim_get(a1,a2) PyMPI_UNAVAILABLE("MPI_Cartdim_get",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Cart_get #undef MPI_Cart_get #define MPI_Cart_get(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Cart_get",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Cart_rank #undef MPI_Cart_rank #define MPI_Cart_rank(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Cart_rank",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Cart_coords #undef MPI_Cart_coords #define MPI_Cart_coords(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Cart_coords",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Cart_shift #undef MPI_Cart_shift #define MPI_Cart_shift(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Cart_shift",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Cart_sub #undef MPI_Cart_sub #define MPI_Cart_sub(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Cart_sub",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Cart_map #undef MPI_Cart_map #define MPI_Cart_map(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Cart_map",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Dims_create #undef MPI_Dims_create #define MPI_Dims_create(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Dims_create",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_GRAPH #undef MPI_GRAPH #define MPI_GRAPH (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Graph_create #undef MPI_Graph_create 
#define MPI_Graph_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Graph_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Graphdims_get #undef MPI_Graphdims_get #define MPI_Graphdims_get(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Graphdims_get",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Graph_get #undef MPI_Graph_get #define MPI_Graph_get(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Graph_get",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Graph_map #undef MPI_Graph_map #define MPI_Graph_map(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Graph_map",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Graph_neighbors_count #undef MPI_Graph_neighbors_count #define MPI_Graph_neighbors_count(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Graph_neighbors_count",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Graph_neighbors #undef MPI_Graph_neighbors #define MPI_Graph_neighbors(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Graph_neighbors",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_DIST_GRAPH #undef MPI_DIST_GRAPH #define MPI_DIST_GRAPH (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_UNWEIGHTED #undef MPI_UNWEIGHTED #define MPI_UNWEIGHTED ((int*)0) #endif #ifndef PyMPI_HAVE_MPI_WEIGHTS_EMPTY #undef MPI_WEIGHTS_EMPTY #define MPI_WEIGHTS_EMPTY ((int*)MPI_UNWEIGHTED) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_create_adjacent #undef MPI_Dist_graph_create_adjacent #define MPI_Dist_graph_create_adjacent(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Dist_graph_create_adjacent",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_create #undef MPI_Dist_graph_create #define MPI_Dist_graph_create(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Dist_graph_create",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_neighbors_count #undef MPI_Dist_graph_neighbors_count #define MPI_Dist_graph_neighbors_count(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Dist_graph_neighbors_count",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Dist_graph_neighbors #undef MPI_Dist_graph_neighbors #define MPI_Dist_graph_neighbors(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Dist_graph_neighbors",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Intercomm_create #undef MPI_Intercomm_create #define MPI_Intercomm_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Intercomm_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Comm_remote_group #undef MPI_Comm_remote_group #define MPI_Comm_remote_group(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_remote_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_remote_size #undef MPI_Comm_remote_size #define MPI_Comm_remote_size(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_remote_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Intercomm_merge #undef MPI_Intercomm_merge #define MPI_Intercomm_merge(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Intercomm_merge",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_MAX_PORT_NAME #undef MPI_MAX_PORT_NAME #define MPI_MAX_PORT_NAME (1) #endif #ifndef PyMPI_HAVE_MPI_Open_port #undef MPI_Open_port #define MPI_Open_port(a1,a2) PyMPI_UNAVAILABLE("MPI_Open_port",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Close_port #undef MPI_Close_port #define MPI_Close_port(a1) PyMPI_UNAVAILABLE("MPI_Close_port",a1) #endif #ifndef PyMPI_HAVE_MPI_Publish_name #undef MPI_Publish_name #define MPI_Publish_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Publish_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Unpublish_name #undef MPI_Unpublish_name #define MPI_Unpublish_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Unpublish_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Lookup_name #undef MPI_Lookup_name #define MPI_Lookup_name(a1,a2,a3) 
PyMPI_UNAVAILABLE("MPI_Lookup_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_accept #undef MPI_Comm_accept #define MPI_Comm_accept(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Comm_accept",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Comm_connect #undef MPI_Comm_connect #define MPI_Comm_connect(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Comm_connect",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Comm_join #undef MPI_Comm_join #define MPI_Comm_join(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_join",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_disconnect #undef MPI_Comm_disconnect #define MPI_Comm_disconnect(a1) PyMPI_UNAVAILABLE("MPI_Comm_disconnect",a1) #endif #ifndef PyMPI_HAVE_MPI_ARGV_NULL #undef MPI_ARGV_NULL #define MPI_ARGV_NULL ((char**)0) #endif #ifndef PyMPI_HAVE_MPI_ARGVS_NULL #undef MPI_ARGVS_NULL #define MPI_ARGVS_NULL ((char***)0) #endif #ifndef PyMPI_HAVE_MPI_ERRCODES_IGNORE #undef MPI_ERRCODES_IGNORE #define MPI_ERRCODES_IGNORE ((int*)0) #endif #ifndef PyMPI_HAVE_MPI_Comm_spawn #undef MPI_Comm_spawn #define MPI_Comm_spawn(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Comm_spawn",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Comm_spawn_multiple #undef MPI_Comm_spawn_multiple #define MPI_Comm_spawn_multiple(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Comm_spawn_multiple",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_parent #undef MPI_Comm_get_parent #define MPI_Comm_get_parent(a1) PyMPI_UNAVAILABLE("MPI_Comm_get_parent",a1) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_get #undef MPI_Errhandler_get #define MPI_Errhandler_get(a1,a2) PyMPI_UNAVAILABLE("MPI_Errhandler_get",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_set #undef MPI_Errhandler_set #define MPI_Errhandler_set(a1,a2) PyMPI_UNAVAILABLE("MPI_Errhandler_set",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Handler_function #undef MPI_Handler_function typedef void (MPIAPI PyMPI_MPI_Handler_function)(MPI_Comm*,int*,...); #define MPI_Handler_function PyMPI_MPI_Handler_function #endif #ifndef PyMPI_HAVE_MPI_Errhandler_create #undef MPI_Errhandler_create #define MPI_Errhandler_create(a1,a2) PyMPI_UNAVAILABLE("MPI_Errhandler_create",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Attr_get #undef MPI_Attr_get #define MPI_Attr_get(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Attr_get",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Attr_put #undef MPI_Attr_put #define MPI_Attr_put(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Attr_put",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Attr_delete #undef MPI_Attr_delete #define MPI_Attr_delete(a1,a2) PyMPI_UNAVAILABLE("MPI_Attr_delete",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Copy_function #undef MPI_Copy_function typedef int (MPIAPI PyMPI_MPI_Copy_function)(MPI_Comm,int,void*,void*,void*,int*); #define MPI_Copy_function PyMPI_MPI_Copy_function #endif #ifndef PyMPI_HAVE_MPI_Delete_function #undef MPI_Delete_function typedef int (MPIAPI PyMPI_MPI_Delete_function)(MPI_Comm,int,void*,void*); #define MPI_Delete_function PyMPI_MPI_Delete_function #endif #ifndef PyMPI_HAVE_MPI_DUP_FN #undef MPI_DUP_FN #define MPI_DUP_FN ((MPI_Copy_function*)0) #endif #ifndef PyMPI_HAVE_MPI_NULL_COPY_FN #undef MPI_NULL_COPY_FN #define MPI_NULL_COPY_FN ((MPI_Copy_function*)0) #endif #ifndef PyMPI_HAVE_MPI_NULL_DELETE_FN #undef MPI_NULL_DELETE_FN #define MPI_NULL_DELETE_FN ((MPI_Delete_function*)0) #endif #ifndef PyMPI_HAVE_MPI_Keyval_create #undef MPI_Keyval_create #define MPI_Keyval_create(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Keyval_create",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Keyval_free #undef MPI_Keyval_free #define MPI_Keyval_free(a1) 
PyMPI_UNAVAILABLE("MPI_Keyval_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_errhandler #undef MPI_Comm_get_errhandler #define MPI_Comm_get_errhandler MPI_Errhandler_get #endif #ifndef PyMPI_HAVE_MPI_Comm_set_errhandler #undef MPI_Comm_set_errhandler #define MPI_Comm_set_errhandler MPI_Errhandler_set #endif #ifndef PyMPI_HAVE_MPI_Comm_errhandler_fn #undef MPI_Comm_errhandler_fn #define MPI_Comm_errhandler_fn MPI_Handler_function #endif #ifndef PyMPI_HAVE_MPI_Comm_errhandler_function #undef MPI_Comm_errhandler_function #define MPI_Comm_errhandler_function MPI_Comm_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_Comm_create_errhandler #undef MPI_Comm_create_errhandler #define MPI_Comm_create_errhandler MPI_Errhandler_create #endif #ifndef PyMPI_HAVE_MPI_Comm_call_errhandler #undef MPI_Comm_call_errhandler #define MPI_Comm_call_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_call_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_name #undef MPI_Comm_get_name #define MPI_Comm_get_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Comm_get_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Comm_set_name #undef MPI_Comm_set_name #define MPI_Comm_set_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Comm_set_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_TAG_UB #undef MPI_TAG_UB #define MPI_TAG_UB (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_HOST #undef MPI_HOST #define MPI_HOST (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_IO #undef MPI_IO #define MPI_IO (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WTIME_IS_GLOBAL #undef MPI_WTIME_IS_GLOBAL #define MPI_WTIME_IS_GLOBAL (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_UNIVERSE_SIZE #undef MPI_UNIVERSE_SIZE #define MPI_UNIVERSE_SIZE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_APPNUM #undef MPI_APPNUM #define MPI_APPNUM (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_LASTUSEDCODE #undef MPI_LASTUSEDCODE #define MPI_LASTUSEDCODE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_Comm_get_attr #undef MPI_Comm_get_attr #define MPI_Comm_get_attr MPI_Attr_get #endif #ifndef PyMPI_HAVE_MPI_Comm_set_attr #undef MPI_Comm_set_attr #define MPI_Comm_set_attr MPI_Attr_put #endif #ifndef PyMPI_HAVE_MPI_Comm_delete_attr #undef MPI_Comm_delete_attr #define MPI_Comm_delete_attr MPI_Attr_delete #endif #ifndef PyMPI_HAVE_MPI_Comm_copy_attr_function #undef MPI_Comm_copy_attr_function #define MPI_Comm_copy_attr_function MPI_Copy_function #endif #ifndef PyMPI_HAVE_MPI_Comm_delete_attr_function #undef MPI_Comm_delete_attr_function #define MPI_Comm_delete_attr_function MPI_Delete_function #endif #ifndef PyMPI_HAVE_MPI_COMM_DUP_FN #undef MPI_COMM_DUP_FN #define MPI_COMM_DUP_FN ((MPI_Comm_copy_attr_function*)MPI_DUP_FN) #endif #ifndef PyMPI_HAVE_MPI_COMM_NULL_COPY_FN #undef MPI_COMM_NULL_COPY_FN #define MPI_COMM_NULL_COPY_FN ((MPI_Comm_copy_attr_function*)MPI_NULL_COPY_FN) #endif #ifndef PyMPI_HAVE_MPI_COMM_NULL_DELETE_FN #undef MPI_COMM_NULL_DELETE_FN #define MPI_COMM_NULL_DELETE_FN ((MPI_Comm_delete_attr_function*)MPI_NULL_DELETE_FN) #endif #ifndef PyMPI_HAVE_MPI_Comm_create_keyval #undef MPI_Comm_create_keyval #define MPI_Comm_create_keyval MPI_Keyval_create #endif #ifndef PyMPI_HAVE_MPI_Comm_free_keyval #undef MPI_Comm_free_keyval #define MPI_Comm_free_keyval MPI_Keyval_free #endif #ifndef PyMPI_HAVE_MPI_WIN_NULL #undef MPI_WIN_NULL #define MPI_WIN_NULL ((MPI_Win)0) #endif #ifndef PyMPI_HAVE_MPI_Win_free #undef MPI_Win_free #define MPI_Win_free(a1) PyMPI_UNAVAILABLE("MPI_Win_free",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_create #undef MPI_Win_create #define 
MPI_Win_create(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_create",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Win_allocate #undef MPI_Win_allocate #define MPI_Win_allocate(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_allocate",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Win_allocate_shared #undef MPI_Win_allocate_shared #define MPI_Win_allocate_shared(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_Win_allocate_shared",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_Win_shared_query #undef MPI_Win_shared_query #define MPI_Win_shared_query(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Win_shared_query",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_Win_create_dynamic #undef MPI_Win_create_dynamic #define MPI_Win_create_dynamic(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_create_dynamic",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_attach #undef MPI_Win_attach #define MPI_Win_attach(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_attach",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_detach #undef MPI_Win_detach #define MPI_Win_detach(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_detach",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_set_info #undef MPI_Win_set_info #define MPI_Win_set_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_set_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_get_info #undef MPI_Win_get_info #define MPI_Win_get_info(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_get_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_get_group #undef MPI_Win_get_group #define MPI_Win_get_group(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_get_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Get #undef MPI_Get #define MPI_Get(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Get",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Put #undef MPI_Put #define MPI_Put(a1,a2,a3,a4,a5,a6,a7,a8) PyMPI_UNAVAILABLE("MPI_Put",a1,a2,a3,a4,a5,a6,a7,a8) #endif #ifndef PyMPI_HAVE_MPI_Accumulate #undef MPI_Accumulate #define MPI_Accumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Accumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Get_accumulate #undef MPI_Get_accumulate #define MPI_Get_accumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) PyMPI_UNAVAILABLE("MPI_Get_accumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) #endif #ifndef PyMPI_HAVE_MPI_Fetch_and_op #undef MPI_Fetch_and_op #define MPI_Fetch_and_op(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Fetch_and_op",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Compare_and_swap #undef MPI_Compare_and_swap #define MPI_Compare_and_swap(a1,a2,a3,a4,a5,a6,a7) PyMPI_UNAVAILABLE("MPI_Compare_and_swap",a1,a2,a3,a4,a5,a6,a7) #endif #ifndef PyMPI_HAVE_MPI_Rget #undef MPI_Rget #define MPI_Rget(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Rget",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Rput #undef MPI_Rput #define MPI_Rput(a1,a2,a3,a4,a5,a6,a7,a8,a9) PyMPI_UNAVAILABLE("MPI_Rput",a1,a2,a3,a4,a5,a6,a7,a8,a9) #endif #ifndef PyMPI_HAVE_MPI_Raccumulate #undef MPI_Raccumulate #define MPI_Raccumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) PyMPI_UNAVAILABLE("MPI_Raccumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10) #endif #ifndef PyMPI_HAVE_MPI_Rget_accumulate #undef MPI_Rget_accumulate #define MPI_Rget_accumulate(a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) PyMPI_UNAVAILABLE("MPI_Rget_accumulate",a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOCHECK #undef MPI_MODE_NOCHECK #define MPI_MODE_NOCHECK (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOSTORE #undef MPI_MODE_NOSTORE #define MPI_MODE_NOSTORE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOPUT #undef MPI_MODE_NOPUT 
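/*
 * The one-sided synchronization assertions (MPI_MODE_NOCHECK, NOSTORE,
 * NOPUT, NOPRECEDE, NOSUCCEED) all fall back to MPI_UNDEFINED, so code
 * that builds an assertion word still compiles even though the window
 * calls themselves remain unavailable stubs. Illustrative use only, not
 * a statement about how these values are consumed downstream:
 *
 *   int assertion = MPI_MODE_NOCHECK | MPI_MODE_NOPRECEDE;
 *   MPI_Win_fence(assertion, win);
 */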
#define MPI_MODE_NOPUT (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOPRECEDE #undef MPI_MODE_NOPRECEDE #define MPI_MODE_NOPRECEDE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_MODE_NOSUCCEED #undef MPI_MODE_NOSUCCEED #define MPI_MODE_NOSUCCEED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Win_fence #undef MPI_Win_fence #define MPI_Win_fence(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_fence",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_post #undef MPI_Win_post #define MPI_Win_post(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_post",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_start #undef MPI_Win_start #define MPI_Win_start(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_start",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_complete #undef MPI_Win_complete #define MPI_Win_complete(a1) PyMPI_UNAVAILABLE("MPI_Win_complete",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_wait #undef MPI_Win_wait #define MPI_Win_wait(a1) PyMPI_UNAVAILABLE("MPI_Win_wait",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_test #undef MPI_Win_test #define MPI_Win_test(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_test",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_LOCK_EXCLUSIVE #undef MPI_LOCK_EXCLUSIVE #define MPI_LOCK_EXCLUSIVE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_LOCK_SHARED #undef MPI_LOCK_SHARED #define MPI_LOCK_SHARED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Win_lock #undef MPI_Win_lock #define MPI_Win_lock(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Win_lock",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Win_unlock #undef MPI_Win_unlock #define MPI_Win_unlock(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_unlock",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_lock_all #undef MPI_Win_lock_all #define MPI_Win_lock_all(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_lock_all",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_unlock_all #undef MPI_Win_unlock_all #define MPI_Win_unlock_all(a1) PyMPI_UNAVAILABLE("MPI_Win_unlock_all",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_flush #undef MPI_Win_flush #define MPI_Win_flush(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_flush",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_flush_all #undef MPI_Win_flush_all #define MPI_Win_flush_all(a1) PyMPI_UNAVAILABLE("MPI_Win_flush_all",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_flush_local #undef MPI_Win_flush_local #define MPI_Win_flush_local(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_flush_local",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_flush_local_all #undef MPI_Win_flush_local_all #define MPI_Win_flush_local_all(a1) PyMPI_UNAVAILABLE("MPI_Win_flush_local_all",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_sync #undef MPI_Win_sync #define MPI_Win_sync(a1) PyMPI_UNAVAILABLE("MPI_Win_sync",a1) #endif #ifndef PyMPI_HAVE_MPI_Win_get_errhandler #undef MPI_Win_get_errhandler #define MPI_Win_get_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_get_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_set_errhandler #undef MPI_Win_set_errhandler #define MPI_Win_set_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_set_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_errhandler_fn #undef MPI_Win_errhandler_fn typedef void (MPIAPI PyMPI_MPI_Win_errhandler_fn)(MPI_Win*,int*,...); #define MPI_Win_errhandler_fn PyMPI_MPI_Win_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_Win_errhandler_function #undef MPI_Win_errhandler_function #define MPI_Win_errhandler_function MPI_Win_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_Win_create_errhandler #undef MPI_Win_create_errhandler #define MPI_Win_create_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_create_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_call_errhandler #undef MPI_Win_call_errhandler #define MPI_Win_call_errhandler(a1,a2) 
PyMPI_UNAVAILABLE("MPI_Win_call_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_get_name #undef MPI_Win_get_name #define MPI_Win_get_name(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_get_name",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_set_name #undef MPI_Win_set_name #define MPI_Win_set_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_set_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_WIN_BASE #undef MPI_WIN_BASE #define MPI_WIN_BASE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_SIZE #undef MPI_WIN_SIZE #define MPI_WIN_SIZE (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_DISP_UNIT #undef MPI_WIN_DISP_UNIT #define MPI_WIN_DISP_UNIT (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_CREATE_FLAVOR #undef MPI_WIN_CREATE_FLAVOR #define MPI_WIN_CREATE_FLAVOR (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_MODEL #undef MPI_WIN_MODEL #define MPI_WIN_MODEL (MPI_KEYVAL_INVALID) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_CREATE #undef MPI_WIN_FLAVOR_CREATE #define MPI_WIN_FLAVOR_CREATE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_ALLOCATE #undef MPI_WIN_FLAVOR_ALLOCATE #define MPI_WIN_FLAVOR_ALLOCATE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_DYNAMIC #undef MPI_WIN_FLAVOR_DYNAMIC #define MPI_WIN_FLAVOR_DYNAMIC (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_FLAVOR_SHARED #undef MPI_WIN_FLAVOR_SHARED #define MPI_WIN_FLAVOR_SHARED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_SEPARATE #undef MPI_WIN_SEPARATE #define MPI_WIN_SEPARATE (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_WIN_UNIFIED #undef MPI_WIN_UNIFIED #define MPI_WIN_UNIFIED (MPI_UNDEFINED) #endif #ifndef PyMPI_HAVE_MPI_Win_get_attr #undef MPI_Win_get_attr #define MPI_Win_get_attr(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Win_get_attr",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Win_set_attr #undef MPI_Win_set_attr #define MPI_Win_set_attr(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Win_set_attr",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Win_delete_attr #undef MPI_Win_delete_attr #define MPI_Win_delete_attr(a1,a2) PyMPI_UNAVAILABLE("MPI_Win_delete_attr",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Win_copy_attr_function #undef MPI_Win_copy_attr_function typedef int (MPIAPI PyMPI_MPI_Win_copy_attr_function)(MPI_Win,int,void*,void*,void*,int*); #define MPI_Win_copy_attr_function PyMPI_MPI_Win_copy_attr_function #endif #ifndef PyMPI_HAVE_MPI_Win_delete_attr_function #undef MPI_Win_delete_attr_function typedef int (MPIAPI PyMPI_MPI_Win_delete_attr_function)(MPI_Win,int,void*,void*); #define MPI_Win_delete_attr_function PyMPI_MPI_Win_delete_attr_function #endif #ifndef PyMPI_HAVE_MPI_WIN_DUP_FN #undef MPI_WIN_DUP_FN #define MPI_WIN_DUP_FN ((MPI_Win_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_WIN_NULL_COPY_FN #undef MPI_WIN_NULL_COPY_FN #define MPI_WIN_NULL_COPY_FN ((MPI_Win_copy_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_WIN_NULL_DELETE_FN #undef MPI_WIN_NULL_DELETE_FN #define MPI_WIN_NULL_DELETE_FN ((MPI_Win_delete_attr_function*)0) #endif #ifndef PyMPI_HAVE_MPI_Win_create_keyval #undef MPI_Win_create_keyval #define MPI_Win_create_keyval(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Win_create_keyval",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Win_free_keyval #undef MPI_Win_free_keyval #define MPI_Win_free_keyval(a1) PyMPI_UNAVAILABLE("MPI_Win_free_keyval",a1) #endif #ifndef PyMPI_HAVE_MPI_FILE_NULL #undef MPI_FILE_NULL #define MPI_FILE_NULL ((MPI_File)0) #endif #ifndef PyMPI_HAVE_MPI_MODE_RDONLY #undef MPI_MODE_RDONLY #define MPI_MODE_RDONLY (1) #endif #ifndef PyMPI_HAVE_MPI_MODE_RDWR #undef MPI_MODE_RDWR #define MPI_MODE_RDWR 
(2) #endif #ifndef PyMPI_HAVE_MPI_MODE_WRONLY #undef MPI_MODE_WRONLY #define MPI_MODE_WRONLY (4) #endif #ifndef PyMPI_HAVE_MPI_MODE_CREATE #undef MPI_MODE_CREATE #define MPI_MODE_CREATE (8) #endif #ifndef PyMPI_HAVE_MPI_MODE_EXCL #undef MPI_MODE_EXCL #define MPI_MODE_EXCL (16) #endif #ifndef PyMPI_HAVE_MPI_MODE_DELETE_ON_CLOSE #undef MPI_MODE_DELETE_ON_CLOSE #define MPI_MODE_DELETE_ON_CLOSE (32) #endif #ifndef PyMPI_HAVE_MPI_MODE_UNIQUE_OPEN #undef MPI_MODE_UNIQUE_OPEN #define MPI_MODE_UNIQUE_OPEN (64) #endif #ifndef PyMPI_HAVE_MPI_MODE_APPEND #undef MPI_MODE_APPEND #define MPI_MODE_APPEND (128) #endif #ifndef PyMPI_HAVE_MPI_MODE_SEQUENTIAL #undef MPI_MODE_SEQUENTIAL #define MPI_MODE_SEQUENTIAL (256) #endif #ifndef PyMPI_HAVE_MPI_File_open #undef MPI_File_open #define MPI_File_open(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_open",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_close #undef MPI_File_close #define MPI_File_close(a1) PyMPI_UNAVAILABLE("MPI_File_close",a1) #endif #ifndef PyMPI_HAVE_MPI_File_delete #undef MPI_File_delete #define MPI_File_delete(a1,a2) PyMPI_UNAVAILABLE("MPI_File_delete",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_set_size #undef MPI_File_set_size #define MPI_File_set_size(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_preallocate #undef MPI_File_preallocate #define MPI_File_preallocate(a1,a2) PyMPI_UNAVAILABLE("MPI_File_preallocate",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_size #undef MPI_File_get_size #define MPI_File_get_size(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_size",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_group #undef MPI_File_get_group #define MPI_File_get_group(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_group",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_amode #undef MPI_File_get_amode #define MPI_File_get_amode(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_amode",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_set_info #undef MPI_File_set_info #define MPI_File_set_info(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_info #undef MPI_File_get_info #define MPI_File_get_info(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_info",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_view #undef MPI_File_get_view #define MPI_File_get_view(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_get_view",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_set_view #undef MPI_File_set_view #define MPI_File_set_view(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_set_view",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_read_at #undef MPI_File_read_at #define MPI_File_read_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_read_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all #undef MPI_File_read_at_all #define MPI_File_read_at_all(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_read_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_write_at #undef MPI_File_write_at #define MPI_File_write_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_write_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all #undef MPI_File_write_at_all #define MPI_File_write_at_all(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_write_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iread_at #undef MPI_File_iread_at #define MPI_File_iread_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iread_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iread_at_all #undef MPI_File_iread_at_all #define MPI_File_iread_at_all(a1,a2,a3,a4,a5,a6) 
PyMPI_UNAVAILABLE("MPI_File_iread_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at #undef MPI_File_iwrite_at #define MPI_File_iwrite_at(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iwrite_at",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_at_all #undef MPI_File_iwrite_at_all #define MPI_File_iwrite_at_all(a1,a2,a3,a4,a5,a6) PyMPI_UNAVAILABLE("MPI_File_iwrite_at_all",a1,a2,a3,a4,a5,a6) #endif #ifndef PyMPI_HAVE_MPI_SEEK_SET #undef MPI_SEEK_SET #define MPI_SEEK_SET (0) #endif #ifndef PyMPI_HAVE_MPI_SEEK_CUR #undef MPI_SEEK_CUR #define MPI_SEEK_CUR (1) #endif #ifndef PyMPI_HAVE_MPI_SEEK_END #undef MPI_SEEK_END #define MPI_SEEK_END (2) #endif #ifndef PyMPI_HAVE_MPI_DISPLACEMENT_CURRENT #undef MPI_DISPLACEMENT_CURRENT #define MPI_DISPLACEMENT_CURRENT (3) #endif #ifndef PyMPI_HAVE_MPI_File_seek #undef MPI_File_seek #define MPI_File_seek(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_seek",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_get_position #undef MPI_File_get_position #define MPI_File_get_position(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_position",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_byte_offset #undef MPI_File_get_byte_offset #define MPI_File_get_byte_offset(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_get_byte_offset",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_read #undef MPI_File_read #define MPI_File_read(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_all #undef MPI_File_read_all #define MPI_File_read_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write #undef MPI_File_write #define MPI_File_write(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_all #undef MPI_File_write_all #define MPI_File_write_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread #undef MPI_File_iread #define MPI_File_iread(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread_all #undef MPI_File_iread_all #define MPI_File_iread_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite #undef MPI_File_iwrite #define MPI_File_iwrite(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_all #undef MPI_File_iwrite_all #define MPI_File_iwrite_all(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite_all",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_shared #undef MPI_File_read_shared #define MPI_File_read_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_shared #undef MPI_File_write_shared #define MPI_File_write_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iread_shared #undef MPI_File_iread_shared #define MPI_File_iread_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iread_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_iwrite_shared #undef MPI_File_iwrite_shared #define MPI_File_iwrite_shared(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_iwrite_shared",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered #undef MPI_File_read_ordered #define MPI_File_read_ordered(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_ordered",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered #undef MPI_File_write_ordered 
#define MPI_File_write_ordered(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_ordered",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_seek_shared #undef MPI_File_seek_shared #define MPI_File_seek_shared(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_seek_shared",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_get_position_shared #undef MPI_File_get_position_shared #define MPI_File_get_position_shared(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_position_shared",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_begin #undef MPI_File_read_at_all_begin #define MPI_File_read_at_all_begin(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_read_at_all_begin",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_read_at_all_end #undef MPI_File_read_at_all_end #define MPI_File_read_at_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_read_at_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_begin #undef MPI_File_write_at_all_begin #define MPI_File_write_at_all_begin(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_File_write_at_all_begin",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_File_write_at_all_end #undef MPI_File_write_at_all_end #define MPI_File_write_at_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_write_at_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_read_all_begin #undef MPI_File_read_all_begin #define MPI_File_read_all_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_read_all_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_read_all_end #undef MPI_File_read_all_end #define MPI_File_read_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_read_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_write_all_begin #undef MPI_File_write_all_begin #define MPI_File_write_all_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_write_all_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_write_all_end #undef MPI_File_write_all_end #define MPI_File_write_all_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_write_all_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_begin #undef MPI_File_read_ordered_begin #define MPI_File_read_ordered_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_read_ordered_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_read_ordered_end #undef MPI_File_read_ordered_end #define MPI_File_read_ordered_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_read_ordered_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_begin #undef MPI_File_write_ordered_begin #define MPI_File_write_ordered_begin(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_File_write_ordered_begin",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_File_write_ordered_end #undef MPI_File_write_ordered_end #define MPI_File_write_ordered_end(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_write_ordered_end",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_get_type_extent #undef MPI_File_get_type_extent #define MPI_File_get_type_extent(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_File_get_type_extent",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_File_set_atomicity #undef MPI_File_set_atomicity #define MPI_File_set_atomicity(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_atomicity",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_get_atomicity #undef MPI_File_get_atomicity #define MPI_File_get_atomicity(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_atomicity",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_sync #undef MPI_File_sync #define MPI_File_sync(a1) PyMPI_UNAVAILABLE("MPI_File_sync",a1) #endif #ifndef PyMPI_HAVE_MPI_File_get_errhandler #undef MPI_File_get_errhandler #define MPI_File_get_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_get_errhandler",a1,a2) #endif #ifndef 
PyMPI_HAVE_MPI_File_set_errhandler #undef MPI_File_set_errhandler #define MPI_File_set_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_set_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_errhandler_fn #undef MPI_File_errhandler_fn typedef void (MPIAPI PyMPI_MPI_File_errhandler_fn)(MPI_File*,int*,...); #define MPI_File_errhandler_fn PyMPI_MPI_File_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_File_errhandler_function #undef MPI_File_errhandler_function #define MPI_File_errhandler_function MPI_File_errhandler_fn #endif #ifndef PyMPI_HAVE_MPI_File_create_errhandler #undef MPI_File_create_errhandler #define MPI_File_create_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_create_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_File_call_errhandler #undef MPI_File_call_errhandler #define MPI_File_call_errhandler(a1,a2) PyMPI_UNAVAILABLE("MPI_File_call_errhandler",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Datarep_conversion_function #undef MPI_Datarep_conversion_function typedef int (MPIAPI PyMPI_MPI_Datarep_conversion_function)(void*,MPI_Datatype,int,void*,MPI_Offset,void*); #define MPI_Datarep_conversion_function PyMPI_MPI_Datarep_conversion_function #endif #ifndef PyMPI_HAVE_MPI_Datarep_extent_function #undef MPI_Datarep_extent_function typedef int (MPIAPI PyMPI_MPI_Datarep_extent_function)(MPI_Datatype,MPI_Aint*,void*); #define MPI_Datarep_extent_function PyMPI_MPI_Datarep_extent_function #endif #ifndef PyMPI_HAVE_MPI_CONVERSION_FN_NULL #undef MPI_CONVERSION_FN_NULL #define MPI_CONVERSION_FN_NULL ((MPI_Datarep_conversion_function*)0) #endif #ifndef PyMPI_HAVE_MPI_MAX_DATAREP_STRING #undef MPI_MAX_DATAREP_STRING #define MPI_MAX_DATAREP_STRING (1) #endif #ifndef PyMPI_HAVE_MPI_Register_datarep #undef MPI_Register_datarep #define MPI_Register_datarep(a1,a2,a3,a4,a5) PyMPI_UNAVAILABLE("MPI_Register_datarep",a1,a2,a3,a4,a5) #endif #ifndef PyMPI_HAVE_MPI_ERRHANDLER_NULL #undef MPI_ERRHANDLER_NULL #define MPI_ERRHANDLER_NULL ((MPI_Errhandler)0) #endif #ifndef PyMPI_HAVE_MPI_ERRORS_RETURN #undef MPI_ERRORS_RETURN #define MPI_ERRORS_RETURN ((MPI_Errhandler)MPI_ERRHANDLER_NULL) #endif #ifndef PyMPI_HAVE_MPI_ERRORS_ARE_FATAL #undef MPI_ERRORS_ARE_FATAL #define MPI_ERRORS_ARE_FATAL ((MPI_Errhandler)MPI_ERRHANDLER_NULL) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_free #undef MPI_Errhandler_free #define MPI_Errhandler_free(a1) PyMPI_UNAVAILABLE("MPI_Errhandler_free",a1) #endif #ifndef PyMPI_HAVE_MPI_MAX_ERROR_STRING #undef MPI_MAX_ERROR_STRING #define MPI_MAX_ERROR_STRING (1) #endif #ifndef PyMPI_HAVE_MPI_Error_class #undef MPI_Error_class #define MPI_Error_class(a1,a2) PyMPI_UNAVAILABLE("MPI_Error_class",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Error_string #undef MPI_Error_string #define MPI_Error_string(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Error_string",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Add_error_class #undef MPI_Add_error_class #define MPI_Add_error_class(a1) PyMPI_UNAVAILABLE("MPI_Add_error_class",a1) #endif #ifndef PyMPI_HAVE_MPI_Add_error_code #undef MPI_Add_error_code #define MPI_Add_error_code(a1,a2) PyMPI_UNAVAILABLE("MPI_Add_error_code",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Add_error_string #undef MPI_Add_error_string #define MPI_Add_error_string(a1,a2) PyMPI_UNAVAILABLE("MPI_Add_error_string",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_SUCCESS #undef MPI_SUCCESS #define MPI_SUCCESS (0) #endif #ifndef PyMPI_HAVE_MPI_ERR_LASTCODE #undef MPI_ERR_LASTCODE #define MPI_ERR_LASTCODE (1) #endif #ifndef PyMPI_HAVE_MPI_ERR_COMM #undef MPI_ERR_COMM #define MPI_ERR_COMM (MPI_ERR_LASTCODE) #endif #ifndef 
PyMPI_HAVE_MPI_ERR_GROUP #undef MPI_ERR_GROUP #define MPI_ERR_GROUP (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_TYPE #undef MPI_ERR_TYPE #define MPI_ERR_TYPE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_REQUEST #undef MPI_ERR_REQUEST #define MPI_ERR_REQUEST (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_OP #undef MPI_ERR_OP #define MPI_ERR_OP (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_BUFFER #undef MPI_ERR_BUFFER #define MPI_ERR_BUFFER (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_COUNT #undef MPI_ERR_COUNT #define MPI_ERR_COUNT (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_TAG #undef MPI_ERR_TAG #define MPI_ERR_TAG (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_RANK #undef MPI_ERR_RANK #define MPI_ERR_RANK (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_ROOT #undef MPI_ERR_ROOT #define MPI_ERR_ROOT (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_TRUNCATE #undef MPI_ERR_TRUNCATE #define MPI_ERR_TRUNCATE (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_IN_STATUS #undef MPI_ERR_IN_STATUS #define MPI_ERR_IN_STATUS (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_PENDING #undef MPI_ERR_PENDING #define MPI_ERR_PENDING (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_TOPOLOGY #undef MPI_ERR_TOPOLOGY #define MPI_ERR_TOPOLOGY (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_DIMS #undef MPI_ERR_DIMS #define MPI_ERR_DIMS (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_ARG #undef MPI_ERR_ARG #define MPI_ERR_ARG (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_OTHER #undef MPI_ERR_OTHER #define MPI_ERR_OTHER (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_UNKNOWN #undef MPI_ERR_UNKNOWN #define MPI_ERR_UNKNOWN (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_INTERN #undef MPI_ERR_INTERN #define MPI_ERR_INTERN (MPI_ERR_LASTCODE) #endif #ifndef PyMPI_HAVE_MPI_ERR_KEYVAL #undef MPI_ERR_KEYVAL #define MPI_ERR_KEYVAL (MPI_ERR_ARG) #endif #ifndef PyMPI_HAVE_MPI_ERR_NO_MEM #undef MPI_ERR_NO_MEM #define MPI_ERR_NO_MEM (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO #undef MPI_ERR_INFO #define MPI_ERR_INFO (MPI_ERR_ARG) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO_KEY #undef MPI_ERR_INFO_KEY #define MPI_ERR_INFO_KEY (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO_VALUE #undef MPI_ERR_INFO_VALUE #define MPI_ERR_INFO_VALUE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_INFO_NOKEY #undef MPI_ERR_INFO_NOKEY #define MPI_ERR_INFO_NOKEY (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_SPAWN #undef MPI_ERR_SPAWN #define MPI_ERR_SPAWN (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_PORT #undef MPI_ERR_PORT #define MPI_ERR_PORT (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_SERVICE #undef MPI_ERR_SERVICE #define MPI_ERR_SERVICE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_NAME #undef MPI_ERR_NAME #define MPI_ERR_NAME (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_FILE #undef MPI_ERR_FILE #define MPI_ERR_FILE (MPI_ERR_ARG) #endif #ifndef PyMPI_HAVE_MPI_ERR_NOT_SAME #undef MPI_ERR_NOT_SAME #define MPI_ERR_NOT_SAME (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_BAD_FILE #undef MPI_ERR_BAD_FILE #define MPI_ERR_BAD_FILE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_NO_SUCH_FILE #undef MPI_ERR_NO_SUCH_FILE #define MPI_ERR_NO_SUCH_FILE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_FILE_EXISTS #undef MPI_ERR_FILE_EXISTS #define MPI_ERR_FILE_EXISTS (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_FILE_IN_USE #undef MPI_ERR_FILE_IN_USE #define MPI_ERR_FILE_IN_USE 
(MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_AMODE #undef MPI_ERR_AMODE #define MPI_ERR_AMODE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_ACCESS #undef MPI_ERR_ACCESS #define MPI_ERR_ACCESS (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_READ_ONLY #undef MPI_ERR_READ_ONLY #define MPI_ERR_READ_ONLY (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_NO_SPACE #undef MPI_ERR_NO_SPACE #define MPI_ERR_NO_SPACE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_QUOTA #undef MPI_ERR_QUOTA #define MPI_ERR_QUOTA (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_DATAREP #undef MPI_ERR_UNSUPPORTED_DATAREP #define MPI_ERR_UNSUPPORTED_DATAREP (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_UNSUPPORTED_OPERATION #undef MPI_ERR_UNSUPPORTED_OPERATION #define MPI_ERR_UNSUPPORTED_OPERATION (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_CONVERSION #undef MPI_ERR_CONVERSION #define MPI_ERR_CONVERSION (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_DUP_DATAREP #undef MPI_ERR_DUP_DATAREP #define MPI_ERR_DUP_DATAREP (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_IO #undef MPI_ERR_IO #define MPI_ERR_IO (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_WIN #undef MPI_ERR_WIN #define MPI_ERR_WIN (MPI_ERR_ARG) #endif #ifndef PyMPI_HAVE_MPI_ERR_BASE #undef MPI_ERR_BASE #define MPI_ERR_BASE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_SIZE #undef MPI_ERR_SIZE #define MPI_ERR_SIZE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_DISP #undef MPI_ERR_DISP #define MPI_ERR_DISP (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_ASSERT #undef MPI_ERR_ASSERT #define MPI_ERR_ASSERT (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_LOCKTYPE #undef MPI_ERR_LOCKTYPE #define MPI_ERR_LOCKTYPE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_CONFLICT #undef MPI_ERR_RMA_CONFLICT #define MPI_ERR_RMA_CONFLICT (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_SYNC #undef MPI_ERR_RMA_SYNC #define MPI_ERR_RMA_SYNC (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_RANGE #undef MPI_ERR_RMA_RANGE #define MPI_ERR_RMA_RANGE (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_ATTACH #undef MPI_ERR_RMA_ATTACH #define MPI_ERR_RMA_ATTACH (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_SHARED #undef MPI_ERR_RMA_SHARED #define MPI_ERR_RMA_SHARED (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_ERR_RMA_FLAVOR #undef MPI_ERR_RMA_FLAVOR #define MPI_ERR_RMA_FLAVOR (MPI_ERR_UNKNOWN) #endif #ifndef PyMPI_HAVE_MPI_Alloc_mem #undef MPI_Alloc_mem #define MPI_Alloc_mem(a1,a2,a3) PyMPI_UNAVAILABLE("MPI_Alloc_mem",a1,a2,a3) #endif #ifndef PyMPI_HAVE_MPI_Free_mem #undef MPI_Free_mem #define MPI_Free_mem(a1) PyMPI_UNAVAILABLE("MPI_Free_mem",a1) #endif #ifndef PyMPI_HAVE_MPI_Init #undef MPI_Init #define MPI_Init(a1,a2) PyMPI_UNAVAILABLE("MPI_Init",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Finalize #undef MPI_Finalize #define MPI_Finalize() PyMPI_UNAVAILABLE("MPI_Finalize") #endif #ifndef PyMPI_HAVE_MPI_Initialized #undef MPI_Initialized #define MPI_Initialized(a1) PyMPI_UNAVAILABLE("MPI_Initialized",a1) #endif #ifndef PyMPI_HAVE_MPI_Finalized #undef MPI_Finalized #define MPI_Finalized(a1) PyMPI_UNAVAILABLE("MPI_Finalized",a1) #endif #ifndef PyMPI_HAVE_MPI_THREAD_SINGLE #undef MPI_THREAD_SINGLE #define MPI_THREAD_SINGLE (0) #endif #ifndef PyMPI_HAVE_MPI_THREAD_FUNNELED #undef MPI_THREAD_FUNNELED #define MPI_THREAD_FUNNELED (1) #endif #ifndef PyMPI_HAVE_MPI_THREAD_SERIALIZED #undef MPI_THREAD_SERIALIZED #define MPI_THREAD_SERIALIZED (2) #endif #ifndef 
PyMPI_HAVE_MPI_THREAD_MULTIPLE #undef MPI_THREAD_MULTIPLE #define MPI_THREAD_MULTIPLE (3) #endif #ifndef PyMPI_HAVE_MPI_Init_thread #undef MPI_Init_thread #define MPI_Init_thread(a1,a2,a3,a4) PyMPI_UNAVAILABLE("MPI_Init_thread",a1,a2,a3,a4) #endif #ifndef PyMPI_HAVE_MPI_Query_thread #undef MPI_Query_thread #define MPI_Query_thread(a1) PyMPI_UNAVAILABLE("MPI_Query_thread",a1) #endif #ifndef PyMPI_HAVE_MPI_Is_thread_main #undef MPI_Is_thread_main #define MPI_Is_thread_main(a1) PyMPI_UNAVAILABLE("MPI_Is_thread_main",a1) #endif #ifndef PyMPI_HAVE_MPI_VERSION #undef MPI_VERSION #define MPI_VERSION (1) #endif #ifndef PyMPI_HAVE_MPI_SUBVERSION #undef MPI_SUBVERSION #define MPI_SUBVERSION (0) #endif #ifndef PyMPI_HAVE_MPI_Get_version #undef MPI_Get_version #define MPI_Get_version(a1,a2) PyMPI_UNAVAILABLE("MPI_Get_version",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_MAX_LIBRARY_VERSION_STRING #undef MPI_MAX_LIBRARY_VERSION_STRING #define MPI_MAX_LIBRARY_VERSION_STRING (1) #endif #ifndef PyMPI_HAVE_MPI_Get_library_version #undef MPI_Get_library_version #define MPI_Get_library_version(a1,a2) PyMPI_UNAVAILABLE("MPI_Get_library_version",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_MAX_PROCESSOR_NAME #undef MPI_MAX_PROCESSOR_NAME #define MPI_MAX_PROCESSOR_NAME (1) #endif #ifndef PyMPI_HAVE_MPI_Get_processor_name #undef MPI_Get_processor_name #define MPI_Get_processor_name(a1,a2) PyMPI_UNAVAILABLE("MPI_Get_processor_name",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Wtime #undef MPI_Wtime #define MPI_Wtime() PyMPI_UNAVAILABLE("MPI_Wtime") #endif #ifndef PyMPI_HAVE_MPI_Wtick #undef MPI_Wtick #define MPI_Wtick() PyMPI_UNAVAILABLE("MPI_Wtick") #endif #ifndef PyMPI_HAVE_MPI_Pcontrol #undef MPI_Pcontrol #define MPI_Pcontrol(a1) PyMPI_UNAVAILABLE("MPI_Pcontrol",a1) #endif #ifndef PyMPI_HAVE_MPI_Fint #undef MPI_Fint typedef int PyMPI_MPI_Fint; #define MPI_Fint PyMPI_MPI_Fint #endif #ifndef PyMPI_HAVE_MPI_F_STATUS_IGNORE #undef MPI_F_STATUS_IGNORE #define MPI_F_STATUS_IGNORE ((MPI_Fint*)0) #endif #ifndef PyMPI_HAVE_MPI_F_STATUSES_IGNORE #undef MPI_F_STATUSES_IGNORE #define MPI_F_STATUSES_IGNORE ((MPI_Fint*)0) #endif #ifndef PyMPI_HAVE_MPI_Status_c2f #undef MPI_Status_c2f #define MPI_Status_c2f(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_c2f",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Status_f2c #undef MPI_Status_f2c #define MPI_Status_f2c(a1,a2) PyMPI_UNAVAILABLE("MPI_Status_f2c",a1,a2) #endif #ifndef PyMPI_HAVE_MPI_Type_c2f #undef MPI_Type_c2f #define MPI_Type_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Request_c2f #undef MPI_Request_c2f #define MPI_Request_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Message_c2f #undef MPI_Message_c2f #define MPI_Message_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Op_c2f #undef MPI_Op_c2f #define MPI_Op_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Info_c2f #undef MPI_Info_c2f #define MPI_Info_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Group_c2f #undef MPI_Group_c2f #define MPI_Group_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Comm_c2f #undef MPI_Comm_c2f #define MPI_Comm_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Win_c2f #undef MPI_Win_c2f #define MPI_Win_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_File_c2f #undef MPI_File_c2f #define MPI_File_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Errhandler_c2f #undef MPI_Errhandler_c2f #define MPI_Errhandler_c2f(a1) ((MPI_Fint)0) #endif #ifndef PyMPI_HAVE_MPI_Type_f2c #undef MPI_Type_f2c #define MPI_Type_f2c(a1) MPI_DATATYPE_NULL #endif #ifndef PyMPI_HAVE_MPI_Request_f2c #undef MPI_Request_f2c 
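/*
 * Fortran handle conversion fallbacks: the *_c2f macros above collapse to
 * ((MPI_Fint)0) and the *_f2c macros below collapse to the matching null
 * handle. A small sketch of the resulting behavior (illustrative only):
 *
 *   MPI_Fint    f = MPI_Comm_c2f(MPI_COMM_WORLD);  // yields (MPI_Fint)0
 *   MPI_Request r = MPI_Request_f2c(f);            // yields MPI_REQUEST_NULL
 */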
#define MPI_Request_f2c(a1) MPI_REQUEST_NULL #endif #ifndef PyMPI_HAVE_MPI_Message_f2c #undef MPI_Message_f2c #define MPI_Message_f2c(a1) MPI_MESSAGE_NULL #endif #ifndef PyMPI_HAVE_MPI_Op_f2c #undef MPI_Op_f2c #define MPI_Op_f2c(a1) MPI_OP_NULL #endif #ifndef PyMPI_HAVE_MPI_Info_f2c #undef MPI_Info_f2c #define MPI_Info_f2c(a1) MPI_INFO_NULL #endif #ifndef PyMPI_HAVE_MPI_Group_f2c #undef MPI_Group_f2c #define MPI_Group_f2c(a1) MPI_GROUP_NULL #endif #ifndef PyMPI_HAVE_MPI_Comm_f2c #undef MPI_Comm_f2c #define MPI_Comm_f2c(a1) MPI_COMM_NULL #endif #ifndef PyMPI_HAVE_MPI_Win_f2c #undef MPI_Win_f2c #define MPI_Win_f2c(a1) MPI_WIN_NULL #endif #ifndef PyMPI_HAVE_MPI_File_f2c #undef MPI_File_f2c #define MPI_File_f2c(a1) MPI_FILE_NULL #endif #ifndef PyMPI_HAVE_MPI_Errhandler_f2c #undef MPI_Errhandler_f2c #define MPI_Errhandler_f2c(a1) MPI_ERRHANDLER_NULL #endif #endif /* !PyMPI_MISSING_H */ mpi4py-3.1.6/src/lib-pmpi/000077500000000000000000000000001460670727200152565ustar00rootroot00000000000000mpi4py-3.1.6/src/lib-pmpi/mpe.c000066400000000000000000000000401460670727200161750ustar00rootroot00000000000000char pympi_pmpi_name[] = "mpe"; mpi4py-3.1.6/src/lib-pmpi/vt-hyb.c000066400000000000000000000000631460670727200166320ustar00rootroot00000000000000#include "vt.h" char pympi_pmpi_name[] = "vt-hyb"; mpi4py-3.1.6/src/lib-pmpi/vt-mpi.c000066400000000000000000000000631460670727200166350ustar00rootroot00000000000000#include "vt.h" char pympi_pmpi_name[] = "vt-mpi"; mpi4py-3.1.6/src/lib-pmpi/vt.c000066400000000000000000000000571460670727200160550ustar00rootroot00000000000000#include "vt.h" char pympi_pmpi_name[] = "vt"; mpi4py-3.1.6/src/lib-pmpi/vt.h000066400000000000000000000017251460670727200160650ustar00rootroot00000000000000#ifdef LIBVT_LEGACY #include #include #define LIBVT_HAVE_MPI_Init_thread 1 #if (defined(OMPI_MAJOR_VERSION) && \ defined(OMPI_MINOR_VERSION) && \ defined(OMPI_RELEASE_VERSION)) #undef OPENMPI_VERSION_NUMBER #define OPENMPI_VERSION_NUMBER \ ((OMPI_MAJOR_VERSION * 10000) + \ (OMPI_MINOR_VERSION * 100) + \ (OMPI_RELEASE_VERSION * 1)) #if ((OPENMPI_VERSION_NUMBER >= 10300) && \ (OPENMPI_VERSION_NUMBER < 10403)) #undef LIBVT_HAVE_MPI_Init_thread #endif #endif #ifdef __cplusplus extern "C" { #endif extern int POMP_MAX_ID; struct ompregdescr; extern struct ompregdescr* pomp_rd_table[]; int POMP_MAX_ID = 0; struct ompregdescr* pomp_rd_table[] = { 0 }; #ifndef LIBVT_HAVE_MPI_Init_thread int MPI_Init_thread(int *argc, char ***argv, int required, int *provided) { if (provided) *provided = MPI_THREAD_SINGLE; return MPI_Init(argc, argv); } #endif #ifdef __cplusplus } #endif #endif mpi4py-3.1.6/src/mpi.pth000066400000000000000000000026321460670727200150520ustar00rootroot00000000000000# Add Intel MPI to Python 3.8+ DLL search path on Windows import os; I_MPI_ROOT = os.getenv('I_MPI_ROOT') import os; I_MPI_LIBRARY_KIND = os.getenv('I_MPI_LIBRARY_KIND') import os; library_kind = os.getenv('library_kind') import os; kind = I_MPI_LIBRARY_KIND or library_kind or 'release' import os; d1 = I_MPI_ROOT and os.path.join(I_MPI_ROOT, 'bin', kind) import os; d2 = I_MPI_ROOT and os.path.join(I_MPI_ROOT, 'bin') import os; d1 = d1 and os.path.isfile(os.path.join(d1, 'impi.dll')) and d1 import os; d2 = d2 and os.path.isfile(os.path.join(d2, 'impi.dll')) and d2 import os; dlldir = d1 or d2 import os; add_dll_directory = getattr(os, 'add_dll_directory', None) import os; add_dll_directory and dlldir and add_dll_directory(dlldir) import sys; verbose = add_dll_directory and dlldir and sys.flags.verbose >= 1 import sys; 
verbose and print("# add DLL directory: ", dlldir, file=sys.stderr) # Add Microsoft MPI to Python 3.8+ DLL search path on Windows import os; MSMPI_BIN = os.getenv('MSMPI_BIN') import os; dll = MSMPI_BIN and os.path.join(MSMPI_BIN, 'msmpi.dll') import os; dlldir = dll and os.path.isfile(dll) and MSMPI_BIN import os; add_dll_directory = getattr(os, 'add_dll_directory', None) import os; add_dll_directory and dlldir and add_dll_directory(dlldir) import sys; verbose = add_dll_directory and dlldir and sys.flags.verbose >= 1 import sys; verbose and print("# add DLL directory: ", dlldir, file=sys.stderr) mpi4py-3.1.6/src/mpi4py/000077500000000000000000000000001460670727200147675ustar00rootroot00000000000000mpi4py-3.1.6/src/mpi4py/MPI.pxd000066400000000000000000000075741460670727200161460ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com # -- from mpi4py.libmpi cimport MPI_Aint from mpi4py.libmpi cimport MPI_Offset from mpi4py.libmpi cimport MPI_Count from mpi4py.libmpi cimport MPI_Status from mpi4py.libmpi cimport MPI_Datatype from mpi4py.libmpi cimport MPI_Request from mpi4py.libmpi cimport MPI_Message from mpi4py.libmpi cimport MPI_Op from mpi4py.libmpi cimport MPI_Group from mpi4py.libmpi cimport MPI_Info from mpi4py.libmpi cimport MPI_Errhandler from mpi4py.libmpi cimport MPI_Comm from mpi4py.libmpi cimport MPI_Win from mpi4py.libmpi cimport MPI_File # -- cdef import from *: ctypedef MPI_Aint Aint "MPI_Aint" ctypedef MPI_Offset Offset "MPI_Offset" ctypedef MPI_Count Count "MPI_Count" ctypedef public api class Datatype [ type PyMPIDatatype_Type, object PyMPIDatatypeObject, ]: cdef MPI_Datatype ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef public api class Status [ type PyMPIStatus_Type, object PyMPIStatusObject, ]: cdef MPI_Status ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef public api class Request [ type PyMPIRequest_Type, object PyMPIRequestObject, ]: cdef MPI_Request ob_mpi cdef unsigned flags cdef object __weakref__ cdef object ob_buf ctypedef public api class Prequest(Request) [ type PyMPIPrequest_Type, object PyMPIPrequestObject, ]: pass ctypedef public api class Grequest(Request) [ type PyMPIGrequest_Type, object PyMPIGrequestObject, ]: cdef MPI_Request ob_grequest ctypedef public api class Message [ type PyMPIMessage_Type, object PyMPIMessageObject, ]: cdef MPI_Message ob_mpi cdef unsigned flags cdef object __weakref__ cdef object ob_buf ctypedef public api class Op [ type PyMPIOp_Type, object PyMPIOpObject, ]: cdef MPI_Op ob_mpi cdef unsigned flags cdef object __weakref__ cdef object (*ob_func)(object, object) cdef int ob_usrid ctypedef public api class Group [ type PyMPIGroup_Type, object PyMPIGroupObject, ]: cdef MPI_Group ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef public api class Info [ type PyMPIInfo_Type, object PyMPIInfoObject, ]: cdef MPI_Info ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef public api class Errhandler [ type PyMPIErrhandler_Type, object PyMPIErrhandlerObject, ]: cdef MPI_Errhandler ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef public api class Comm [ type PyMPIComm_Type, object PyMPICommObject, ]: cdef MPI_Comm ob_mpi cdef unsigned flags cdef object __weakref__ ctypedef public api class Intracomm(Comm) [ type PyMPIIntracomm_Type, object PyMPIIntracommObject, ]: pass ctypedef public api class Topocomm(Intracomm) [ type PyMPITopocomm_Type, object PyMPITopocommObject, ]: pass ctypedef public api class Cartcomm(Topocomm) [ type PyMPICartcomm_Type, object 
PyMPICartcommObject, ]: pass ctypedef public api class Graphcomm(Topocomm) [ type PyMPIGraphcomm_Type, object PyMPIGraphcommObject, ]: pass ctypedef public api class Distgraphcomm(Topocomm) [ type PyMPIDistgraphcomm_Type, object PyMPIDistgraphcommObject, ]: pass ctypedef public api class Intercomm(Comm) [ type PyMPIIntercomm_Type, object PyMPIIntercommObject, ]: pass ctypedef public api class Win [ type PyMPIWin_Type, object PyMPIWinObject, ]: cdef MPI_Win ob_mpi cdef unsigned flags cdef object __weakref__ cdef object ob_mem ctypedef public api class File [ type PyMPIFile_Type, object PyMPIFileObject, ]: cdef MPI_File ob_mpi cdef unsigned flags cdef object __weakref__ # -- mpi4py-3.1.6/src/mpi4py/MPI.pyi000066400000000000000000001454331460670727200161510ustar00rootroot00000000000000from __future__ import annotations import sys from threading import Lock from typing import overload from typing import ( Any, Union, Literal, Optional, NoReturn, Final, ) if sys.version_info >= (3, 9): from collections.abc import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) from builtins import ( tuple as Tuple, list as List, dict as Dict, ) else: from typing import ( Callable, Hashable, Iterable, Iterator, Sequence, Mapping, ) from typing import ( Tuple as Tuple, List as List, Dict as Dict, ) UNDEFINED: Final[int] = ... ANY_SOURCE: Final[int] = ... ANY_TAG: Final[int] = ... PROC_NULL: Final[int] = ... ROOT: Final[int] = ... class _Bottom(int): ... Bottom = _Bottom BOTTOM: Final[Bottom] = ... class _InPlace(int): ... InPlace = _InPlace IN_PLACE: Final[InPlace] = ... KEYVAL_INVALID: Final[int] = ... TAG_UB: Final[int] = ... HOST: Final[int] = ... IO: Final[int] = ... WTIME_IS_GLOBAL: Final[int] = ... UNIVERSE_SIZE: Final[int] = ... APPNUM: Final[int] = ... LASTUSEDCODE: Final[int] = ... WIN_BASE: Final[int] = ... WIN_SIZE: Final[int] = ... WIN_DISP_UNIT: Final[int] = ... WIN_CREATE_FLAVOR: Final[int] = ... WIN_FLAVOR: Final[int] = ... WIN_MODEL: Final[int] = ... SUCCESS: Final[int] = ... ERR_LASTCODE: Final[int] = ... ERR_COMM: Final[int] = ... ERR_GROUP: Final[int] = ... ERR_TYPE: Final[int] = ... ERR_REQUEST: Final[int] = ... ERR_OP: Final[int] = ... ERR_BUFFER: Final[int] = ... ERR_COUNT: Final[int] = ... ERR_TAG: Final[int] = ... ERR_RANK: Final[int] = ... ERR_ROOT: Final[int] = ... ERR_TRUNCATE: Final[int] = ... ERR_IN_STATUS: Final[int] = ... ERR_PENDING: Final[int] = ... ERR_TOPOLOGY: Final[int] = ... ERR_DIMS: Final[int] = ... ERR_ARG: Final[int] = ... ERR_OTHER: Final[int] = ... ERR_UNKNOWN: Final[int] = ... ERR_INTERN: Final[int] = ... ERR_INFO: Final[int] = ... ERR_FILE: Final[int] = ... ERR_WIN: Final[int] = ... ERR_KEYVAL: Final[int] = ... ERR_INFO_KEY: Final[int] = ... ERR_INFO_VALUE: Final[int] = ... ERR_INFO_NOKEY: Final[int] = ... ERR_ACCESS: Final[int] = ... ERR_AMODE: Final[int] = ... ERR_BAD_FILE: Final[int] = ... ERR_FILE_EXISTS: Final[int] = ... ERR_FILE_IN_USE: Final[int] = ... ERR_NO_SPACE: Final[int] = ... ERR_NO_SUCH_FILE: Final[int] = ... ERR_IO: Final[int] = ... ERR_READ_ONLY: Final[int] = ... ERR_CONVERSION: Final[int] = ... ERR_DUP_DATAREP: Final[int] = ... ERR_UNSUPPORTED_DATAREP: Final[int] = ... ERR_UNSUPPORTED_OPERATION: Final[int] = ... ERR_NAME: Final[int] = ... ERR_NO_MEM: Final[int] = ... ERR_NOT_SAME: Final[int] = ... ERR_PORT: Final[int] = ... ERR_QUOTA: Final[int] = ... ERR_SERVICE: Final[int] = ... ERR_SPAWN: Final[int] = ... ERR_BASE: Final[int] = ... ERR_SIZE: Final[int] = ... ERR_DISP: Final[int] = ... ERR_ASSERT: Final[int] = ... 
ERR_LOCKTYPE: Final[int] = ... ERR_RMA_CONFLICT: Final[int] = ... ERR_RMA_SYNC: Final[int] = ... ERR_RMA_RANGE: Final[int] = ... ERR_RMA_ATTACH: Final[int] = ... ERR_RMA_SHARED: Final[int] = ... ERR_RMA_FLAVOR: Final[int] = ... ORDER_C: Final[int] = ... ORDER_FORTRAN: Final[int] = ... ORDER_F: Final[int] = ... TYPECLASS_INTEGER: Final[int] = ... TYPECLASS_REAL: Final[int] = ... TYPECLASS_COMPLEX: Final[int] = ... DISTRIBUTE_NONE: Final[int] = ... DISTRIBUTE_BLOCK: Final[int] = ... DISTRIBUTE_CYCLIC: Final[int] = ... DISTRIBUTE_DFLT_DARG: Final[int] = ... COMBINER_NAMED: Final[int] = ... COMBINER_DUP: Final[int] = ... COMBINER_CONTIGUOUS: Final[int] = ... COMBINER_VECTOR: Final[int] = ... COMBINER_HVECTOR: Final[int] = ... COMBINER_INDEXED: Final[int] = ... COMBINER_HINDEXED: Final[int] = ... COMBINER_INDEXED_BLOCK: Final[int] = ... COMBINER_HINDEXED_BLOCK: Final[int] = ... COMBINER_STRUCT: Final[int] = ... COMBINER_SUBARRAY: Final[int] = ... COMBINER_DARRAY: Final[int] = ... COMBINER_RESIZED: Final[int] = ... COMBINER_F90_REAL: Final[int] = ... COMBINER_F90_COMPLEX: Final[int] = ... COMBINER_F90_INTEGER: Final[int] = ... IDENT: Final[int] = ... CONGRUENT: Final[int] = ... SIMILAR: Final[int] = ... UNEQUAL: Final[int] = ... CART: Final[int] = ... GRAPH: Final[int] = ... DIST_GRAPH: Final[int] = ... UNWEIGHTED: Final[int] = ... WEIGHTS_EMPTY: Final[int] = ... COMM_TYPE_SHARED: Final[int] = ... BSEND_OVERHEAD: Final[int] = ... WIN_FLAVOR_CREATE: Final[int] = ... WIN_FLAVOR_ALLOCATE: Final[int] = ... WIN_FLAVOR_DYNAMIC: Final[int] = ... WIN_FLAVOR_SHARED: Final[int] = ... WIN_SEPARATE: Final[int] = ... WIN_UNIFIED: Final[int] = ... MODE_NOCHECK: Final[int] = ... MODE_NOSTORE: Final[int] = ... MODE_NOPUT: Final[int] = ... MODE_NOPRECEDE: Final[int] = ... MODE_NOSUCCEED: Final[int] = ... LOCK_EXCLUSIVE: Final[int] = ... LOCK_SHARED: Final[int] = ... MODE_RDONLY: Final[int] = ... MODE_WRONLY: Final[int] = ... MODE_RDWR: Final[int] = ... MODE_CREATE: Final[int] = ... MODE_EXCL: Final[int] = ... MODE_DELETE_ON_CLOSE: Final[int] = ... MODE_UNIQUE_OPEN: Final[int] = ... MODE_SEQUENTIAL: Final[int] = ... MODE_APPEND: Final[int] = ... SEEK_SET: Final[int] = ... SEEK_CUR: Final[int] = ... SEEK_END: Final[int] = ... DISPLACEMENT_CURRENT: Final[int] = ... DISP_CUR: Final[int] = ... THREAD_SINGLE: Final[int] = ... THREAD_FUNNELED: Final[int] = ... THREAD_SERIALIZED: Final[int] = ... THREAD_MULTIPLE: Final[int] = ... VERSION: Final[int] = ... SUBVERSION: Final[int] = ... MAX_PROCESSOR_NAME: Final[int] = ... MAX_ERROR_STRING: Final[int] = ... MAX_PORT_NAME: Final[int] = ... MAX_INFO_KEY: Final[int] = ... MAX_INFO_VAL: Final[int] = ... MAX_OBJECT_NAME: Final[int] = ... MAX_DATAREP_STRING: Final[int] = ... MAX_LIBRARY_VERSION_STRING: Final[int] = ... class Datatype: def __new__(cls, datatype: Optional[Datatype] = None) -> Datatype: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... def Get_size(self) -> int: ... def Get_extent(self) -> Tuple[int, int]: ... def Dup(self) -> Datatype: ... def Create_contiguous(self, count: int) -> Datatype: ... def Create_vector(self, count: int, blocklength: int, stride: int) -> Datatype: ... def Create_hvector(self, count: int, blocklength: int, stride: int) -> Datatype: ... def Create_indexed(self, blocklengths: Sequence[int], displacements: Sequence[int]) -> Datatype: ... def Create_hindexed(self, blocklengths: Sequence[int], displacements: Sequence[int]) -> Datatype: ... 
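# A minimal usage sketch for the derived-datatype constructors declared
# above (assumes an initialized MPI environment; the counts are arbitrary):
from mpi4py import MPI

# Describe 3 blocks of 1 double, each separated by a stride of 2 elements.
strided = MPI.DOUBLE.Create_vector(3, 1, 2)
strided.Commit()   # derived datatypes must be committed before use
# ... pass `strided` as the datatype of a send/receive buffer ...
strided.Free()     # release the handle when it is no longer needed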
def Create_indexed_block(self, blocklength: int, displacements: Sequence[int]) -> Datatype: ... def Create_hindexed_block(self, blocklength: int, displacements: Sequence[int]) -> Datatype: ... @classmethod def Create_struct(cls, blocklengths: Sequence[int], displacements: Sequence[int], datatypes: Sequence[Datatype]) -> Datatype: ... def Create_subarray(self, sizes: Sequence[int], subsizes: Sequence[int], starts: Sequence[int], order: int = ORDER_C) -> Datatype: ... def Create_darray(self, size: int, rank: int, gsizes: Sequence[int], distribs: Sequence[int], dargs: Sequence[int], psizes: Sequence[int], order: int = ORDER_C) -> Datatype: ... @classmethod def Create_f90_integer(cls, r: int) -> Datatype: ... @classmethod def Create_f90_real(cls, p: int, r: int) -> Datatype: ... @classmethod def Create_f90_complex(cls, p: int, r: int) -> Datatype: ... @classmethod def Match_size(cls, typeclass: int, size: int) -> Datatype: ... def Commit(self) -> Datatype: ... def Free(self) -> None: ... def Create_resized(self, lb: int, extent: int) -> Datatype: ... def Get_true_extent(self) -> Tuple[int, int]: ... def Get_envelope(self) -> Tuple[int, int, int, int]: ... def Get_contents(self) -> Tuple[List[int], List[int], List[Datatype]]: ... def decode(self) -> Tuple[Datatype, str, Dict[str, Any]]: ... def Pack(self, inbuf: BufSpec, outbuf: BufSpec, position: int, comm: Comm) -> int: ... def Unpack(self, inbuf: BufSpec, position: int, outbuf: BufSpec, comm: Comm) -> int: ... def Pack_size(self, count: int, comm: Comm) -> int: ... def Pack_external(self, datarep: str, inbuf: BufSpec, outbuf: BufSpec, position: int) -> int: ... def Unpack_external(self, datarep: str, inbuf: BufSpec, position: int, outbuf: BufSpec) -> int: ... def Pack_external_size(self, datarep: str, count: int) -> int: ... def Get_attr(self, keyval: int) -> Optional[Union[int, Any]]: ... def Set_attr(self, keyval: int, attrval: Any) -> None: ... def Delete_attr(self, keyval: int) -> None: ... @classmethod def Create_keyval(cls, copy_fn: Optional[Callable[[Datatype, int, Any], Any]] = None, delete_fn: Optional[Callable[[Datatype, int, Any], None]] = None, nopython: bool = False) -> int: ... @classmethod def Free_keyval(cls, keyval: int) -> int: ... def Get_name(self) -> str: ... def Set_name(self, name: str) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Datatype: ... size: int extent: int lb: int ub: int true_extent: int true_lb: int true_ub: int envelope: Tuple[int, int, int, int] contents: Tuple[List[int], List[int], List[Datatype]] combiner: int is_named: bool is_predefined: bool name: str Create_dup = Dup Resized = Create_resized DATATYPE_NULL: Final[Datatype] = ... UB: Final[Datatype] = ... LB: Final[Datatype] = ... PACKED: Final[Datatype] = ... BYTE: Final[Datatype] = ... AINT: Final[Datatype] = ... OFFSET: Final[Datatype] = ... COUNT: Final[Datatype] = ... CHAR: Final[Datatype] = ... WCHAR: Final[Datatype] = ... SIGNED_CHAR: Final[Datatype] = ... SHORT: Final[Datatype] = ... INT: Final[Datatype] = ... LONG: Final[Datatype] = ... LONG_LONG: Final[Datatype] = ... UNSIGNED_CHAR: Final[Datatype] = ... UNSIGNED_SHORT: Final[Datatype] = ... UNSIGNED: Final[Datatype] = ... UNSIGNED_LONG: Final[Datatype] = ... UNSIGNED_LONG_LONG: Final[Datatype] = ... FLOAT: Final[Datatype] = ... DOUBLE: Final[Datatype] = ... LONG_DOUBLE: Final[Datatype] = ... C_BOOL: Final[Datatype] = ... INT8_T: Final[Datatype] = ... INT16_T: Final[Datatype] = ... INT32_T: Final[Datatype] = ... INT64_T: Final[Datatype] = ... 
UINT8_T: Final[Datatype] = ... UINT16_T: Final[Datatype] = ... UINT32_T: Final[Datatype] = ... UINT64_T: Final[Datatype] = ... C_COMPLEX: Final[Datatype] = ... C_FLOAT_COMPLEX: Final[Datatype] = ... C_DOUBLE_COMPLEX: Final[Datatype] = ... C_LONG_DOUBLE_COMPLEX: Final[Datatype] = ... CXX_BOOL: Final[Datatype] = ... CXX_FLOAT_COMPLEX: Final[Datatype] = ... CXX_DOUBLE_COMPLEX: Final[Datatype] = ... CXX_LONG_DOUBLE_COMPLEX: Final[Datatype] = ... SHORT_INT: Final[Datatype] = ... INT_INT: Final[Datatype] = ... TWOINT: Final[Datatype] = ... LONG_INT: Final[Datatype] = ... FLOAT_INT: Final[Datatype] = ... DOUBLE_INT: Final[Datatype] = ... LONG_DOUBLE_INT: Final[Datatype] = ... CHARACTER: Final[Datatype] = ... LOGICAL: Final[Datatype] = ... INTEGER: Final[Datatype] = ... REAL: Final[Datatype] = ... DOUBLE_PRECISION: Final[Datatype] = ... COMPLEX: Final[Datatype] = ... DOUBLE_COMPLEX: Final[Datatype] = ... LOGICAL1: Final[Datatype] = ... LOGICAL2: Final[Datatype] = ... LOGICAL4: Final[Datatype] = ... LOGICAL8: Final[Datatype] = ... INTEGER1: Final[Datatype] = ... INTEGER2: Final[Datatype] = ... INTEGER4: Final[Datatype] = ... INTEGER8: Final[Datatype] = ... INTEGER16: Final[Datatype] = ... REAL2: Final[Datatype] = ... REAL4: Final[Datatype] = ... REAL8: Final[Datatype] = ... REAL16: Final[Datatype] = ... COMPLEX4: Final[Datatype] = ... COMPLEX8: Final[Datatype] = ... COMPLEX16: Final[Datatype] = ... COMPLEX32: Final[Datatype] = ... UNSIGNED_INT: Final[Datatype] = ... SIGNED_SHORT: Final[Datatype] = ... SIGNED_INT: Final[Datatype] = ... SIGNED_LONG: Final[Datatype] = ... SIGNED_LONG_LONG: Final[Datatype] = ... BOOL: Final[Datatype] = ... SINT8_T: Final[Datatype] = ... SINT16_T: Final[Datatype] = ... SINT32_T: Final[Datatype] = ... SINT64_T: Final[Datatype] = ... F_BOOL: Final[Datatype] = ... F_INT: Final[Datatype] = ... F_FLOAT: Final[Datatype] = ... F_DOUBLE: Final[Datatype] = ... F_COMPLEX: Final[Datatype] = ... F_FLOAT_COMPLEX: Final[Datatype] = ... F_DOUBLE_COMPLEX: Final[Datatype] = ... class Status: def __new__(cls, status: Optional[Status] = None) -> Status: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def Get_source(self) -> int: ... def Set_source(self, source: int) -> None: ... def Get_tag(self) -> int: ... def Set_tag(self, tag: int) -> None: ... def Get_error(self) -> int: ... def Set_error(self, error: int) -> None: ... def Get_count(self, datatype: Datatype = BYTE) -> int: ... def Get_elements(self, datatype: Datatype) -> int: ... def Set_elements(self, datatype: Datatype, count: int) -> None: ... def Is_cancelled(self) -> bool: ... def Set_cancelled(self, flag: bool) -> None: ... def py2f(self) -> List[int]: ... @classmethod def f2py(cls, arg: List[int]) -> Status: ... source: int tag: int error: int count: int cancelled: bool class Request: def __new__(cls, request: Optional[Request] = None) -> Request: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... def Wait(self, status: Optional[Status] = None) -> Literal[True]: ... def Test(self, status: Optional[Status] = None) -> bool: ... def Free(self) -> None: ... def Get_status(self, status: Optional[Status] = None) -> bool: ... @classmethod def Waitany(cls, requests: Sequence[Request], status: Optional[Status] = None) -> int: ... @classmethod def Testany(cls, requests: Sequence[Request], status: Optional[Status] = None) -> Tuple[int, bool]: ... 
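# A usage sketch for Status with wildcard receives (assumes NumPy and a run
# with at least two processes; tag values are arbitrary):
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
if comm.Get_rank() != 0:
    payload = np.full(4, comm.Get_rank(), dtype='i')
    comm.Send([payload, MPI.INT], dest=0, tag=comm.Get_rank())
else:
    buf = np.empty(4, dtype='i')
    status = MPI.Status()
    for _ in range(comm.Get_size() - 1):
        comm.Recv([buf, MPI.INT], source=MPI.ANY_SOURCE,
                  tag=MPI.ANY_TAG, status=status)
        # The Status object records who actually sent each message.
        print(status.Get_source(), status.Get_tag(), status.Get_count(MPI.INT))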
@classmethod def Waitall(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> Literal[True]: ... @classmethod def Testall(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> bool: ... @classmethod def Waitsome(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> Optional[List[int]]: ... @classmethod def Testsome(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> Optional[List[int]]: ... def Cancel(self) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Request: ... def wait(self, status: Optional[Status] = None) -> Any: ... def test(self, status: Optional[Status] = None) -> Tuple[bool, Optional[Any]]: ... def get_status(self, status: Optional[Status] = None) -> bool: ... @classmethod def waitany(cls, requests: Sequence[Request], status: Optional[Status] = None) -> Tuple[int, Any]: ... @classmethod def testany(cls, requests: Sequence[Request], status: Optional[Status] = None) -> Tuple[int, bool, Optional[Any]]: ... @classmethod def waitall(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> List[Any]: ... @classmethod def testall(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> Tuple[bool, Optional[List[Any]]]: ... @classmethod def waitsome(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> Tuple[Optional[List[int]], Optional[List[Any]]]: ... @classmethod def testsome(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> Tuple[Optional[List[int]], Optional[List[Any]]]: ... def cancel(self) -> None: ... REQUEST_NULL: Final[Request] = ... class Prequest(Request): def __new__(cls, request: Optional[Request] = None) -> Prequest: ... def Start(self) -> None: ... @classmethod def Startall(cls, requests: List[Prequest]) -> None: ... class Grequest(Request): def __new__(cls, request: Optional[Request] = None) -> Grequest: ... @classmethod def Start(cls, query_fn: Callable[..., None], free_fn: Callable[..., None], cancel_fn: Callable[..., None], args: Optional[Tuple[Any]] = None, kargs: Optional[Dict[str, Any]] = None) -> Grequest: ... def Complete(self) -> None: ... class Message: def __new__(cls, message: Optional[Message] = None) -> Message: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... @classmethod def Probe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Message: ... @classmethod def Iprobe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Optional[Message]: ... def Recv(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Irecv(self, buf: BufSpec) -> Request: ... @classmethod def probe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Message: ... @classmethod def iprobe(cls, comm: Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Optional[Message]: ... def recv(self, status: Optional[Status] = None) -> Any: ... def irecv(self) -> Request: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Message: ... MESSAGE_NULL: Final[Message] = ... MESSAGE_NO_PROC: Final[Message] = ... class Op: def __new__(cls, op: Optional[Op] = None) -> Op: ... def __call__(self, x: Any, y: Any) -> Any: ... def __eq__(self, other: object) -> bool: ... 
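# A usage sketch for nonblocking requests (pickle-based isend/irecv plus
# Request.waitall; assumes at least two processes):
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
right, left = (rank + 1) % size, (rank - 1) % size

# Post the receive first, then the send, and complete both together.
reqs = [comm.irecv(source=left, tag=0),
        comm.isend({'from': rank}, dest=right, tag=0)]
incoming, _ = MPI.Request.waitall(reqs)   # one return value per request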
def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... @classmethod def Create(cls, function: Callable[[Buffer, Buffer, Datatype], None], commute: bool = False) -> Op: ... def Free(self) -> None: ... def Is_commutative(self) -> bool: ... def Reduce_local(self, inbuf: BufSpec, inoutbuf: BufSpec) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Op: ... is_commutative: bool is_predefined: bool OP_NULL: Final[Op] = ... MAX: Final[Op] = ... MIN: Final[Op] = ... SUM: Final[Op] = ... PROD: Final[Op] = ... LAND: Final[Op] = ... BAND: Final[Op] = ... LOR: Final[Op] = ... BOR: Final[Op] = ... LXOR: Final[Op] = ... BXOR: Final[Op] = ... MAXLOC: Final[Op] = ... MINLOC: Final[Op] = ... REPLACE: Final[Op] = ... NO_OP: Final[Op] = ... class Group: def __new__(cls, group: Optional[Group] = None) -> Group: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... def Get_size(self) -> int: ... def Get_rank(self) -> int: ... @classmethod def Translate_ranks(cls, group1: Group, ranks1: Sequence[int], group2: Optional[Group] = None) -> List[int]: ... @classmethod def Compare(cls, group1: Group, group2: Group) -> int: ... def Dup(self) -> Group: ... @classmethod def Union(cls, group1: Group, group2: Group) -> Group: ... @classmethod def Intersection(cls, group1: Group, group2: Group) -> Group: ... @classmethod def Difference(cls, group1: Group, group2: Group) -> Group: ... def Incl(self, ranks: Sequence[int]) -> Group: ... def Excl(self, ranks: Sequence[int]) -> Group: ... def Range_incl(self, ranks: Sequence[Tuple[int, int, int]]) -> Group: ... def Range_excl(self, ranks: Sequence[Tuple[int, int, int]]) -> Group: ... def Free(self) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Group: ... size: int rank: int Intersect = Intersection GROUP_NULL: Final[Group] = ... GROUP_EMPTY: Final[Group] = ... class Info: def __new__(cls, info: Optional[Info] = None) -> Info: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __iter__(self) -> Iterator[str]: ... def __bool__(self) -> bool: ... def __len__(self) -> int: ... def __getitem__(self, item: str) -> str: ... def __setitem__(self, item: str, value: str) -> None: ... def __delitem__(self, item: str) -> None: ... def __contains__(self, value: str) -> bool: ... @classmethod def Create(cls) -> Info: ... def Free(self) -> None: ... def Dup(self) -> Info: ... def Get(self, key: str, maxlen: int = -1) -> Optional[str]: ... def Set(self, key: str, value: str) -> None: ... def Delete(self, key: str) -> None: ... def Get_nkeys(self) -> int: ... def Get_nthkey(self, n: int) -> str: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Info: ... def get(self, key: str, default: Optional[str] = None) -> Optional[str]: ... def keys(self) -> List[str]: ... def values(self) -> List[str]: ... def items(self) -> List[Tuple[str, str]]: ... def update(self, other: Union[Info, Mapping[str, str], Iterable[Tuple[str, str]]] = (), **kwds: str) -> None: ... def pop(self, key: str, *default: str) -> str: ... def popitem(self) -> Tuple[str, str]: ... def copy(self) -> Info: ... def clear(self) -> None: ... INFO_NULL: Final[Info] = ... INFO_ENV: Final[Info] = ... class Errhandler: def __new__(cls, errhandler: Optional[Errhandler] = None) -> Errhandler: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... 
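# A usage sketch for Group and Info (assumes COMM_WORLD has at least two
# processes; the info key shown is just an example hint):
from mpi4py import MPI

comm = MPI.COMM_WORLD
world_group = comm.Get_group()
even_group = world_group.Incl(list(range(0, comm.Get_size(), 2)))
even_comm = comm.Create(even_group)        # COMM_NULL on excluded ranks
if even_comm != MPI.COMM_NULL:
    even_comm.Free()
even_group.Free()
world_group.Free()

info = MPI.Info.Create()                   # behaves like a str->str mapping
info['access_style'] = 'read_once'
assert info.get('access_style') == 'read_once'
info.Free()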
def Free(self) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Errhandler: ... ERRHANDLER_NULL: Final[Errhandler] = ... ERRORS_RETURN: Final[Errhandler] = ... ERRORS_ARE_FATAL: Final[Errhandler] = ... class Comm: def __new__(cls, comm: Optional[Comm] = None) -> Comm: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... def Get_group(self) -> Group: ... def Get_size(self) -> int: ... def Get_rank(self) -> int: ... @classmethod def Compare(cls, comm1: Comm, comm2: Comm) -> int: ... def Clone(self) -> Comm: ... def Dup(self, info: Optional[Info] = None) -> Comm: ... def Dup_with_info(self, info: Info) -> Comm: ... def Idup(self) -> Tuple[Comm, Request]: ... def Create(self, group: Group) -> Comm: ... def Create_group(self, group: Group, tag: int = 0) -> Comm: ... def Split(self, color: int = 0, key: int = 0) -> Comm: ... def Split_type(self, split_type: int, key: int = 0, info: Info = INFO_NULL) -> Comm: ... def Free(self) -> None: ... def Set_info(self, info: Info) -> None: ... def Get_info(self) -> Info: ... def Send(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Recv(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> None: ... def Sendrecv(self, sendbuf: BufSpec, dest: int, sendtag: int = 0, recvbuf: BufSpec = None, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Optional[Status] = None) -> None: ... def Sendrecv_replace(self, buf: BufSpec, dest: int, sendtag: int = 0, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Optional[Status] = None) -> None: ... def Isend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Irecv(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG) -> Request: ... def Probe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Literal[True]: ... def Iprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> bool: ... def Mprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Message: ... def Improbe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Optional[Message]: ... def Send_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Prequest: ... def Recv_init(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG) -> Prequest: ... def Bsend(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Ssend(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Rsend(self, buf: BufSpec, dest: int, tag: int = 0) -> None: ... def Ibsend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Issend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Irsend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Bsend_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Ssend_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Rsend_init(self, buf: BufSpec, dest: int, tag: int = 0) -> Request: ... def Barrier(self) -> None: ... def Bcast(self, buf: BufSpec, root: int = 0) -> None: ... def Gather(self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecB], root: int = 0) -> None: ... def Gatherv(self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecV], root: int = 0) -> None: ... def Scatter(self, sendbuf: Optional[BufSpecB], recvbuf: Union[BufSpec, InPlace], root: int = 0) -> None: ... 
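# A usage sketch for the buffer-based point-to-point calls above (assumes
# NumPy and at least two processes); Sendrecv pairs the send and receive,
# so a ring shift like this cannot deadlock:
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

sendbuf = np.full(5, rank, dtype='d')
recvbuf = np.empty(5, dtype='d')
comm.Sendrecv([sendbuf, MPI.DOUBLE], dest=(rank + 1) % size, sendtag=0,
              recvbuf=[recvbuf, MPI.DOUBLE], source=(rank - 1) % size,
              recvtag=0)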
def Scatterv(self, sendbuf: Optional[BufSpecV], recvbuf: Union[BufSpec, InPlace], root: int = 0) -> None: ... def Allgather(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecB) -> None: ... def Allgatherv(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecV) -> None: ... def Alltoall(self, sendbuf: Union[BufSpecB, InPlace], recvbuf: BufSpecB) -> None: ... def Alltoallv(self, sendbuf: Union[BufSpecV, InPlace], recvbuf: BufSpecV) -> None: ... def Alltoallw(self, sendbuf: Union[BufSpecW, InPlace], recvbuf: BufSpecW) -> None: ... def Reduce(self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpec], op: Op = SUM, root: int = 0) -> None: ... def Allreduce(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, op: Op = SUM) -> None: ... def Reduce_scatter_block(self, sendbuf: Union[BufSpecB, InPlace], recvbuf: Union[BufSpec, BufSpecB], op: Op = SUM) -> None: ... def Reduce_scatter(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, recvcounts: Optional[Sequence[int]] = None, op: Op = SUM) -> None: ... def Ibarrier(self) -> Request: ... def Ibcast(self, buf: BufSpec, root: int = 0) -> Request: ... def Igather(self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecB], root: int = 0) -> Request: ... def Igatherv(self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecV], root: int = 0) -> Request: ... def Iscatter(self, sendbuf: Optional[BufSpecB], recvbuf: Union[BufSpec, InPlace], root: int = 0) -> Request: ... def Iscatterv(self, sendbuf: Optional[BufSpecV], recvbuf: Union[BufSpec, InPlace], root: int = 0) -> Request: ... def Iallgather(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecB) -> Request: ... def Iallgatherv(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecV) -> Request: ... def Ialltoall(self, sendbuf: Union[BufSpecB, InPlace], recvbuf: BufSpecB) -> Request: ... def Ialltoallv(self, sendbuf: Union[BufSpecV, InPlace], recvbuf: BufSpecV) -> Request: ... def Ialltoallw(self, sendbuf: Union[BufSpecW, InPlace], recvbuf: BufSpecW) -> Request: ... def Ireduce(self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpec], op: Op = SUM, root: int = 0) -> Request: ... def Iallreduce(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, op: Op = SUM) -> Request: ... def Ireduce_scatter_block(self, sendbuf: Union[BufSpecB, InPlace], recvbuf: Union[BufSpec, BufSpecB], op: Op = SUM) -> Request: ... def Ireduce_scatter(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, recvcounts: Optional[Sequence[int]] = None, op: Op = SUM) -> Request: ... def Is_inter(self) -> bool: ... def Is_intra(self) -> bool: ... def Get_topology(self) -> int: ... @classmethod def Get_parent(cls) -> Intercomm: ... def Disconnect(self) -> None: ... @classmethod def Join(cls, fd: int) -> Intercomm: ... def Get_attr(self, keyval: int) -> Optional[Union[int, Any]]: ... def Set_attr(self, keyval: int, attrval: Any) -> None: ... def Delete_attr(self, keyval: int) -> None: ... @classmethod def Create_keyval(cls, copy_fn: Optional[Callable[[Comm, int, Any], Any]] = None, delete_fn: Optional[Callable[[Comm, int, Any], None]] = None, nopython: bool = False) -> int: ... @classmethod def Free_keyval(cls, keyval: int) -> int: ... def Get_errhandler(self) -> Errhandler: ... def Set_errhandler(self, errhandler: Errhandler) -> None: ... def Call_errhandler(self, errorcode: int) -> None: ... def Abort(self, errorcode: int = 0) -> NoReturn: ... def Get_name(self) -> str: ... def Set_name(self, name: str) -> None: ... def py2f(self) -> int: ... 
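# A usage sketch for the buffer-based collectives above (assumes NumPy;
# IN_PLACE makes the receive buffer double as the send buffer):
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
counts = np.array([comm.Get_rank() + 1], dtype='i')
comm.Allreduce(MPI.IN_PLACE, [counts, MPI.INT], op=MPI.SUM)
# counts[0] now holds 1 + 2 + ... + size on every rank.

data = np.zeros(3, dtype='d')
if comm.Get_rank() == 0:
    data[:] = [1.0, 2.0, 3.0]
comm.Bcast([data, MPI.DOUBLE], root=0)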
@classmethod def f2py(cls, arg: int) -> Comm: ... def send(self, obj: Any, dest: int, tag: int = 0) -> None: ... def bsend(self, obj: Any, dest: int, tag: int = 0) -> None: ... def ssend(self, obj: Any, dest: int, tag: int = 0) -> None: ... def recv(self, buf: Optional[Buffer] = None, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Any: ... def sendrecv(self, sendobj: Any, dest: int, sendtag: int = 0, recvbuf: Optional[Buffer] = None, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Optional[Status] = None) -> Any: ... def isend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... def ibsend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... def issend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... def irecv(self, buf: Optional[Buffer] = None, source: int = ANY_SOURCE, tag: int = ANY_TAG) -> Request: ... def probe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Literal[True]: ... def iprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> bool: ... def mprobe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Message: ... def improbe(self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None) -> Optional[Message]: ... def barrier(self) -> None: ... def bcast(self, obj: Any, root: int = 0) -> Any: ... def gather(self, sendobj: Any, root: int = 0) -> Optional[List[Any]]: ... def scatter(self, sendobj: Sequence[Any], root: int = 0) -> Any: ... def allgather(self, sendobj: Any) -> List[Any]: ... def alltoall(self, sendobj: Sequence[Any]) -> List[Any]: ... def reduce(self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM, root: int = 0) -> Optional[Any]: ... def allreduce(self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM) -> Any: ... group: Group size: int rank: int info: Info is_inter: bool is_intra: bool topology: int is_topo: bool name: str COMM_NULL: Final[Comm] = ... class Intracomm(Comm): def __new__(cls, comm: Optional[Comm] = None) -> Intracomm: ... def Create_cart(self, dims: Sequence[int], periods: Optional[Sequence[bool]] = None, reorder: bool = False) -> Cartcomm: ... def Create_graph(self, index: Sequence[int], edges: Sequence[int], reorder: bool = False) -> Graphcomm: ... def Create_dist_graph_adjacent(self, sources: Sequence[int], destinations: Sequence[int], sourceweights: Optional[Sequence[int]] = None, destweights: Optional[Sequence[int]] = None, info: Info = INFO_NULL, reorder: bool = False) -> Distgraphcomm: ... def Create_dist_graph(self, sources: Sequence[int], degrees: Sequence[int], destinations: Sequence[int], weights: Optional[Sequence[int]] = None, info: Info = INFO_NULL, reorder: bool = False) -> Distgraphcomm: ... def Create_intercomm(self, local_leader: int, peer_comm: Intracomm, remote_leader: int, tag: int = 0) -> Intercomm: ... def Cart_map(self, dims: Sequence[int], periods: Optional[Sequence[bool]] = None) -> int: ... def Graph_map(self, index: Sequence[int], edges: Sequence[int]) -> int: ... def Scan(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, op: Op = SUM) -> None: ... def Exscan(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, op: Op = SUM) -> None: ... def Iscan(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, op: Op = SUM) -> Request: ... def Iexscan(self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, op: Op = SUM) -> Request: ... 
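# A usage sketch for the lowercase, pickle-based methods above; they accept
# arbitrary picklable Python objects instead of buffer specifications:
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

config = comm.bcast({'steps': 10, 'tol': 1e-6} if rank == 0 else None, root=0)
gathered = comm.gather({'rank': rank, 'value': rank ** 2}, root=0)  # list on root, None elsewhere
total = comm.allreduce(rank, op=MPI.SUM)   # sum of all ranks, on every process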
def scan(self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM) -> Any: ... def exscan(self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM) -> Any: ... def Spawn(self, command: str, args: Optional[Sequence[str]] = None, maxprocs: int = 1, info: Info = INFO_NULL, root: int = 0, errcodes: Optional[list] = None) -> Intercomm: ... def Spawn_multiple(self, command: Sequence[str], args: Optional[Sequence[Sequence[str]]] = None, maxprocs: Optional[Sequence[int]] = None, info: Union[Info, Sequence[Info]] = INFO_NULL, root: int = 0, errcodes: Optional[list] = None) -> Intercomm: ... def Accept(self, port_name: str, info: Info = INFO_NULL, root: int = 0) -> Intercomm: ... def Connect(self, port_name: str, info: Info = INFO_NULL, root: int = 0) -> Intercomm: ... COMM_SELF: Final[Intracomm] = ... COMM_WORLD: Final[Intracomm] = ... class Topocomm(Intracomm): def __new__(cls, comm: Optional[Comm] = None) -> Topocomm: ... def Neighbor_allgather(self, sendbuf: BufSpec, recvbuf: BufSpecB) -> None: ... def Neighbor_allgatherv(self, sendbuf: BufSpec, recvbuf: BufSpecV) -> None: ... def Neighbor_alltoall(self, sendbuf: BufSpecB, recvbuf: BufSpecB) -> None: ... def Neighbor_alltoallv(self, sendbuf: BufSpecV, recvbuf: BufSpecV) -> None: ... def Neighbor_alltoallw(self, sendbuf: BufSpecW, recvbuf: BufSpecW) -> None: ... def Ineighbor_allgather(self, sendbuf: BufSpec, recvbuf: BufSpecB) -> Request: ... def Ineighbor_allgatherv(self, sendbuf: BufSpec, recvbuf: BufSpecV) -> Request: ... def Ineighbor_alltoall(self, sendbuf: BufSpecB, recvbuf: BufSpecB) -> Request: ... def Ineighbor_alltoallv(self, sendbuf: BufSpecV, recvbuf: BufSpecV) -> Request: ... def Ineighbor_alltoallw(self, sendbuf: BufSpecW, recvbuf: BufSpecW) -> Request: ... def neighbor_allgather(self, sendobj: Any) -> List[Any]: ... def neighbor_alltoall(self, sendobj: List[Any]) -> List[Any]: ... degrees: Tuple[int, int] indegree: int outdegree: int inoutedges: Tuple[List[int], List[int]] inedges: List[int] outedges: List[int] class Cartcomm(Topocomm): def __new__(cls, comm: Optional[Comm] = None) -> Cartcomm: ... def Get_dim(self) -> int: ... def Get_topo(self) -> Tuple[List[int], List[int], List[int]]: ... def Get_cart_rank(self, coords: Sequence[int]) -> int: ... def Get_coords(self, rank: int) -> List[int]: ... def Shift(self, direction: int, disp: int) -> Tuple[int, int]: ... def Sub(self, remain_dims: Sequence[bool]) -> Cartcomm: ... dim: int ndim: int topo: Tuple[List[int], List[int], List[int]] dims: List[int] periods: List[int] coords: List[int] class Graphcomm(Topocomm): def __new__(cls, comm: Optional[Comm] = None) -> Graphcomm: ... def Get_dims(self) -> Tuple[int, int]: ... def Get_topo(self) -> Tuple[List[int], List[int]]: ... def Get_neighbors_count(self, rank: int) -> int: ... def Get_neighbors(self, rank: int) -> List[int]: ... dims: Tuple[int, int] nnodes: int nedges: int topo: Tuple[List[int], List[int]] index: List[int] edges: List[int] nneighbors: int neighbors: List[int] class Distgraphcomm(Topocomm): def __new__(cls, comm: Optional[Comm] = None) -> Distgraphcomm: ... def Get_dist_neighbors_count(self) -> int: ... def Get_dist_neighbors(self) -> Tuple[List[int], List[int], Optional[Tuple[List[int], List[int]]]]: ... class Intercomm(Comm): def __new__(cls, comm: Optional[Comm] = None) -> Intercomm: ... def Get_remote_group(self) -> Group: ... def Get_remote_size(self) -> int: ... def Merge(self, high: bool = False) -> Intracomm: ... 
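# A usage sketch for the Cartesian topology helpers above (a 1-D periodic
# grid spanning all processes; dims/periods values are illustrative):
from mpi4py import MPI

comm = MPI.COMM_WORLD
cart = comm.Create_cart(dims=[comm.Get_size()], periods=[True], reorder=False)
coords = cart.Get_coords(cart.Get_rank())
left, right = cart.Shift(direction=0, disp=1)   # neighbor ranks along axis 0
cart.Free()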
remote_group: Group remote_size: int class Win: def __new__(cls, win: Optional[Win] = None) -> Win: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... @classmethod def Create(cls, memory: Union[Buffer, Bottom, None], disp_unit: int = 1, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Win: ... @classmethod def Allocate(cls, size: int, disp_unit: int = 1, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Win: ... @classmethod def Allocate_shared(cls, size: int, disp_unit: int = 1, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Win: ... def Shared_query(self, rank: int) -> Tuple[memory, int]: ... @classmethod def Create_dynamic(cls, info: Info = INFO_NULL, comm: Intracomm = COMM_SELF) -> Win: ... def Attach(self, memory: Buffer) -> None: ... def Detach(self, memory: Buffer) -> None: ... def Free(self) -> None: ... def Set_info(self, info: Info) -> None: ... def Get_info(self) -> Info: ... def Get_group(self) -> Group: ... def Get_attr(self, keyval: int) -> Optional[Union[int, Any]]: ... def Set_attr(self, keyval: int, attrval: Any) -> None: ... def Delete_attr(self, keyval: int) -> None: ... @classmethod def Create_keyval(cls, copy_fn: Optional[Callable[[Win, int, Any], Any]] = None, delete_fn: Optional[Callable[[Win, int, Any], None]] = None, nopython: bool = False) -> int: ... @classmethod def Free_keyval(cls, keyval: int) -> int: ... def tomemory(self) -> memory: ... def Put(self, origin: BufSpec, target_rank: int, target: Optional[TargetSpec] = None) -> None: ... def Get(self, origin: BufSpec, target_rank: int, target: Optional[TargetSpec] = None) -> None: ... def Accumulate(self, origin: BufSpec, target_rank: int, target: Optional[TargetSpec] = None, op: Op = SUM) -> None: ... def Get_accumulate(self, origin: BufSpec, result: BufSpec, target_rank: int, target: Optional[TargetSpec] = None, op: Op = SUM) -> None: ... def Fetch_and_op(self, origin: BufSpec, result: BufSpec, target_rank: int, target_disp: int = 0, op: Op = SUM) -> None: ... def Compare_and_swap(self, origin: BufSpec, compare: BufSpec, result: BufSpec, target_rank: int, target_disp: int = 0) -> None: ... def Rput(self, origin: BufSpec, target_rank: int, target: Optional[TargetSpec] = None) -> Request: ... def Rget(self, origin: BufSpec, target_rank: int, target: Optional[TargetSpec] = None) -> Request: ... def Raccumulate(self, origin: BufSpec, target_rank: int, target: Optional[TargetSpec] = None, op: Op = SUM) -> Request: ... def Rget_accumulate(self, origin: BufSpec, result: BufSpec, target_rank: int, target: Optional[TargetSpec] = None, op: Op = SUM) -> Request: ... def Fence(self, assertion: int = 0) -> None: ... def Start(self, group: Group, assertion: int = 0) -> None: ... def Complete(self) -> None: ... def Post(self, group: Group, assertion: int = 0) -> None: ... def Wait(self) -> Literal[True]: ... def Test(self) -> bool: ... def Lock(self, rank: int, lock_type: int = LOCK_EXCLUSIVE, assertion: int = 0) -> None: ... def Unlock(self, rank: int) -> None: ... def Lock_all(self, assertion: int = 0) -> None: ... def Unlock_all(self) -> None: ... def Flush(self, rank: int) -> None: ... def Flush_all(self) -> None: ... def Flush_local(self, rank: int) -> None: ... def Flush_local_all(self) -> None: ... def Sync(self) -> None: ... def Get_errhandler(self) -> Errhandler: ... def Set_errhandler(self, errhandler: Errhandler) -> None: ... def Call_errhandler(self, errorcode: int) -> None: ... def Get_name(self) -> str: ... 
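# A usage sketch for the one-sided (RMA) calls above (assumes NumPy and at
# least two processes; each rank exposes a single double through a window):
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

value = np.full(1, float(rank), dtype='d')
win = MPI.Win.Create(value, disp_unit=value.itemsize, comm=comm)

result = np.zeros(1, dtype='d')
win.Fence()
win.Get([result, MPI.DOUBLE], (rank + 1) % size)   # read the neighbor's value
win.Fence()
win.Free()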
def Set_name(self, name: str) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> Win: ... info: Info group: Group attrs: Tuple[int, int, int] flavor: int model: int name: str WIN_NULL: Final[Win] = ... class File: def __new__(cls, file: Optional[File] = None) -> File: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __bool__(self) -> bool: ... @classmethod def Open(cls, comm: Intracomm, filename: str, amode: int = MODE_RDONLY, info: Info = INFO_NULL) -> File: ... def Close(self) -> None: ... @classmethod def Delete(cls, filename: str, info: Info = INFO_NULL) -> None: ... def Set_size(self, size: int) -> None: ... def Preallocate(self, size: int) -> None: ... def Get_size(self) -> int: ... def Get_amode(self) -> int: ... def Get_group(self) -> Group: ... def Set_info(self, info: Info) -> None: ... def Get_info(self) -> Info: ... def Set_view(self, disp: int = 0, etype: Datatype = BYTE, filetype: Optional[Datatype] = None, datarep: str = 'native', info: Info = INFO_NULL) -> None: ... def Get_view(self) -> Tuple[int, Datatype, Datatype, str]: ... def Read_at(self, offset: int, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Read_at_all(self, offset: int, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_at(self, offset: int, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_at_all(self, offset: int, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Iread_at(self, offset: int, buf: BufSpec) -> Request: ... def Iread_at_all(self, offset: int, buf: BufSpec) -> Request: ... def Iwrite_at(self, offset: int, buf: BufSpec) -> Request: ... def Iwrite_at_all(self, offset: int, buf: BufSpec) -> Request: ... def Read(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Read_all(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_all(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Iread(self, buf: BufSpec) -> Request: ... def Iread_all(self, buf: BufSpec) -> Request: ... def Iwrite(self, buf: BufSpec) -> Request: ... def Iwrite_all(self, buf: BufSpec) -> Request: ... def Seek(self, offset: int, whence: int = SEEK_SET) -> None: ... def Get_position(self) -> int: ... def Get_byte_offset(self, offset: int) -> int: ... def Read_shared(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_shared(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Iread_shared(self, buf: BufSpec) -> Request: ... def Iwrite_shared(self, buf: BufSpec) -> Request: ... def Read_ordered(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_ordered(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Seek_shared(self, offset: int, whence: int = SEEK_SET) -> None: ... def Get_position_shared(self) -> int: ... def Read_at_all_begin(self, offset: int, buf: BufSpec) -> None: ... def Read_at_all_end(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_at_all_begin(self, offset: int, buf: BufSpec) -> None: ... def Write_at_all_end(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Read_all_begin(self, buf: BufSpec) -> None: ... def Read_all_end(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_all_begin(self, buf: BufSpec) -> None: ... 
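# A usage sketch for the MPI-IO calls above (the file name is arbitrary;
# assumes NumPy and a writable working directory):
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

buf = np.full(4, rank, dtype='i')
amode = MPI.MODE_WRONLY | MPI.MODE_CREATE
fh = MPI.File.Open(comm, 'testfile.bin', amode)
fh.Write_at_all(rank * buf.nbytes, [buf, MPI.INT])  # each rank writes its own block
fh.Close()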
def Write_all_end(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Read_ordered_begin(self, buf: BufSpec) -> None: ... def Read_ordered_end(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Write_ordered_begin(self, buf: BufSpec) -> None: ... def Write_ordered_end(self, buf: BufSpec, status: Optional[Status] = None) -> None: ... def Get_type_extent(self, datatype: Datatype) -> int: ... def Set_atomicity(self, flag: bool) -> None: ... def Get_atomicity(self) -> bool: ... def Sync(self) -> None: ... def Get_errhandler(self) -> Errhandler: ... def Set_errhandler(self, errhandler: Errhandler) -> None: ... def Call_errhandler(self, errorcode: int) -> None: ... def py2f(self) -> int: ... @classmethod def f2py(cls, arg: int) -> File: ... size: int amode: int group: Group info: Info atomicity: bool FILE_NULL: Final[File] = ... class memory: @overload def __new__(cls) -> memory: ... @overload def __new__(cls, buf: Buffer, /) -> memory: ... def __len__(self) -> int: ... @overload def __getitem__(self, item: int) -> int: ... @overload def __getitem__(self, item: slice) -> memory: ... @overload def __setitem__(self, item: int, value: int) -> None: ... @overload def __setitem__(self, item: slice, value: Buffer) -> None: ... @staticmethod def allocate(nbytes: int, clear: bool = False) -> memory: ... @staticmethod def frombuffer(obj: Buffer, readonly: bool = False) -> memory: ... @staticmethod def fromaddress(address: int, nbytes: int, readonly: bool = False) -> memory: ... def tobytes(self, order: Optional[str] = None) -> bytes: ... def toreadonly(self) -> memory: ... def release(self) -> None: ... address: int obj: Optional[Buffer] nbytes: int readonly: bool format: str itemsize: int class Pickle: @overload def __init__(self, dumps: Callable[[Any, int], bytes], loads: Callable[[Buffer], Any], protocol: Optional[int] = None, ) -> None: ... @overload def __init__(self, dumps: Optional[Callable[[Any], bytes]] = None, loads: Optional[Callable[[Buffer], Any]] = None, ) -> None: ... def dumps(self, obj: Any, buffer_callback: Optional[Callable[[Buffer], Any]] = None) -> bytes: ... def loads(self, data: Buffer, buffers: Optional[Iterable[Buffer]] = None) -> Any: ... PROTOCOL: Optional[int] pickle: Final[Pickle] = ... class Exception(RuntimeError): def __init__(self, ierr: int = SUCCESS) -> None: ... def __eq__(self, other: object) -> bool: ... def __ne__(self, other: object) -> bool: ... def __hash__(self) -> int: ... def __bool__(self) -> bool: ... def __int__(self) -> int: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... def Get_error_code(self) -> int: ... error_code: int def Get_error_class(self) -> int: ... error_class: int def Get_error_string(self) -> str: ... error_string: str def Get_error_class(errorcode: int) -> int: ... def Get_error_string(errorcode: int) -> str: ... def Add_error_class() -> int: ... def Add_error_code(errorclass: int) -> int: ... def Add_error_string(errorcode: int, string: str) -> None: ... def Get_address(location: Union[Buffer, Bottom]) -> int: ... def Aint_add(base: int, disp: int) -> int: ... def Aint_diff(addr1: int, addr2: int) -> int: ... def Compute_dims(nnodes: int, dims: Union[int, Sequence[int]]) -> List[int]: ... def Attach_buffer(buf: Buffer) -> None: ... def Detach_buffer() -> Buffer: ... def Open_port(info: Info = INFO_NULL) -> str: ... def Close_port(port_name: str) -> None: ... def Publish_name(service_name: str, port_name: str, info: Info = INFO_NULL) -> None: ... 
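# A usage sketch for the error-handling pieces above: with ERRORS_RETURN
# installed, failing MPI calls raise MPI.Exception, whose accessors map
# error codes to readable text (no actually failing call is shown here):
from mpi4py import MPI

comm = MPI.COMM_WORLD
comm.Set_errhandler(MPI.ERRORS_RETURN)      # report errors instead of aborting
try:
    pass  # an erroneous MPI call would raise MPI.Exception here
except MPI.Exception as exc:
    print(exc.Get_error_class(), exc.Get_error_string())
print(MPI.Get_error_string(MPI.ERR_COMM))   # text for a plain error code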
def Unpublish_name(service_name: str, port_name: str, info: Info = INFO_NULL) -> None: ... def Lookup_name(service_name: str, info: Info = INFO_NULL) -> str: ... def Register_datarep(datarep: str, read_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], write_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], extent_fn: Callable[[Datatype], int]) -> None: ... def Alloc_mem(size: int, info: Info = INFO_NULL) -> memory: ... def Free_mem(mem: memory) -> None: ... def Init() -> None: ... def Finalize() -> None: ... def Init_thread(required: int = THREAD_MULTIPLE) -> int: ... def Query_thread() -> int: ... def Is_thread_main() -> bool: ... def Is_initialized() -> bool: ... def Is_finalized() -> bool: ... def Get_version() -> Tuple[int, int]: ... def Get_library_version() -> str: ... def Get_processor_name() -> str: ... def Wtime() -> float: ... def Wtick() -> float: ... def Pcontrol(level: int) -> None: ... def get_vendor() -> Tuple[str, Tuple[int, int, int]]: ... def _set_abort_status(status: Any) -> None: ... def _comm_lock(comm: Comm, key: Hashable = None) -> Lock: ... def _comm_lock_table(comm: Comm) -> Dict[Hashable, Lock]: ... _lock_table = _comm_lock_table def _commctx_intra(comm: Intracomm) -> Tuple[Intracomm, int]: ... def _commctx_inter(comm: Intercomm) -> Tuple[Intercomm, int, Intracomm, bool]: ... def _typecode(datatype: Datatype) -> Optional[str]: ... def _sizeof(arg: Any) -> int: ... def _addressof(arg: Any) -> int: ... def _handleof(arg: Any) -> int: ... _typedict: Final[Dict[str, Datatype]] = ... _typedict_c: Final[Dict[str, Datatype]] = ... _typedict_f: Final[Dict[str, Datatype]] = ... Buffer = Any # TODO Count = int Displ = int TypeSpec = Union[Datatype, str] BufSpec = Union[ Buffer, Tuple[Buffer, Count], # (buffer, count) Tuple[Buffer, TypeSpec], # (buffer, datatype) Tuple[Buffer, Count, TypeSpec], # (buffer, count, datatype) Tuple[Bottom, Count, Datatype], # (BOTTOM, count, datatype) List, # (buffer, count, datatype) ] BufSpecB = Union[ Buffer, Tuple[Buffer, TypeSpec], # (buffer, datatype) Tuple[Buffer, Count, TypeSpec], # (buffer, count, datatype) List, # (buffer, count, datatype) ] BufSpecV = Union[ Buffer, Tuple[Buffer, Sequence[Count]], # (buffer, counts) Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]]], # (buffer, (counts, displs)) Tuple[Buffer, TypeSpec], # (buffer, datatype) Tuple[Buffer, Sequence[Count], TypeSpec], # (buffer, counts, datatype) Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], TypeSpec], # (buffer, (counts, displs), datatype) Tuple[Buffer, Sequence[Count], Sequence[Displ], TypeSpec], # (buffer, counts, displs, datatype) Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Datatype], # (BOTTOM, (counts, displs), datatype) Tuple[Bottom, Sequence[Count], Sequence[Displ], Datatype], # (BOTTOM, counts, displs, datatype) List, # (buffer, counts, displs, datatypes) ] BufSpecW = Union[ Tuple[Buffer, Sequence[Datatype]], # (buffer, datatypes) Tuple[Buffer, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], # (buffer, (counts, displs), datatypes) Tuple[Buffer, Sequence[Count], Sequence[Displ], Sequence[Datatype]], # (buffer, counts, displs, datatypes) Tuple[Bottom, Tuple[Sequence[Count], Sequence[Displ]], Sequence[Datatype]], # (BOTTOM, (counts, displs), datatypes) Tuple[Bottom, Sequence[Count], Sequence[Displ], Sequence[Datatype]], # (BOTTOM, counts, displs, datatypes) List, # (buffer, counts, displs, datatypes) ] TargetSpec = Union[ Displ, # displ Tuple[()], # () Tuple[Displ], # (displ,) Tuple[Displ, Count], # (displ, 
count) Tuple[Displ, Count, Datatype], # (displ, count, datatype) List, # (displ, count, datatype) ] mpi4py-3.1.6/src/mpi4py/MPI.pyx000066400000000000000000000006641460670727200161640ustar00rootroot00000000000000#cython: embedsignature=True #cython: annotation_typing=False #cython: cdivision=True #cython: binding=False #cython: auto_pickle=False #cython: always_allow_keywords=True #cython: allow_none_for_extension_args=False #cython: autotestdict=False #cython: warn.multiple_declarators=False #cython: optimize.use_switch=False #cython: legacy_implicit_noexcept=True from __future__ import absolute_import cimport cython include "MPI/MPI.pyx" mpi4py-3.1.6/src/mpi4py/MPI/000077500000000000000000000000001460670727200154145ustar00rootroot00000000000000mpi4py-3.1.6/src/mpi4py/MPI/CAPI.pxi000066400000000000000000000103641460670727200166560ustar00rootroot00000000000000# ----------------------------------------------------------------------------- # Datatype cdef api object PyMPIDatatype_New(MPI_Datatype arg): cdef Datatype obj = Datatype.__new__(Datatype) obj.ob_mpi = arg return obj cdef api MPI_Datatype* PyMPIDatatype_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Status cdef api object PyMPIStatus_New(MPI_Status *arg): cdef Status obj = Status.__new__(Status) if (arg != NULL and arg != MPI_STATUS_IGNORE and arg != MPI_STATUSES_IGNORE): obj.ob_mpi = arg[0] else: pass # XXX should fail ? return obj cdef api MPI_Status* PyMPIStatus_Get(object arg) except? NULL: if arg is None: return MPI_STATUS_IGNORE return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Request cdef api object PyMPIRequest_New(MPI_Request arg): cdef Request obj = Request.__new__(Request) obj.ob_mpi = arg return obj cdef api MPI_Request* PyMPIRequest_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Message cdef api object PyMPIMessage_New(MPI_Message arg): cdef Message obj = Message.__new__(Message) obj.ob_mpi = arg return obj cdef api MPI_Message* PyMPIMessage_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Op cdef api object PyMPIOp_New(MPI_Op arg): cdef Op obj = Op.__new__(Op) obj.ob_mpi = arg return obj cdef api MPI_Op* PyMPIOp_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Group cdef api object PyMPIGroup_New(MPI_Group arg): cdef Group obj = Group.__new__(Group) obj.ob_mpi = arg return obj cdef api MPI_Group* PyMPIGroup_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Info cdef api object PyMPIInfo_New(MPI_Info arg): cdef Info obj = Info.__new__(Info) obj.ob_mpi = arg return obj cdef api MPI_Info* PyMPIInfo_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Errhandler cdef api object PyMPIErrhandler_New(MPI_Errhandler arg): cdef Errhandler obj = Errhandler.__new__(Errhandler) obj.ob_mpi = arg return obj cdef api MPI_Errhandler* PyMPIErrhandler_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Comm cdef api object PyMPIComm_New(MPI_Comm arg): cdef type cls = Comm cdef int inter = 0 cdef int topo = 
MPI_UNDEFINED if arg != MPI_COMM_NULL: CHKERR( MPI_Comm_test_inter(arg, &inter) ) if inter: cls = Intercomm else: CHKERR( MPI_Topo_test(arg, &topo) ) if topo == MPI_UNDEFINED: cls = Intracomm elif topo == MPI_CART: cls = Cartcomm elif topo == MPI_GRAPH: cls = Graphcomm elif topo == MPI_DIST_GRAPH: cls = Distgraphcomm else: cls = Intracomm cdef Comm obj = cls.__new__(cls) obj.ob_mpi = arg return obj cdef api MPI_Comm* PyMPIComm_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # Win cdef api object PyMPIWin_New(MPI_Win arg): cdef Win obj = Win.__new__(Win) obj.ob_mpi = arg return obj cdef api MPI_Win* PyMPIWin_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- # File cdef api object PyMPIFile_New(MPI_File arg): cdef File obj = File.__new__(File) obj.ob_mpi = arg return obj cdef api MPI_File* PyMPIFile_Get(object arg) except NULL: return &(arg).ob_mpi # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/Comm.pyx000066400000000000000000002513611460670727200170610ustar00rootroot00000000000000# Communicator Comparisons # ------------------------ IDENT = MPI_IDENT #: Groups are identical, contexts are the same CONGRUENT = MPI_CONGRUENT #: Groups are identical, contexts are different SIMILAR = MPI_SIMILAR #: Groups are similar, rank order differs UNEQUAL = MPI_UNEQUAL #: Groups are different # Communicator Topologies # ----------------------- CART = MPI_CART #: Cartesian topology GRAPH = MPI_GRAPH #: General graph topology DIST_GRAPH = MPI_DIST_GRAPH #: Distributed graph topology # Graph Communicator Weights # -------------------------- UNWEIGHTED = __UNWEIGHTED__ #: Unweighted graph WEIGHTS_EMPTY = __WEIGHTS_EMPTY__ #: Empty graph weights # Communicator Split Type # ----------------------- COMM_TYPE_SHARED = MPI_COMM_TYPE_SHARED cdef class Comm: """ Communicator """ def __cinit__(self, Comm comm: Optional[Comm] = None): self.ob_mpi = MPI_COMM_NULL if comm is None: return self.ob_mpi = comm.ob_mpi def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Comm(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Comm): return NotImplemented cdef Comm s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_COMM_NULL # Group # ----- def Get_group(self) -> Group: """ Access the group associated with a communicator """ cdef Group group = Group.__new__(Group) with nogil: CHKERR( MPI_Comm_group(self.ob_mpi, &group.ob_mpi) ) return group property group: """communicator group""" def __get__(self) -> Group: return self.Get_group() # Communicator Accessors # ---------------------- def Get_size(self) -> int: """ Return the number of processes in a communicator """ cdef int size = -1 CHKERR( MPI_Comm_size(self.ob_mpi, &size) ) return size property size: """number of processes in communicator""" def __get__(self) -> int: return self.Get_size() def Get_rank(self) -> int: """ Return the rank of this process in a communicator """ cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) return rank property rank: """rank of this process in communicator""" def __get__(self) -> int: return self.Get_rank() 
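# A usage sketch for the basic communicator queries implemented above (run
# under an MPI launcher, e.g. `mpiexec -n 4 python script.py`):
from mpi4py import MPI

comm = MPI.COMM_WORLD
print("process %d of %d on %s"
      % (comm.Get_rank(), comm.Get_size(), MPI.Get_processor_name()))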
@classmethod def Compare(cls, Comm comm1: Comm, Comm comm2: Comm) -> int: """ Compare two communicators """ cdef int flag = MPI_UNEQUAL with nogil: CHKERR( MPI_Comm_compare( comm1.ob_mpi, comm2.ob_mpi, &flag) ) return flag # Communicator Constructors # ------------------------- def Clone(self) -> Comm: """ Clone an existing communicator """ cdef type comm_type = type(self) cdef Comm comm = comm_type.__new__(comm_type) with nogil: CHKERR( MPI_Comm_dup(self.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Dup(self, Info info: Optional[Info] = None) -> Comm: """ Duplicate an existing communicator """ cdef MPI_Info cinfo = arg_Info(info) cdef type comm_type = type(self) cdef Comm comm = comm_type.__new__(comm_type) if info is None: with nogil: CHKERR( MPI_Comm_dup( self.ob_mpi, &comm.ob_mpi) ) else: with nogil: CHKERR( MPI_Comm_dup_with_info( self.ob_mpi, cinfo, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Dup_with_info(self, Info info: Info) -> Comm: """ Duplicate an existing communicator """ cdef type comm_type = type(self) cdef Comm comm = comm_type.__new__(comm_type) with nogil: CHKERR( MPI_Comm_dup_with_info( self.ob_mpi, info.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Idup(self) -> Tuple[Comm, Request]: """ Nonblocking duplicate an existing communicator """ cdef type comm_type = type(self) cdef Comm comm = comm_type.__new__(comm_type) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Comm_idup( self.ob_mpi, &comm.ob_mpi, &request.ob_mpi) ) comm_set_eh(comm.ob_mpi) return (comm, request) def Create(self, Group group: Group) -> Comm: """ Create communicator from group """ cdef type comm_type = Comm if isinstance(self, Intracomm): comm_type = Intracomm elif isinstance(self, Intercomm): comm_type = Intercomm cdef Comm comm = comm_type.__new__(comm_type) with nogil: CHKERR( MPI_Comm_create( self.ob_mpi, group.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_group(self, Group group: Group, int tag: int = 0) -> Comm: """ Create communicator from group """ cdef type comm_type = Comm if isinstance(self, Intracomm): comm_type = Intracomm elif isinstance(self, Intercomm): comm_type = Intercomm cdef Comm comm = comm_type.__new__(comm_type) with nogil: CHKERR( MPI_Comm_create_group( self.ob_mpi, group.ob_mpi, tag, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Split(self, int color: int = 0, int key: int = 0) -> Comm: """ Split communicator by color and key """ cdef type comm_type = Comm if isinstance(self, Intracomm): comm_type = Intracomm elif isinstance(self, Intercomm): comm_type = Intercomm cdef Comm comm = comm_type.__new__(comm_type) with nogil: CHKERR( MPI_Comm_split( self.ob_mpi, color, key, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Split_type( self, int split_type: int, int key: int = 0, Info info: Info = INFO_NULL, ) -> Comm: """ Split communicator by split type """ cdef type comm_type = Comm if isinstance(self, Intracomm): comm_type = Intracomm elif isinstance(self, Intercomm): comm_type = Intercomm cdef Comm comm = comm_type.__new__(comm_type) with nogil: CHKERR( MPI_Comm_split_type( self.ob_mpi, split_type, key, info.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Communicator Destructor # ----------------------- def Free(self) -> None: """ Free a communicator """ with nogil: CHKERR( MPI_Comm_free(&self.ob_mpi) ) if self is __COMM_SELF__: self.ob_mpi = MPI_COMM_SELF if self is __COMM_WORLD__: self.ob_mpi = MPI_COMM_WORLD # Communicator Info # 
----------------- def Set_info(self, Info info: Info) -> None: """ Set new values for the hints associated with a communicator """ with nogil: CHKERR( MPI_Comm_set_info( self.ob_mpi, info.ob_mpi) ) def Get_info(self) -> Info: """ Return the hints for a communicator that are currently in use """ cdef Info info = Info.__new__(Info) with nogil: CHKERR( MPI_Comm_get_info( self.ob_mpi, &info.ob_mpi) ) return info property info: """communicator info""" def __get__(self) -> Info: return self.Get_info() def __set__(self, value: Info): self.Set_info(value) # Point to Point communication # ---------------------------- # Blocking Send and Receive Operations # ------------------------------------ def Send( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send .. note:: This function may block until the message is received. Whether or not `Send` blocks depends on several factors and is implementation dependent """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Send( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) def Recv( self, buf: BufSpec, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> None: """ Blocking receive .. note:: This function blocks until the message is received """ cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Recv( rmsg.buf, rmsg.count, rmsg.dtype, source, tag, self.ob_mpi, statusp) ) # Send-Receive # ------------ def Sendrecv( self, sendbuf: BufSpec, int dest: int, int sendtag: int = 0, recvbuf: BufSpec = None, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> None: """ Send and receive a message .. note:: This function is guaranteed not to deadlock in situations where pairs of blocking sends and receives may deadlock. .. caution:: A common mistake when using this function is to mismatch the tags with the source and destination ranks, which can result in deadlock. """ cdef _p_msg_p2p smsg = message_p2p_send(sendbuf, dest) cdef _p_msg_p2p rmsg = message_p2p_recv(recvbuf, source) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Sendrecv( smsg.buf, smsg.count, smsg.dtype, dest, sendtag, rmsg.buf, rmsg.count, rmsg.dtype, source, recvtag, self.ob_mpi, statusp) ) def Sendrecv_replace( self, buf: BufSpec, int dest: int, int sendtag: int = 0, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> None: """ Send and receive a message .. note:: This function is guaranteed not to deadlock in situations where pairs of blocking sends and receives may deadlock. .. caution:: A common mistake when using this function is to mismatch the tags with the source and destination ranks, which can result in deadlock. 
""" cdef int rank = MPI_PROC_NULL if dest != MPI_PROC_NULL: rank = dest if source != MPI_PROC_NULL: rank = source cdef _p_msg_p2p rmsg = message_p2p_recv(buf, rank) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Sendrecv_replace( rmsg.buf, rmsg.count, rmsg.dtype, dest, sendtag, source, recvtag, self.ob_mpi, statusp) ) # Nonblocking Communications # -------------------------- def Isend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Isend( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Irecv( self, buf: BufSpec, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, ) -> Request: """ Nonblocking receive """ cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Irecv( rmsg.buf, rmsg.count, rmsg.dtype, source, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = rmsg return request # Probe # ----- def Probe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Literal[True]: """ Blocking test for a message .. note:: This function blocks until the message arrives. """ cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Probe( source, tag, self.ob_mpi, statusp) ) return True def Iprobe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> bool: """ Nonblocking test for a message """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Iprobe( source, tag, self.ob_mpi, &flag, statusp) ) return flag # Matching Probe # -------------- def Mprobe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Message: """ Blocking test for a matched message """ cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Mprobe( source, tag, self.ob_mpi, &cmessage, statusp) ) cdef Message message = Message.__new__(Message) message.ob_mpi = cmessage return message def Improbe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Optional[Message]: """ Nonblocking test for a matched message """ cdef int flag = 0 cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Improbe( source, tag, self.ob_mpi, &flag, &cmessage, statusp) ) if flag == 0: return None cdef Message message = Message.__new__(Message) message.ob_mpi = cmessage return message # Persistent Communication # ------------------------ def Send_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Prequest: """ Create a persistent request for a standard send """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = Prequest.__new__(Prequest) with nogil: CHKERR( MPI_Send_init( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Recv_init( self, buf: BufSpec, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, ) -> Prequest: """ Create a persistent request for a receive """ cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef Prequest request = Prequest.__new__(Prequest) with nogil: CHKERR( MPI_Recv_init( rmsg.buf, rmsg.count, rmsg.dtype, source, tag, 
self.ob_mpi, &request.ob_mpi) ) request.ob_buf = rmsg return request # Communication Modes # ------------------- # Blocking calls def Bsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send in buffered mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Bsend( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) def Ssend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send in synchronous mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Ssend( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) def Rsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> None: """ Blocking send in ready mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) with nogil: CHKERR( MPI_Rsend( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi) ) # Nonblocking calls def Ibsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send in buffered mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ibsend( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Issend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send in synchronous mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Issend( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Irsend( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Nonblocking send in ready mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Irsend( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request # Persistent Requests def Bsend_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Persistent request for a send in buffered mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = Prequest.__new__(Prequest) with nogil: CHKERR( MPI_Bsend_init( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Ssend_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Persistent request for a send in synchronous mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = Prequest.__new__(Prequest) with nogil: CHKERR( MPI_Ssend_init( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request def Rsend_init( self, buf: BufSpec, int dest: int, int tag: int = 0, ) -> Request: """ Persistent request for a send in ready mode """ cdef _p_msg_p2p smsg = message_p2p_send(buf, dest) cdef Prequest request = Prequest.__new__(Prequest) with nogil: CHKERR( MPI_Rsend_init( smsg.buf, smsg.count, smsg.dtype, dest, tag, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = smsg return request # Collective Communications # ------------------------- # Barrier Synchronization # ----------------------- def Barrier(self) -> None: """ Barrier synchronization """ with nogil: CHKERR( MPI_Barrier(self.ob_mpi) ) # Global Communication Functions # ------------------------------ def Bcast( self, buf: BufSpec, int root: int = 0, ) -> None: """ 
Broadcast a message from one process to all other processes in a group """ cdef _p_msg_cco m = message_cco() m.for_bcast(buf, root, self.ob_mpi) with nogil: CHKERR( MPI_Bcast( m.sbuf, m.scount, m.stype, root, self.ob_mpi) ) def Gather( self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecB], int root: int = 0, ) -> None: """ Gather together values from a group of processes """ cdef _p_msg_cco m = message_cco() m.for_gather(0, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Gather( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi) ) def Gatherv( self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecV], int root: int = 0, ) -> None: """ Gather Vector, gather data to one process from all other processes in a group providing different amount of data and displacements at the receiving sides """ cdef _p_msg_cco m = message_cco() m.for_gather(1, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Gatherv( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, root, self.ob_mpi) ) def Scatter( self, sendbuf: Optional[BufSpecB], recvbuf: Union[BufSpec, InPlace], int root: int = 0, ) -> None: """ Scatter data from one process to all other processes in a group """ cdef _p_msg_cco m = message_cco() m.for_scatter(0, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Scatter( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi) ) def Scatterv( self, sendbuf: Optional[BufSpecV], recvbuf: Union[BufSpec, InPlace], int root: int = 0, ) -> None: """ Scatter Vector, scatter data from one process to all other processes in a group providing different amount of data and displacements at the sending side """ cdef _p_msg_cco m = message_cco() m.for_scatter(1, sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Scatterv( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi) ) def Allgather( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecB, ) -> None: """ Gather to All, gather data from all processes and distribute it to all other processes in a group """ cdef _p_msg_cco m = message_cco() m.for_allgather(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Allgather( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Allgatherv( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecV, ) -> None: """ Gather to All Vector, gather data from all processes and distribute it to all other processes in a group providing different amount of data and displacements """ cdef _p_msg_cco m = message_cco() m.for_allgather(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Allgatherv( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) def Alltoall( self, sendbuf: Union[BufSpecB, InPlace], recvbuf: BufSpecB, ) -> None: """ All to All Scatter/Gather, send data from all to all processes in a group """ cdef _p_msg_cco m = message_cco() m.for_alltoall(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Alltoall( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Alltoallv( self, sendbuf: Union[BufSpecV, InPlace], recvbuf: BufSpecV, ) -> None: """ All to All Scatter/Gather Vector, send data from all to all processes in a group providing different amount of data and displacements """ cdef _p_msg_cco m = message_cco() m.for_alltoall(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Alltoallv( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) 
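# ---------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the mpi4py sources): the
# uppercase, buffer-based collectives defined above (Bcast, Gather) driven
# from Python. NumPy is used only for illustration; any object exposing the
# buffer interface would work.
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# Broadcast a small integer array from rank 0 to every process.
data = np.arange(4, dtype='i') if rank == 0 else np.empty(4, dtype='i')
comm.Bcast(data, root=0)

# Gather two integers from each rank into a contiguous buffer at the root.
sendbuf = np.full(2, rank, dtype='i')
recvbuf = np.empty(2 * size, dtype='i') if rank == 0 else None
comm.Gather(sendbuf, recvbuf, root=0)
# ---------------------------------------------------------------------------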
def Alltoallw( self, sendbuf: Union[BufSpecW, InPlace], recvbuf: BufSpecW, ) -> None: """ Generalized All-to-All communication allowing different counts, displacements and datatypes for each partner """ cdef _p_msg_ccow m = message_ccow() m.for_alltoallw(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Alltoallw( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi) ) # Global Reduction Operations # --------------------------- def Reduce( self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpec], Op op: Op = SUM, int root: int = 0, ) -> None: """ Reduce to Root """ cdef _p_msg_cco m = message_cco() m.for_reduce(sendbuf, recvbuf, root, self.ob_mpi) with nogil: CHKERR( MPI_Reduce( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, root, self.ob_mpi) ) def Allreduce( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, Op op: Op = SUM, ) -> None: """ Reduce to All """ cdef _p_msg_cco m = message_cco() m.for_allreduce(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Allreduce( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) def Reduce_scatter_block( self, sendbuf: Union[BufSpecB, InPlace], recvbuf: Union[BufSpec, BufSpecB], Op op: Op = SUM, ) -> None: """ Reduce-Scatter Block (regular, non-vector version) """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter_block(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Reduce_scatter_block( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) def Reduce_scatter( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, recvcounts: Optional[Sequence[int]] = None, Op op: Op = SUM, ) -> None: """ Reduce-Scatter (vector version) """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter(sendbuf, recvbuf, recvcounts, self.ob_mpi) with nogil: CHKERR( MPI_Reduce_scatter( m.sbuf, m.rbuf, m.rcounts, m.rtype, op.ob_mpi, self.ob_mpi) ) # Nonblocking Collectives # ----------------------- def Ibarrier(self) -> Request: """ Nonblocking Barrier """ cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ibarrier(self.ob_mpi, &request.ob_mpi) ) return request def Ibcast( self, buf: BufSpec, int root: int = 0, ) -> Request: """ Nonblocking Broadcast """ cdef _p_msg_cco m = message_cco() m.for_bcast(buf, root, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ibcast( m.sbuf, m.scount, m.stype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Igather( self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecB], int root: int = 0, ) -> Request: """ Nonblocking Gather """ cdef _p_msg_cco m = message_cco() m.for_gather(0, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Igather( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Igatherv( self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpecV], int root: int = 0, ) -> Request: """ Nonblocking Gather Vector """ cdef _p_msg_cco m = message_cco() m.for_gather(1, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Igatherv( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iscatter( self, sendbuf: Optional[BufSpecB], recvbuf: Union[BufSpec, InPlace], int root: int = 0, ) -> Request: """ Nonblocking Scatter """ cdef _p_msg_cco m = message_cco() 
m.for_scatter(0, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Iscatter( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iscatterv( self, sendbuf: Optional[BufSpecV], recvbuf: Union[BufSpec, InPlace], int root: int = 0, ) -> Request: """ Nonblocking Scatter Vector """ cdef _p_msg_cco m = message_cco() m.for_scatter(1, sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Iscatterv( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcount, m.rtype, root, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iallgather( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecB, ) -> Request: """ Nonblocking Gather to All """ cdef _p_msg_cco m = message_cco() m.for_allgather(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Iallgather( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Iallgatherv( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpecV, ) -> Request: """ Nonblocking Gather to All Vector """ cdef _p_msg_cco m = message_cco() m.for_allgather(1, sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Iallgatherv( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) return request def Ialltoall( self, sendbuf: Union[BufSpecB, InPlace], recvbuf: BufSpecB, ) -> Request: """ Nonblocking All to All Scatter/Gather """ cdef _p_msg_cco m = message_cco() m.for_alltoall(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ialltoall( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ialltoallv( self, sendbuf: Union[BufSpecV, InPlace], recvbuf: BufSpecV, ) -> Request: """ Nonblocking All to All Scatter/Gather Vector """ cdef _p_msg_cco m = message_cco() m.for_alltoall(1, sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ialltoallv( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ialltoallw( self, sendbuf: Union[BufSpecW, InPlace], recvbuf: BufSpecW, ) -> Request: """ Nonblocking Generalized All-to-All """ cdef _p_msg_ccow m = message_ccow() m.for_alltoallw(sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ialltoallw( m.sbuf, m.scounts, m.sdispls, m.stypes, m.rbuf, m.rcounts, m.rdispls, m.rtypes, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ireduce( self, sendbuf: Union[BufSpec, InPlace], recvbuf: Optional[BufSpec], Op op: Op = SUM, int root: int = 0, ) -> Request: """ Nonblocking Reduce to Root """ cdef _p_msg_cco m = message_cco() m.for_reduce(sendbuf, recvbuf, root, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ireduce( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, root, self.ob_mpi, &request.ob_mpi) ) return request def Iallreduce( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, Op op: Op = SUM, ) -> Request: """ Nonblocking Reduce to All """ cdef _p_msg_cco m = message_cco() m.for_allreduce(sendbuf, recvbuf, self.ob_mpi) cdef Request request = 
Request.__new__(Request) with nogil: CHKERR( MPI_Iallreduce( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request def Ireduce_scatter_block( self, sendbuf: Union[BufSpecB, InPlace], recvbuf: Union[BufSpec, BufSpecB], Op op: Op = SUM, ) -> Request: """ Nonblocking Reduce-Scatter Block (regular, non-vector version) """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter_block(sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ireduce_scatter_block( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request def Ireduce_scatter( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, recvcounts: Optional[Sequence[int]] = None, Op op: Op = SUM, ) -> Request: """ Nonblocking Reduce-Scatter (vector version) """ cdef _p_msg_cco m = message_cco() m.for_reduce_scatter(sendbuf, recvbuf, recvcounts, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ireduce_scatter( m.sbuf, m.rbuf, m.rcounts, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request # Tests # ----- def Is_inter(self) -> bool: """ Test to see if a comm is an intercommunicator """ cdef int flag = 0 CHKERR( MPI_Comm_test_inter(self.ob_mpi, &flag) ) return flag property is_inter: """is intercommunicator""" def __get__(self) -> bool: return self.Is_inter() def Is_intra(self) -> bool: """ Test to see if a comm is an intracommunicator """ return not self.Is_inter() property is_intra: """is intracommunicator""" def __get__(self) -> bool: return self.Is_intra() def Get_topology(self) -> int: """ Determine the type of topology (if any) associated with a communicator """ cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) return topo property topology: """communicator topology type""" def __get__(self) -> int: return self.Get_topology() property is_topo: """is a topology communicator""" def __get__(self) -> bool: return self.Get_topology() != MPI_UNDEFINED # Process Creation and Management # ------------------------------- @classmethod def Get_parent(cls) -> Intercomm: """ Return the parent intercommunicator for this process """ cdef Intercomm comm = __COMM_PARENT__ with nogil: CHKERR( MPI_Comm_get_parent(&comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Disconnect(self) -> None: """ Disconnect from a communicator """ with nogil: CHKERR( MPI_Comm_disconnect(&self.ob_mpi) ) @classmethod def Join(cls, int fd: int) -> Intercomm: """ Create a intercommunicator by joining two processes connected by a socket """ cdef Intercomm comm = Intercomm.__new__(Intercomm) with nogil: CHKERR( MPI_Comm_join(fd, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Attributes # ---------- def Get_attr(self, int keyval: int) -> Optional[Union[int, Any]]: """ Retrieve attribute value by key """ cdef void *attrval = NULL cdef int flag = 0 CHKERR( MPI_Comm_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if not flag: return None if attrval == NULL: return 0 # MPI-1 predefined attribute keyvals if (keyval == MPI_TAG_UB or keyval == MPI_HOST or keyval == MPI_IO or keyval == MPI_WTIME_IS_GLOBAL): return (attrval)[0] # MPI-2 predefined attribute keyvals elif (keyval == MPI_UNIVERSE_SIZE or keyval == MPI_APPNUM or keyval == MPI_LASTUSEDCODE): return (attrval)[0] # user-defined attribute keyval return PyMPI_attr_get(self.ob_mpi, keyval, attrval) def Set_attr(self, int keyval: int, attrval: Any) -> None: """ Store attribute value associated with a key """ 
PyMPI_attr_set(self.ob_mpi, keyval, attrval) def Delete_attr(self, int keyval: int) -> None: """ Delete attribute value associated with a key """ CHKERR( MPI_Comm_delete_attr(self.ob_mpi, keyval) ) @classmethod def Create_keyval( cls, copy_fn: Optional[Callable[[Comm, int, Any], Any]] = None, delete_fn: Optional[Callable[[Comm, int, Any], None]] = None, nopython: bool = False, ) -> int: """ Create a new attribute key for communicators """ cdef object state = _p_keyval(copy_fn, delete_fn, nopython) cdef int keyval = MPI_KEYVAL_INVALID cdef MPI_Comm_copy_attr_function *_copy = PyMPI_attr_copy_fn cdef MPI_Comm_delete_attr_function *_del = PyMPI_attr_delete_fn cdef void *extra_state = state CHKERR( MPI_Comm_create_keyval(_copy, _del, &keyval, extra_state) ) comm_keyval[keyval] = state return keyval @classmethod def Free_keyval(cls, int keyval: int) -> int: """ Free an attribute key for communicators """ cdef int keyval_save = keyval CHKERR( MPI_Comm_free_keyval(&keyval) ) try: del comm_keyval[keyval_save] except KeyError: pass return keyval # Error handling # -------------- def Get_errhandler(self) -> Errhandler: """ Get the error handler for a communicator """ cdef Errhandler errhandler = Errhandler.__new__(Errhandler) CHKERR( MPI_Comm_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) ) return errhandler def Set_errhandler(self, Errhandler errhandler: Errhandler) -> None: """ Set the error handler for a communicator """ CHKERR( MPI_Comm_set_errhandler(self.ob_mpi, errhandler.ob_mpi) ) def Call_errhandler(self, int errorcode: int) -> None: """ Call the error handler installed on a communicator """ CHKERR( MPI_Comm_call_errhandler(self.ob_mpi, errorcode) ) def Abort(self, int errorcode: int = 0) -> NoReturn: """ Terminate MPI execution environment .. warning:: This is a direct call, use it with care!!!. 
""" CHKERR( MPI_Abort(self.ob_mpi, errorcode) ) # Naming Objects # -------------- def Get_name(self) -> str: """ Get the print name for this communicator """ cdef char name[MPI_MAX_OBJECT_NAME+1] cdef int nlen = 0 CHKERR( MPI_Comm_get_name(self.ob_mpi, name, &nlen) ) return tompistr(name, nlen) def Set_name(self, name: str) -> None: """ Set the print name for this communicator """ cdef char *cname = NULL name = asmpistr(name, &cname) CHKERR( MPI_Comm_set_name(self.ob_mpi, cname) ) property name: """communicator name""" def __get__(self) -> str: return self.Get_name() def __set__(self, value: str): self.Set_name(value) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Comm_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Comm: """ """ cdef MPI_Comm comm = MPI_Comm_f2c(arg) return PyMPIComm_New(comm) # Python Communication # -------------------- # def send( self, obj: Any, int dest: int, int tag: int = 0, ) -> None: """Send""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_send(obj, dest, tag, comm) # def bsend( self, obj: Any, int dest: int, int tag: int = 0, ) -> None: """Send in buffered mode""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_bsend(obj, dest, tag, comm) # def ssend( self, obj: Any, int dest: int, int tag: int = 0, ) -> None: """Send in synchronous mode""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_ssend(obj, dest, tag, comm) # def recv( self, buf: Optional[Buffer] = None, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Any: """Receive""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_recv(buf, source, tag, comm, statusp) # def sendrecv( self, sendobj: Any, int dest: int, int sendtag: int = 0, recvbuf: Optional[Buffer] = None, int source: int = ANY_SOURCE, int recvtag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Any: """Send and Receive""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_sendrecv(sendobj, dest, sendtag, recvbuf, source, recvtag, comm, statusp) # def isend( self, obj: Any, int dest: int, int tag: int = 0, ) -> Request: """Nonblocking send""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = Request.__new__(Request) request.ob_buf = PyMPI_isend(obj, dest, tag, comm, &request.ob_mpi) return request # def ibsend( self, obj: Any, int dest: int, int tag: int = 0, ) -> Request: """Nonblocking send in buffered mode""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = Request.__new__(Request) request.ob_buf = PyMPI_ibsend(obj, dest, tag, comm, &request.ob_mpi) return request # def issend( self, obj: Any, int dest: int, int tag: int = 0, ) -> Request: """Nonblocking send in synchronous mode""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = Request.__new__(Request) request.ob_buf = PyMPI_issend(obj, dest, tag, comm, &request.ob_mpi) return request # def irecv( self, buf: Optional[Buffer] = None, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, ) -> Request: """Nonblocking receive""" cdef MPI_Comm comm = self.ob_mpi cdef Request request = Request.__new__(Request) request.ob_buf = PyMPI_irecv(buf, source, tag, comm, &request.ob_mpi) return request # def probe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Literal[True]: """Blocking test for a message""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_probe(source, tag, comm, statusp) # def iprobe( self, int 
source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> bool: """Nonblocking test for a message""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) return PyMPI_iprobe(source, tag, comm, statusp) # def mprobe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Message: """Blocking test for a matched message""" cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) cdef Message message = Message.__new__(Message) message.ob_buf = PyMPI_mprobe(source, tag, comm, &message.ob_mpi, statusp) return message # def improbe( self, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Optional[Message]: """Nonblocking test for a matched message""" cdef int flag = 0 cdef MPI_Comm comm = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) cdef Message message = Message.__new__(Message) message.ob_buf = PyMPI_improbe(source, tag, comm, &flag, &message.ob_mpi, statusp) if flag == 0: return None return message # def barrier(self) -> None: """Barrier""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_barrier(comm) # def bcast( self, obj: Any, int root: int = 0, ) -> Any: """Broadcast""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_bcast(obj, root, comm) # def gather( self, sendobj: Any, int root: int = 0, ) -> Optional[List[Any]]: """Gather""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_gather(sendobj, root, comm) # def scatter( self, sendobj: Sequence[Any], int root: int = 0, ) -> Any: """Scatter""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_scatter(sendobj, root, comm) # def allgather( self, sendobj: Any, ) -> List[Any]: """Gather to All""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_allgather(sendobj, comm) # def alltoall( self, sendobj: Sequence[Any], ) -> List[Any]: """All to All Scatter/Gather""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_alltoall(sendobj, comm) # def reduce( self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM, int root: int = 0, ) -> Optional[Any]: """Reduce to Root""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_reduce(sendobj, op, root, comm) # def allreduce( self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM, ) -> Any: """Reduce to All""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_allreduce(sendobj, op, comm) cdef class Intracomm(Comm): """ Intracommunicator """ def __cinit__(self, Comm comm: Optional[Comm] = None): if self.ob_mpi == MPI_COMM_NULL: return cdef int inter = 1 CHKERR( MPI_Comm_test_inter(self.ob_mpi, &inter) ) if inter: raise TypeError( "expecting an intracommunicator") # Communicator Constructors # ------------------------- def Create_cart( self, dims: Sequence[int], periods: Optional[Sequence[bool]] = None, bint reorder: bool = False, ) -> Cartcomm: """ Create cartesian communicator """ cdef int ndims = 0, *idims = NULL, *iperiods = NULL dims = getarray(dims, &ndims, &idims) if periods is None: periods = False if isinstance(periods, bool): periods = [periods] * ndims periods = chkarray(periods, ndims, &iperiods) # cdef Cartcomm comm = Cartcomm.__new__(Cartcomm) with nogil: CHKERR( MPI_Cart_create( self.ob_mpi, ndims, idims, iperiods, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_graph( self, index: Sequence[int], edges: Sequence[int], bint reorder: bool = False, ) -> Graphcomm: """ Create graph communicator """ cdef int nnodes = 0, *iindex = NULL index = getarray(index, &nnodes, &iindex) cdef int 
nedges = 0, *iedges = NULL edges = getarray(edges, &nedges, &iedges) # extension: 'standard' adjacency arrays if iindex[0]==0 and iindex[nnodes-1]==nedges: nnodes -= 1; iindex += 1; # cdef Graphcomm comm = Graphcomm.__new__(Graphcomm) with nogil: CHKERR( MPI_Graph_create( self.ob_mpi, nnodes, iindex, iedges, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_dist_graph_adjacent( self, sources: Sequence[int], destinations: Sequence[int], sourceweights: Optional[Sequence[int]] = None, destweights: Optional[Sequence[int]] = None, Info info: Info = INFO_NULL, bint reorder: bool = False, ) -> Distgraphcomm: """ Create distributed graph communicator """ cdef int indegree = 0, *isource = NULL cdef int outdegree = 0, *idest = NULL cdef int *isourceweight = MPI_UNWEIGHTED cdef int *idestweight = MPI_UNWEIGHTED if sources is not None: sources = getarray(sources, &indegree, &isource) sourceweights = asarray_weights( sourceweights, indegree, &isourceweight) if destinations is not None: destinations = getarray(destinations, &outdegree, &idest) destweights = asarray_weights( destweights, outdegree, &idestweight) # cdef Distgraphcomm comm = Distgraphcomm.__new__(Distgraphcomm) with nogil: CHKERR( MPI_Dist_graph_create_adjacent( self.ob_mpi, indegree, isource, isourceweight, outdegree, idest, idestweight, info.ob_mpi, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_dist_graph( self, sources: Sequence[int], degrees: Sequence[int], destinations: Sequence[int], weights: Optional[Sequence[int]] = None, Info info: Info = INFO_NULL, bint reorder: bool = False, ) -> Distgraphcomm: """ Create distributed graph communicator """ cdef int nv = 0, ne = 0, i = 0 cdef int *isource = NULL, *idegree = NULL, cdef int *idest = NULL, *iweight = MPI_UNWEIGHTED sources = getarray(sources, &nv, &isource) degrees = chkarray(degrees, nv, &idegree) for i from 0 <= i < nv: ne += idegree[i] destinations = chkarray(destinations, ne, &idest) weights = asarray_weights(weights, ne, &iweight) # cdef Distgraphcomm comm = Distgraphcomm.__new__(Distgraphcomm) with nogil: CHKERR( MPI_Dist_graph_create( self.ob_mpi, nv, isource, idegree, idest, iweight, info.ob_mpi, reorder, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm def Create_intercomm( self, int local_leader: int, Intracomm peer_comm: Intracomm, int remote_leader: int, int tag: int = 0, ) -> Intercomm: """ Create intercommunicator """ cdef Intercomm comm = Intercomm.__new__(Intercomm) with nogil: CHKERR( MPI_Intercomm_create( self.ob_mpi, local_leader, peer_comm.ob_mpi, remote_leader, tag, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Low-Level Topology Functions # ---------------------------- def Cart_map( self, dims: Sequence[int], periods: Optional[Sequence[bool]] = None, ) -> int: """ Return an optimal placement for the calling process on the physical machine """ cdef int ndims = 0, *idims = NULL, *iperiods = NULL dims = getarray(dims, &ndims, &idims) if periods is None: periods = False if isinstance(periods, bool): periods = [periods] * ndims periods = chkarray(periods, ndims, &iperiods) cdef int rank = MPI_PROC_NULL CHKERR( MPI_Cart_map(self.ob_mpi, ndims, idims, iperiods, &rank) ) return rank def Graph_map( self, index: Sequence[int], edges: Sequence[int], ) -> int: """ Return an optimal placement for the calling process on the physical machine """ cdef int nnodes = 0, *iindex = NULL index = getarray(index, &nnodes, &iindex) cdef int nedges = 0, *iedges = NULL edges = getarray(edges, &nedges, &iedges) # extension: 
accept more 'standard' adjacency arrays if iindex[0]==0 and iindex[nnodes-1]==nedges: nnodes -= 1; iindex += 1; cdef int rank = MPI_PROC_NULL CHKERR( MPI_Graph_map(self.ob_mpi, nnodes, iindex, iedges, &rank) ) return rank # Global Reduction Operations # --------------------------- # Inclusive Scan def Scan( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, Op op: Op = SUM, ) -> None: """ Inclusive Scan """ cdef _p_msg_cco m = message_cco() m.for_scan(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Scan( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) # Exclusive Scan def Exscan( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, Op op: Op = SUM, ) -> None: """ Exclusive Scan """ cdef _p_msg_cco m = message_cco() m.for_exscan(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Exscan( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi) ) # Nonblocking def Iscan( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, Op op: Op = SUM, ) -> Request: """ Inclusive Scan """ cdef _p_msg_cco m = message_cco() m.for_scan(sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Iscan( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request def Iexscan( self, sendbuf: Union[BufSpec, InPlace], recvbuf: BufSpec, Op op: Op = SUM, ) -> Request: """ Inclusive Scan """ cdef _p_msg_cco m = message_cco() m.for_exscan(sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Iexscan( m.sbuf, m.rbuf, m.rcount, m.rtype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) return request # Python Communication # def scan( self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM, ) -> Any: """Inclusive Scan""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_scan(sendobj, op, comm) # def exscan( self, sendobj: Any, op: Union[Op, Callable[[Any, Any], Any]] = SUM, ) -> Any: """Exclusive Scan""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_exscan(sendobj, op, comm) # Establishing Communication # -------------------------- # Starting Processes def Spawn( self, command: str, args: Optional[Sequence[str]] = None, int maxprocs: int = 1, Info info: Info = INFO_NULL, int root: int = 0, errcodes: Optional[list] = None, ) -> Intercomm: """ Spawn instances of a single MPI application """ cdef char *cmd = NULL cdef char **argv = MPI_ARGV_NULL cdef int *ierrcodes = MPI_ERRCODES_IGNORE # cdef int rank = MPI_UNDEFINED CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) cdef tmp1, tmp2, tmp3 if root == rank: tmp1 = asmpistr(command, &cmd) tmp2 = asarray_argv(args, &argv) if errcodes is not None: tmp3 = newarray(maxprocs, &ierrcodes) # cdef Intercomm comm = Intercomm.__new__(Intercomm) with nogil: CHKERR( MPI_Comm_spawn( cmd, argv, maxprocs, info.ob_mpi, root, self.ob_mpi, &comm.ob_mpi, ierrcodes) ) # cdef int i=0 if errcodes is not None: errcodes[:] = [ierrcodes[i] for i from 0 <= i < maxprocs] # comm_set_eh(comm.ob_mpi) return comm def Spawn_multiple( self, command: Sequence[str], args: Optional[Sequence[Sequence[str]]] = None, maxprocs: Optional[Sequence[int]] = None, info: Union[Info, Sequence[Info]] = INFO_NULL, int root: int = 0, errcodes: Optional[list] = None, ) -> Intercomm: """ Spawn instances of multiple MPI applications """ cdef int count = 0 cdef char **cmds = NULL cdef char ***argvs = MPI_ARGVS_NULL cdef MPI_Info *infos = NULL cdef int *imaxprocs = NULL cdef int *ierrcodes = MPI_ERRCODES_IGNORE # cdef int rank = MPI_UNDEFINED CHKERR( 
MPI_Comm_rank(self.ob_mpi, &rank) ) cdef tmp1, tmp2, tmp3, tmp4, tmp5 if root == rank: tmp1 = asarray_cmds(command, &count, &cmds) tmp2 = asarray_argvs(args, count, &argvs) tmp3 = asarray_nprocs(maxprocs, count, &imaxprocs) tmp4 = asarray_Info(info, count, &infos) cdef int i=0, np=0 if errcodes is not None: if root != rank: count = len(maxprocs) tmp3 = asarray_nprocs(maxprocs, count, &imaxprocs) for i from 0 <= i < count: np += imaxprocs[i] tmp5 = newarray(np, &ierrcodes) # cdef Intercomm comm = Intercomm.__new__(Intercomm) with nogil: CHKERR( MPI_Comm_spawn_multiple( count, cmds, argvs, imaxprocs, infos, root, self.ob_mpi, &comm.ob_mpi, ierrcodes) ) # cdef int j=0, p=0, q=0 if errcodes is not None: errcodes[:] = [[] for j from 0 <= j < count] for i from 0 <= i < count: q = p + imaxprocs[i] errcodes[i][:] = [ierrcodes[j] for j from p <= j < q] p = q # comm_set_eh(comm.ob_mpi) return comm # Server Routines def Accept( self, port_name: str, Info info: Info = INFO_NULL, int root: int = 0, ) -> Intercomm: """ Accept a request to form a new intercommunicator """ cdef char *cportname = NULL cdef int rank = MPI_UNDEFINED CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) if root == rank: port_name = asmpistr(port_name, &cportname) cdef Intercomm comm = Intercomm.__new__(Intercomm) with nogil: CHKERR( MPI_Comm_accept( cportname, info.ob_mpi, root, self.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Client Routines def Connect( self, port_name: str, Info info: Info = INFO_NULL, int root: int = 0, ) -> Intercomm: """ Make a request to form a new intercommunicator """ cdef char *cportname = NULL cdef int rank = MPI_UNDEFINED CHKERR( MPI_Comm_rank(self.ob_mpi, &rank) ) if root == rank: port_name = asmpistr(port_name, &cportname) cdef Intercomm comm = Intercomm.__new__(Intercomm) with nogil: CHKERR( MPI_Comm_connect( cportname, info.ob_mpi, root, self.ob_mpi, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm cdef class Topocomm(Intracomm): """ Topology intracommunicator """ def __cinit__(self, Comm comm: Optional[Comm] = None): if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo == MPI_UNDEFINED: raise TypeError( "expecting a topology communicator") property degrees: "number of incoming and outgoing neighbors" def __get__(self) -> Tuple[int, int]: cdef object dim, rank cdef object nneighbors if isinstance(self, Cartcomm): dim = self.Get_dim() return (2*dim, 2*dim) if isinstance(self, Graphcomm): rank = self.Get_rank() nneighbors = self.Get_neighbors_count(rank) return (nneighbors, nneighbors) if isinstance(self, Distgraphcomm): nneighbors = self.Get_dist_neighbors_count()[:2] return nneighbors raise TypeError("Not a topology communicator") property indegree: "number of incoming neighbors" def __get__(self) -> int: return self.degrees[0] property outdegree: "number of outgoing neighbors" def __get__(self) -> int: return self.degrees[1] property inoutedges: "incoming and outgoing neighbors" def __get__(self) -> Tuple[List[int], List[int]]: cdef object direction, source, dest, rank cdef object neighbors if isinstance(self, Cartcomm): neighbors = [] for direction in range(self.Get_dim()): source, dest = self.Shift(direction, 1) neighbors.append(source) neighbors.append(dest) return (neighbors, neighbors) if isinstance(self, Graphcomm): rank = self.Get_rank() neighbors = self.Get_neighbors(rank) return (neighbors, neighbors) if isinstance(self, Distgraphcomm): neighbors = self.Get_dist_neighbors()[:2] return neighbors raise 
TypeError("Not a topology communicator") property inedges: "incoming neighbors" def __get__(self) -> List[int]: return self.inoutedges[0] property outedges: "outgoing neighbors" def __get__(self) -> List[int]: return self.inoutedges[1] # Neighborhood Collectives # ------------------------ def Neighbor_allgather( self, sendbuf: BufSpec, recvbuf: BufSpecB, ) -> None: """ Neighbor Gather to All """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_allgather( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Neighbor_allgatherv( self, sendbuf: BufSpec, recvbuf: BufSpecV, ) -> None: """ Neighbor Gather to All Vector """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_allgatherv( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) def Neighbor_alltoall( self, sendbuf: BufSpecB, recvbuf: BufSpecB, ) -> None: """ Neighbor All-to-All """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(0, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_alltoall( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) def Neighbor_alltoallv( self, sendbuf: BufSpecV, recvbuf: BufSpecV, ) -> None: """ Neighbor All-to-All Vector """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(1, sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_alltoallv( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi) ) def Neighbor_alltoallw( self, sendbuf: BufSpecW, recvbuf: BufSpecW, ) -> None: """ Neighbor All-to-All Generalized """ cdef _p_msg_ccow m = message_ccow() m.for_neighbor_alltoallw(sendbuf, recvbuf, self.ob_mpi) with nogil: CHKERR( MPI_Neighbor_alltoallw( m.sbuf, m.scounts, m.sdisplsA, m.stypes, m.rbuf, m.rcounts, m.rdisplsA, m.rtypes, self.ob_mpi) ) # Nonblocking Neighborhood Collectives # ------------------------------------ def Ineighbor_allgather( self, sendbuf: BufSpec, recvbuf: BufSpecB, ) -> Request: """ Nonblocking Neighbor Gather to All """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ineighbor_allgather( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_allgatherv( self, sendbuf: BufSpec, recvbuf: BufSpecV, ) -> Request: """ Nonblocking Neighbor Gather to All Vector """ cdef _p_msg_cco m = message_cco() m.for_neighbor_allgather(1, sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ineighbor_allgatherv( m.sbuf, m.scount, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_alltoall( self, sendbuf: BufSpecB, recvbuf: BufSpecB, ) -> Request: """ Nonblocking Neighbor All-to-All """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(0, sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ineighbor_alltoall( m.sbuf, m.scount, m.stype, m.rbuf, m.rcount, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_alltoallv( self, sendbuf: BufSpecV, recvbuf: BufSpecV, ) -> Request: """ Nonblocking Neighbor All-to-All Vector """ cdef _p_msg_cco m = message_cco() m.for_neighbor_alltoall(1, sendbuf, recvbuf, self.ob_mpi) 
cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ineighbor_alltoallv( m.sbuf, m.scounts, m.sdispls, m.stype, m.rbuf, m.rcounts, m.rdispls, m.rtype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request def Ineighbor_alltoallw( self, sendbuf: BufSpecW, recvbuf: BufSpecW, ) -> Request: """ Nonblocking Neighbor All-to-All Generalized """ cdef _p_msg_ccow m = message_ccow() m.for_neighbor_alltoallw(sendbuf, recvbuf, self.ob_mpi) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Ineighbor_alltoallw( m.sbuf, m.scounts, m.sdisplsA, m.stypes, m.rbuf, m.rcounts, m.rdisplsA, m.rtypes, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = m return request # Python Communication # def neighbor_allgather(self, sendobj: Any) -> List[Any]: """Neighbor Gather to All""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_neighbor_allgather(sendobj, comm) # def neighbor_alltoall(self, sendobj: List[Any]) -> List[Any]: """Neighbor All to All Scatter/Gather""" cdef MPI_Comm comm = self.ob_mpi return PyMPI_neighbor_alltoall(sendobj, comm) cdef class Cartcomm(Topocomm): """ Cartesian topology intracommunicator """ def __cinit__(self, Comm comm: Optional[Comm] = None): if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo != MPI_CART: raise TypeError( "expecting a Cartesian communicator") # Cartesian Inquiry Functions # --------------------------- def Get_dim(self) -> int: """ Return number of dimensions """ cdef int dim = 0 CHKERR( MPI_Cartdim_get(self.ob_mpi, &dim) ) return dim property dim: """number of dimensions""" def __get__(self) -> int: return self.Get_dim() property ndim: """number of dimensions""" def __get__(self) -> int: return self.Get_dim() def Get_topo(self) -> Tuple[List[int], List[int], List[int]]: """ Return information on the cartesian topology """ cdef int ndim = 0 CHKERR( MPI_Cartdim_get(self.ob_mpi, &ndim) ) cdef int *idims = NULL cdef tmp1 = newarray(ndim, &idims) cdef int *iperiods = NULL cdef tmp2 = newarray(ndim, &iperiods) cdef int *icoords = NULL cdef tmp3 = newarray(ndim, &icoords) CHKERR( MPI_Cart_get(self.ob_mpi, ndim, idims, iperiods, icoords) ) cdef int i = 0 cdef object dims = [idims[i] for i from 0 <= i < ndim] cdef object periods = [iperiods[i] for i from 0 <= i < ndim] cdef object coords = [icoords[i] for i from 0 <= i < ndim] return (dims, periods, coords) property topo: """topology information""" def __get__(self) -> Tuple[List[int], List[int], List[int]]: return self.Get_topo() property dims: """dimensions""" def __get__(self) -> List[int]: return self.Get_topo()[0] property periods: """periodicity""" def __get__(self) -> List[int]: return self.Get_topo()[1] property coords: """coordinates""" def __get__(self) -> List[int]: return self.Get_topo()[2] # Cartesian Translator Functions # ------------------------------ def Get_cart_rank(self, coords: Sequence[int]) -> int: """ Translate logical coordinates to ranks """ cdef int ndim = 0, *icoords = NULL CHKERR( MPI_Cartdim_get( self.ob_mpi, &ndim) ) coords = chkarray(coords, ndim, &icoords) cdef int rank = MPI_PROC_NULL CHKERR( MPI_Cart_rank(self.ob_mpi, icoords, &rank) ) return rank def Get_coords(self, int rank: int) -> List[int]: """ Translate ranks to logical coordinates """ cdef int i = 0, ndim = 0, *icoords = NULL CHKERR( MPI_Cartdim_get(self.ob_mpi, &ndim) ) cdef tmp = newarray(ndim, &icoords) CHKERR( MPI_Cart_coords(self.ob_mpi, rank, ndim, icoords) ) cdef object coords = [icoords[i] for i from 0 <= i < 
ndim] return coords # Cartesian Shift Function # ------------------------ def Shift(self, int direction: int, int disp: int) -> Tuple[int, int]: """ Return a tuple (source, dest) of process ranks for data shifting with Comm.Sendrecv() """ cdef int source = MPI_PROC_NULL, dest = MPI_PROC_NULL CHKERR( MPI_Cart_shift(self.ob_mpi, direction, disp, &source, &dest) ) return (source, dest) # Cartesian Partition Function # ---------------------------- def Sub(self, remain_dims: Sequence[bool]) -> Cartcomm: """ Return cartesian communicators that form lower-dimensional subgrids """ cdef int ndim = 0, *iremdims = NULL CHKERR( MPI_Cartdim_get(self.ob_mpi, &ndim) ) remain_dims = chkarray(remain_dims, ndim, &iremdims) cdef Cartcomm comm = Cartcomm.__new__(Cartcomm) with nogil: CHKERR( MPI_Cart_sub(self.ob_mpi, iremdims, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm # Cartesian Convenience Function def Compute_dims(int nnodes: int, dims: Union[int, Sequence[int]]) -> List[int]: """ Return a balanced distribution of processes per coordinate direction """ cdef int i = 0, ndims = 0, *idims = NULL try: ndims = len(dims) except: ndims = dims dims = [0] * ndims cdef tmp = chkarray(dims, ndims, &idims) CHKERR( MPI_Dims_create(nnodes, ndims, idims) ) dims = [idims[i] for i from 0 <= i < ndims] return dims cdef class Graphcomm(Topocomm): """ General graph topology intracommunicator """ def __cinit__(self, Comm comm: Optional[Comm] = None): if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo != MPI_GRAPH: raise TypeError( "expecting a general graph communicator") # Graph Inquiry Functions # ----------------------- def Get_dims(self) -> Tuple[int, int]: """ Return the number of nodes and edges """ cdef int nnodes = 0, nedges = 0 CHKERR( MPI_Graphdims_get(self.ob_mpi, &nnodes, &nedges) ) return (nnodes, nedges) property dims: """number of nodes and edges""" def __get__(self) -> Tuple[int, int]: return self.Get_dims() property nnodes: """number of nodes""" def __get__(self) -> int: return self.Get_dims()[0] property nedges: """number of edges""" def __get__(self) -> int: return self.Get_dims()[1] def Get_topo(self) -> Tuple[List[int], List[int]]: """ Return index and edges """ cdef int nindex = 0, nedges = 0 CHKERR( MPI_Graphdims_get( self.ob_mpi, &nindex, &nedges) ) cdef int *iindex = NULL cdef tmp1 = newarray(nindex, &iindex) cdef int *iedges = NULL cdef tmp2 = newarray(nedges, &iedges) CHKERR( MPI_Graph_get(self.ob_mpi, nindex, nedges, iindex, iedges) ) cdef int i = 0 cdef object index = [iindex[i] for i from 0 <= i < nindex] cdef object edges = [iedges[i] for i from 0 <= i < nedges] return (index, edges) property topo: """topology information""" def __get__(self) -> Tuple[List[int], List[int]]: return self.Get_topo() property index: """index""" def __get__(self) -> List[int]: return self.Get_topo()[0] property edges: """edges""" def __get__(self) -> List[int]: return self.Get_topo()[1] # Graph Information Functions # --------------------------- def Get_neighbors_count(self, int rank: int) -> int: """ Return number of neighbors of a process """ cdef int nneighbors = 0 CHKERR( MPI_Graph_neighbors_count(self.ob_mpi, rank, &nneighbors) ) return nneighbors property nneighbors: """number of neighbors""" def __get__(self) -> int: cdef int rank = self.Get_rank() return self.Get_neighbors_count(rank) def Get_neighbors(self, int rank: int) -> List[int]: """ Return list of neighbors of a process """ cdef int i = 0, nneighbors = 0, *ineighbors = 
NULL CHKERR( MPI_Graph_neighbors_count( self.ob_mpi, rank, &nneighbors) ) cdef tmp = newarray(nneighbors, &ineighbors) CHKERR( MPI_Graph_neighbors( self.ob_mpi, rank, nneighbors, ineighbors) ) cdef object neighbors = [ineighbors[i] for i from 0 <= i < nneighbors] return neighbors property neighbors: """neighbors""" def __get__(self) -> List[int]: cdef int rank = self.Get_rank() return self.Get_neighbors(rank) cdef class Distgraphcomm(Topocomm): """ Distributed graph topology intracommunicator """ def __cinit__(self, Comm comm: Optional[Comm] = None): if self.ob_mpi == MPI_COMM_NULL: return cdef int topo = MPI_UNDEFINED CHKERR( MPI_Topo_test(self.ob_mpi, &topo) ) if topo != MPI_DIST_GRAPH: raise TypeError( "expecting a distributed graph communicator") # Topology Inquiry Functions # -------------------------- def Get_dist_neighbors_count(self) -> int: """ Return adjacency information for a distributed graph topology """ cdef int indegree = 0 cdef int outdegree = 0 cdef int weighted = 0 CHKERR( MPI_Dist_graph_neighbors_count( self.ob_mpi, &indegree, &outdegree, &weighted) ) return (indegree, outdegree, weighted) def Get_dist_neighbors(self) \ -> Tuple[List[int], List[int], Optional[Tuple[List[int], List[int]]]]: """ Return adjacency information for a distributed graph topology """ cdef int maxindegree = 0, maxoutdegree = 0, weighted = 0 CHKERR( MPI_Dist_graph_neighbors_count( self.ob_mpi, &maxindegree, &maxoutdegree, &weighted) ) # cdef int *sources = NULL, *destinations = NULL cdef int *sourceweights = MPI_UNWEIGHTED cdef int *destweights = MPI_UNWEIGHTED cdef tmp1, tmp2, tmp3, tmp4 tmp1 = newarray(maxindegree, &sources) tmp2 = newarray(maxoutdegree, &destinations) cdef int i = 0 if weighted: tmp3 = newarray(maxindegree, &sourceweights) for i from 0 <= i < maxindegree: sourceweights[i] = 1 tmp4 = newarray(maxoutdegree, &destweights) for i from 0 <= i < maxoutdegree: destweights[i] = 1 # CHKERR( MPI_Dist_graph_neighbors( self.ob_mpi, maxindegree, sources, sourceweights, maxoutdegree, destinations, destweights) ) # cdef object src = [sources[i] for i from 0 <= i < maxindegree] cdef object dst = [destinations[i] for i from 0 <= i < maxoutdegree] if not weighted: return (src, dst, None) # cdef object sw = [sourceweights[i] for i from 0 <= i < maxindegree] cdef object dw = [destweights[i] for i from 0 <= i < maxoutdegree] return (src, dst, (sw, dw)) cdef class Intercomm(Comm): """ Intercommunicator """ def __cinit__(self, Comm comm: Optional[Comm] = None): if self.ob_mpi == MPI_COMM_NULL: return cdef int inter = 0 CHKERR( MPI_Comm_test_inter(self.ob_mpi, &inter) ) if not inter: raise TypeError( "expecting an intercommunicator") # Intercommunicator Accessors # --------------------------- def Get_remote_group(self) -> Group: """ Access the remote group associated with the inter-communicator """ cdef Group group = Group.__new__(Group) with nogil: CHKERR( MPI_Comm_remote_group( self.ob_mpi, &group.ob_mpi) ) return group property remote_group: """remote group""" def __get__(self) -> Group: return self.Get_remote_group() def Get_remote_size(self) -> int: """ Intercommunicator remote size """ cdef int size = -1 CHKERR( MPI_Comm_remote_size(self.ob_mpi, &size) ) return size property remote_size: """number of remote processes""" def __get__(self) -> int: return self.Get_remote_size() # Communicator Constructors # ------------------------- def Merge(self, bint high: bool = False) -> Intracomm: """ Merge intercommunicator """ cdef Intracomm comm = Intracomm.__new__(Intracomm) with nogil: CHKERR( 
MPI_Intercomm_merge( self.ob_mpi, high, &comm.ob_mpi) ) comm_set_eh(comm.ob_mpi) return comm cdef Comm __COMM_NULL__ = new_Comm ( MPI_COMM_NULL ) cdef Intracomm __COMM_SELF__ = new_Intracomm ( MPI_COMM_SELF ) cdef Intracomm __COMM_WORLD__ = new_Intracomm ( MPI_COMM_WORLD ) cdef Intercomm __COMM_PARENT__ = new_Intercomm ( MPI_COMM_NULL ) # Predefined communicators # ------------------------ COMM_NULL = __COMM_NULL__ #: Null communicator handle COMM_SELF = __COMM_SELF__ #: Self communicator handle COMM_WORLD = __COMM_WORLD__ #: World communicator handle # Buffer Allocation and Usage # --------------------------- BSEND_OVERHEAD = MPI_BSEND_OVERHEAD #: Upper bound of memory overhead for sending in buffered mode def Attach_buffer(buf: Buffer) -> None: """ Attach a user-provided buffer for sending in buffered mode """ cdef void *base = NULL cdef int size = 0 attach_buffer(buf, &base, &size) with nogil: CHKERR( MPI_Buffer_attach(base, size) ) def Detach_buffer() -> Buffer: """ Remove an existing attached buffer """ cdef void *base = NULL cdef int size = 0 with nogil: CHKERR( MPI_Buffer_detach(&base, &size) ) return detach_buffer(base, size) # -------------------------------------------------------------------- # Process Creation and Management # -------------------------------------------------------------------- # Server Routines # --------------- def Open_port(Info info: Info = INFO_NULL) -> str: """ Return an address that can be used to establish connections between groups of MPI processes """ cdef char cportname[MPI_MAX_PORT_NAME+1] cportname[0] = 0 # just in case with nogil: CHKERR( MPI_Open_port(info.ob_mpi, cportname) ) cportname[MPI_MAX_PORT_NAME] = 0 # just in case return mpistr(cportname) def Close_port(port_name: str) -> None: """ Close a port """ cdef char *cportname = NULL port_name = asmpistr(port_name, &cportname) with nogil: CHKERR( MPI_Close_port(cportname) ) # Name Publishing # --------------- def Publish_name( service_name: str, port_name: str, info: Info = INFO_NULL, ) -> None: """ Publish a service name """ if isinstance(port_name, Info): # backward compatibility port_name, info = info, port_name cdef char *csrvcname = NULL service_name = asmpistr(service_name, &csrvcname) cdef char *cportname = NULL port_name = asmpistr(port_name, &cportname) cdef MPI_Info cinfo = arg_Info(info) with nogil: CHKERR( MPI_Publish_name(csrvcname, cinfo, cportname) ) def Unpublish_name( service_name: str, port_name: str, info: Info = INFO_NULL, ) -> None: """ Unpublish a service name """ if isinstance(port_name, Info): # backward compatibility port_name, info = info, port_name cdef char *csrvcname = NULL service_name = asmpistr(service_name, &csrvcname) cdef char *cportname = NULL port_name = asmpistr(port_name, &cportname) cdef MPI_Info cinfo = arg_Info(info) with nogil: CHKERR( MPI_Unpublish_name(csrvcname, cinfo, cportname) ) def Lookup_name( service_name: str, info: Info = INFO_NULL, ) -> str: """ Lookup a port name given a service name """ cdef char *csrvcname = NULL service_name = asmpistr(service_name, &csrvcname) cdef MPI_Info cinfo = arg_Info(info) cdef char cportname[MPI_MAX_PORT_NAME+1] cportname[0] = 0 # just in case with nogil: CHKERR( MPI_Lookup_name(csrvcname, cinfo, cportname) ) cportname[MPI_MAX_PORT_NAME] = 0 # just in case return mpistr(cportname) mpi4py-3.1.6/src/mpi4py/MPI/Datatype.pyx000066400000000000000000001250401460670727200177330ustar00rootroot00000000000000# Storage order for arrays # ------------------------ ORDER_C = MPI_ORDER_C #: C order (a.k.a. 
row major) ORDER_FORTRAN = MPI_ORDER_FORTRAN #: Fortran order (a.k.a. column major) ORDER_F = MPI_ORDER_FORTRAN #: Convenience alias for ORDER_FORTRAN # Type classes for Fortran datatype matching # ------------------------------------------ TYPECLASS_INTEGER = MPI_TYPECLASS_INTEGER TYPECLASS_REAL = MPI_TYPECLASS_REAL TYPECLASS_COMPLEX = MPI_TYPECLASS_COMPLEX # Type of distributions (HPF-like arrays) # --------------------------------------- DISTRIBUTE_NONE = MPI_DISTRIBUTE_NONE #: Dimension not distributed DISTRIBUTE_BLOCK = MPI_DISTRIBUTE_BLOCK #: Block distribution DISTRIBUTE_CYCLIC = MPI_DISTRIBUTE_CYCLIC #: Cyclic distribution DISTRIBUTE_DFLT_DARG = MPI_DISTRIBUTE_DFLT_DARG #: Default distribution # Combiner values for datatype decoding # ------------------------------------- COMBINER_NAMED = MPI_COMBINER_NAMED COMBINER_DUP = MPI_COMBINER_DUP COMBINER_CONTIGUOUS = MPI_COMBINER_CONTIGUOUS COMBINER_VECTOR = MPI_COMBINER_VECTOR COMBINER_HVECTOR = MPI_COMBINER_HVECTOR COMBINER_INDEXED = MPI_COMBINER_INDEXED COMBINER_HINDEXED = MPI_COMBINER_HINDEXED COMBINER_INDEXED_BLOCK = MPI_COMBINER_INDEXED_BLOCK COMBINER_HINDEXED_BLOCK = MPI_COMBINER_HINDEXED_BLOCK COMBINER_STRUCT = MPI_COMBINER_STRUCT COMBINER_SUBARRAY = MPI_COMBINER_SUBARRAY COMBINER_DARRAY = MPI_COMBINER_DARRAY COMBINER_RESIZED = MPI_COMBINER_RESIZED COMBINER_F90_REAL = MPI_COMBINER_F90_REAL COMBINER_F90_COMPLEX = MPI_COMBINER_F90_COMPLEX COMBINER_F90_INTEGER = MPI_COMBINER_F90_INTEGER cdef class Datatype: """ Datatype object """ def __cinit__(self, Datatype datatype: Optional[Datatype] = None): self.ob_mpi = MPI_DATATYPE_NULL if datatype is None: return self.ob_mpi = datatype.ob_mpi def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Datatype(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Datatype): return NotImplemented cdef Datatype s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_DATATYPE_NULL # Datatype Accessors # ------------------ def Get_size(self) -> int: """ Return the number of bytes occupied by entries in the datatype """ cdef MPI_Count size = 0 CHKERR( MPI_Type_size_x(self.ob_mpi, &size) ) return size property size: """size (in bytes)""" def __get__(self) -> int: cdef MPI_Count size = 0 CHKERR( MPI_Type_size_x(self.ob_mpi, &size) ) return size def Get_extent(self) -> Tuple[int, int]: """ Return lower bound and extent of datatype """ cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_x(self.ob_mpi, &lb, &extent) ) return (lb, extent) property extent: """extent""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_x(self.ob_mpi, &lb, &extent) ) return extent property lb: """lower bound""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_x(self.ob_mpi, &lb, &extent) ) return lb property ub: """upper bound""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_extent_x(self.ob_mpi, &lb, &extent) ) return lb + extent # Datatype Constructors # --------------------- def Dup(self) -> Datatype: """ Duplicate a datatype """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_dup(self.ob_mpi, &datatype.ob_mpi) ) return datatype Create_dup = Dup #: convenience alias def Create_contiguous(self, int count: 
int) -> Datatype: """ Create a contiguous datatype """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_contiguous(count, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_vector( self, int count: int, int blocklength: int, int stride: int, ) -> Datatype: """ Create a vector (strided) datatype """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_vector(count, blocklength, stride, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_hvector( self, int count: int, int blocklength: int, Aint stride: int, ) -> Datatype: """ Create a vector (strided) datatype """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_hvector(count, blocklength, stride, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_indexed( self, blocklengths: Sequence[int], displacements: Sequence[int], ) -> Datatype: """ Create an indexed datatype """ cdef int count = 0, *iblen = NULL, *idisp = NULL blocklengths = getarray(blocklengths, &count, &iblen) displacements = chkarray(displacements, count, &idisp) # cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_indexed(count, iblen, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_hindexed( self, blocklengths: Sequence[int], displacements: Sequence[int], ) -> Datatype: """ Create an indexed datatype with displacements in bytes """ cdef int count = 0, *iblen = NULL blocklengths = getarray(blocklengths, &count, &iblen) cdef MPI_Aint *idisp = NULL displacements = chkarray(displacements, count, &idisp) # cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_hindexed(count, iblen, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_indexed_block( self, int blocklength: int, displacements: Sequence[int], ) -> Datatype: """ Create an indexed datatype with constant-sized blocks """ cdef int count = 0, *idisp = NULL displacements = getarray(displacements, &count, &idisp) # cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_indexed_block(count, blocklength, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype def Create_hindexed_block( self, int blocklength: int, displacements: Sequence[int], ) -> Datatype: """ Create an indexed datatype with constant-sized blocks and displacements in bytes """ cdef int count = 0 cdef MPI_Aint *idisp = NULL displacements = getarray(displacements, &count, &idisp) # cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_hindexed_block(count, blocklength, idisp, self.ob_mpi, &datatype.ob_mpi) ) return datatype @classmethod def Create_struct( cls, blocklengths: Sequence[int], displacements: Sequence[int], datatypes: Sequence[Datatype], ) -> Datatype: """ Create an datatype from a general set of block sizes, displacements and datatypes """ cdef int count = 0, *iblen = NULL blocklengths = getarray(blocklengths, &count, &iblen) cdef MPI_Aint *idisp = NULL displacements = chkarray(displacements, count, &idisp) cdef MPI_Datatype *ptype = NULL datatypes = asarray_Datatype(datatypes, count, &ptype) # cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_struct(count, iblen, idisp, ptype, &datatype.ob_mpi) ) return datatype # Subarray Datatype Constructor # ----------------------------- def Create_subarray( self, sizes: Sequence[int], subsizes: Sequence[int], starts: Sequence[int], int order: int = ORDER_C, ) -> Datatype: """ Create a datatype for a subarray of a regular, multidimensional array """ cdef int ndims = 0, *isizes = NULL 
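# The three sequences are converted below to C integer arrays; `sizes`
# determines the number of dimensions, and `subsizes` and `starts` are
# checked to have that same length.
# Illustrative sketch (user-level usage, assuming `from mpi4py import MPI`):
#   tile = MPI.DOUBLE.Create_subarray([10, 10], [5, 5], [0, 5]).Commit()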
cdef int *isubsizes = NULL, *istarts = NULL sizes = getarray(sizes, &ndims, &isizes ) subsizes = chkarray(subsizes, ndims, &isubsizes) starts = chkarray(starts, ndims, &istarts ) # cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_subarray(ndims, isizes, isubsizes, istarts, order, self.ob_mpi, &datatype.ob_mpi) ) return datatype # Distributed Array Datatype Constructor # -------------------------------------- def Create_darray( self, int size: int, int rank: int, gsizes: Sequence[int], distribs: Sequence[int], dargs: Sequence[int], psizes: Sequence[int], int order: int = ORDER_C, ) -> Datatype: """ Create a datatype representing an HPF-like distributed array on Cartesian process grids """ cdef int ndims = 0, *igsizes = NULL cdef int *idistribs = NULL, *idargs = NULL, *ipsizes = NULL gsizes = getarray(gsizes, &ndims, &igsizes ) distribs = chkarray(distribs, ndims, &idistribs ) dargs = chkarray(dargs, ndims, &idargs ) psizes = chkarray(psizes, ndims, &ipsizes ) # cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_darray(size, rank, ndims, igsizes, idistribs, idargs, ipsizes, order, self.ob_mpi, &datatype.ob_mpi) ) return datatype # Parametrized and size-specific Fortran Datatypes # ------------------------------------------------ @classmethod def Create_f90_integer(cls, int r: int) -> Datatype: """ Return a bounded integer datatype """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_f90_integer(r, &datatype.ob_mpi) ) return datatype @classmethod def Create_f90_real(cls, int p: int, int r: int) -> Datatype: """ Return a bounded real datatype """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_f90_real(p, r, &datatype.ob_mpi) ) return datatype @classmethod def Create_f90_complex(cls, int p: int, int r: int) -> Datatype: """ Return a bounded complex datatype """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_f90_complex(p, r, &datatype.ob_mpi) ) return datatype @classmethod def Match_size(cls, int typeclass: int, int size: int) -> Datatype: """ Find a datatype matching a specified size in bytes """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_match_size(typeclass, size, &datatype.ob_mpi) ) return datatype # Use of Derived Datatypes # ------------------------ def Commit(self) -> Datatype: """ Commit the datatype """ CHKERR( MPI_Type_commit(&self.ob_mpi) ) return self def Free(self) -> None: """ Free the datatype """ CHKERR( MPI_Type_free(&self.ob_mpi) ) cdef Datatype t = self cdef MPI_Datatype p = MPI_DATATYPE_NULL if t is __UB__ : p = MPI_UB elif t is __LB__ : p = MPI_LB elif t is __PACKED__ : p = MPI_PACKED elif t is __BYTE__ : p = MPI_BYTE elif t is __AINT__ : p = MPI_AINT elif t is __OFFSET__ : p = MPI_OFFSET elif t is __COUNT__ : p = MPI_COUNT elif t is __CHAR__ : p = MPI_CHAR elif t is __WCHAR__ : p = MPI_WCHAR elif t is __SIGNED_CHAR__ : p = MPI_SIGNED_CHAR elif t is __SHORT__ : p = MPI_SHORT elif t is __INT__ : p = MPI_INT elif t is __LONG__ : p = MPI_LONG elif t is __LONG_LONG__ : p = MPI_LONG_LONG elif t is __UNSIGNED_CHAR__ : p = MPI_UNSIGNED_CHAR elif t is __UNSIGNED_SHORT__ : p = MPI_UNSIGNED_SHORT elif t is __UNSIGNED__ : p = MPI_UNSIGNED elif t is __UNSIGNED_LONG__ : p = MPI_UNSIGNED_LONG elif t is __UNSIGNED_LONG_LONG__ : p = MPI_UNSIGNED_LONG_LONG elif t is __FLOAT__ : p = MPI_FLOAT elif t is __DOUBLE__ : p = MPI_DOUBLE elif t is __LONG_DOUBLE__ : p = MPI_LONG_DOUBLE elif t is __C_BOOL__ : p = MPI_C_BOOL elif t is 
__INT8_T__ : p = MPI_INT8_T elif t is __INT16_T__ : p = MPI_INT16_T elif t is __INT32_T__ : p = MPI_INT32_T elif t is __INT64_T__ : p = MPI_INT64_T elif t is __UINT8_T__ : p = MPI_UINT8_T elif t is __UINT16_T__ : p = MPI_UINT16_T elif t is __UINT32_T__ : p = MPI_UINT32_T elif t is __UINT64_T__ : p = MPI_UINT64_T elif t is __C_COMPLEX__ : p = MPI_C_COMPLEX elif t is __C_FLOAT_COMPLEX__ : p = MPI_C_FLOAT_COMPLEX elif t is __C_DOUBLE_COMPLEX__ : p = MPI_C_DOUBLE_COMPLEX elif t is __C_LONG_DOUBLE_COMPLEX__ : p = MPI_C_LONG_DOUBLE_COMPLEX elif t is __CXX_BOOL__ : p = MPI_CXX_BOOL elif t is __CXX_FLOAT_COMPLEX__ : p = MPI_CXX_FLOAT_COMPLEX elif t is __CXX_DOUBLE_COMPLEX__ : p = MPI_CXX_DOUBLE_COMPLEX elif t is __CXX_LONG_DOUBLE_COMPLEX__: p = MPI_CXX_LONG_DOUBLE_COMPLEX elif t is __SHORT_INT__ : p = MPI_SHORT_INT elif t is __TWOINT__ : p = MPI_2INT elif t is __LONG_INT__ : p = MPI_LONG_INT elif t is __FLOAT_INT__ : p = MPI_FLOAT_INT elif t is __DOUBLE_INT__ : p = MPI_DOUBLE_INT elif t is __LONG_DOUBLE_INT__ : p = MPI_LONG_DOUBLE_INT elif t is __CHARACTER__ : p = MPI_CHARACTER elif t is __LOGICAL__ : p = MPI_LOGICAL elif t is __INTEGER__ : p = MPI_INTEGER elif t is __REAL__ : p = MPI_REAL elif t is __DOUBLE_PRECISION__ : p = MPI_DOUBLE_PRECISION elif t is __COMPLEX__ : p = MPI_COMPLEX elif t is __DOUBLE_COMPLEX__ : p = MPI_DOUBLE_COMPLEX elif t is __LOGICAL1__ : p = MPI_LOGICAL1 elif t is __LOGICAL2__ : p = MPI_LOGICAL2 elif t is __LOGICAL4__ : p = MPI_LOGICAL4 elif t is __LOGICAL8__ : p = MPI_LOGICAL8 elif t is __INTEGER1__ : p = MPI_INTEGER1 elif t is __INTEGER2__ : p = MPI_INTEGER2 elif t is __INTEGER4__ : p = MPI_INTEGER4 elif t is __INTEGER8__ : p = MPI_INTEGER8 elif t is __INTEGER16__ : p = MPI_INTEGER16 elif t is __REAL2__ : p = MPI_REAL2 elif t is __REAL4__ : p = MPI_REAL4 elif t is __REAL8__ : p = MPI_REAL8 elif t is __REAL16__ : p = MPI_REAL16 elif t is __COMPLEX4__ : p = MPI_COMPLEX4 elif t is __COMPLEX8__ : p = MPI_COMPLEX8 elif t is __COMPLEX16__ : p = MPI_COMPLEX16 elif t is __COMPLEX32__ : p = MPI_COMPLEX32 self.ob_mpi = p # Datatype Resizing # ----------------- def Create_resized(self, Aint lb: int, Aint extent: int) -> Datatype: """ Create a datatype with a new lower bound and extent """ cdef Datatype datatype = Datatype.__new__(Datatype) CHKERR( MPI_Type_create_resized(self.ob_mpi, lb, extent, &datatype.ob_mpi) ) return datatype Resized = Create_resized #: compatibility alias def Get_true_extent(self) -> Tuple[int, int]: """ Return the true lower bound and extent of a datatype """ cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_x(self.ob_mpi, &lb, &extent) ) return (lb, extent) property true_extent: """true extent""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_x(self.ob_mpi, &lb, &extent) ) return extent property true_lb: """true lower bound""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_x(self.ob_mpi, &lb, &extent) ) return lb property true_ub: """true upper bound""" def __get__(self) -> int: cdef MPI_Count lb = 0, extent = 0 CHKERR( MPI_Type_get_true_extent_x(self.ob_mpi, &lb, &extent) ) return lb + extent # Decoding a Datatype # ------------------- def Get_envelope(self) -> Tuple[int, int, int, int]: """ Return information on the number and type of input arguments used in the call that created a datatype """ cdef int ni = 0, na = 0, nd = 0, combiner = MPI_UNDEFINED CHKERR( MPI_Type_get_envelope(self.ob_mpi, &ni, &na, &nd, &combiner) ) return (ni, na, nd, 
combiner) property envelope: """datatype envelope""" def __get__(self) -> Tuple[int, int, int, int]: return self.Get_envelope() def Get_contents(self) -> Tuple[List[int], List[int], List[Datatype]]: """ Retrieve the actual arguments used in the call that created a datatype """ cdef int ni = 0, na = 0, nd = 0, combiner = MPI_UNDEFINED CHKERR( MPI_Type_get_envelope(self.ob_mpi, &ni, &na, &nd, &combiner) ) cdef int *i = NULL cdef MPI_Aint *a = NULL cdef MPI_Datatype *d = NULL cdef tmp1 = allocate(ni, sizeof(int), &i) cdef tmp2 = allocate(na, sizeof(MPI_Aint), &a) cdef tmp3 = allocate(nd, sizeof(MPI_Datatype), &d) CHKERR( MPI_Type_get_contents(self.ob_mpi, ni, na, nd, i, a, d) ) cdef int k = 0 cdef object integers = [i[k] for k from 0 <= k < ni] cdef object addresses = [a[k] for k from 0 <= k < na] cdef object datatypes = [ref_Datatype(d[k]) for k from 0 <= k < nd] return (integers, addresses, datatypes) property contents: """datatype contents""" def __get__(self) -> Tuple[List[int], List[int], List[Datatype]]: return self.Get_contents() def decode(self) -> Tuple[Datatype, str, Dict[str, Any]]: """ Convenience method for decoding a datatype """ # get the datatype envelope cdef int ni = 0, na = 0, nd = 0, combiner = MPI_UNDEFINED CHKERR( MPI_Type_get_envelope(self.ob_mpi, &ni, &na, &nd, &combiner) ) # return self immediately for named datatypes if combiner == MPI_COMBINER_NAMED: return self # get the datatype contents cdef int *i = NULL cdef MPI_Aint *a = NULL cdef MPI_Datatype *d = NULL cdef tmp1 = allocate(ni, sizeof(int), &i) cdef tmp2 = allocate(na, sizeof(MPI_Aint), &a) cdef tmp3 = allocate(nd, sizeof(MPI_Datatype), &d) CHKERR( MPI_Type_get_contents(self.ob_mpi, ni, na, nd, i, a, d) ) # manage in advance the contained datatypes cdef int k = 0, s1, e1, s2, e2, s3, e3, s4, e4 cdef object oldtype = None if combiner == MPI_COMBINER_STRUCT: oldtype = [ref_Datatype(d[k]) for k from 0 <= k < nd] elif (combiner != MPI_COMBINER_F90_INTEGER and combiner != MPI_COMBINER_F90_REAL and combiner != MPI_COMBINER_F90_COMPLEX): oldtype = ref_Datatype(d[0]) # dispatch depending on the combiner value if combiner == MPI_COMBINER_DUP: return (oldtype, ('DUP'), {}) elif combiner == MPI_COMBINER_CONTIGUOUS: return (oldtype, ('CONTIGUOUS'), {('count') : i[0]}) elif combiner == MPI_COMBINER_VECTOR: return (oldtype, ('VECTOR'), {('count') : i[0], ('blocklength') : i[1], ('stride') : i[2]}) elif combiner == MPI_COMBINER_HVECTOR: return (oldtype, ('HVECTOR'), {('count') : i[0], ('blocklength') : i[1], ('stride') : a[0]}) elif combiner == MPI_COMBINER_INDEXED: s1 = 1; e1 = i[0] s2 = i[0]+1; e2 = 2*i[0] return (oldtype, ('INDEXED'), {('blocklengths') : [i[k] for k from s1 <= k <= e1], ('displacements') : [i[k] for k from s2 <= k <= e2]}) elif combiner == MPI_COMBINER_HINDEXED: s1 = 1; e1 = i[0] s2 = 0; e2 = i[0]-1 return (oldtype, ('HINDEXED'), {('blocklengths') : [i[k] for k from s1 <= k <= e1], ('displacements') : [a[k] for k from s2 <= k <= e2]}) elif combiner == MPI_COMBINER_INDEXED_BLOCK: s2 = 2; e2 = i[0]+1 return (oldtype, ('INDEXED_BLOCK'), {('blocklength') : i[1], ('displacements') : [i[k] for k from s2 <= k <= e2]}) elif combiner == MPI_COMBINER_HINDEXED_BLOCK: s2 = 0; e2 = i[0]-1 return (oldtype, ('HINDEXED_BLOCK'), {('blocklength') : i[1], ('displacements') : [a[k] for k from s2 <= k <= e2]}) elif combiner == MPI_COMBINER_STRUCT: s1 = 1; e1 = i[0] s2 = 0; e2 = i[0]-1 return (DATATYPE_NULL, ('STRUCT'), {('blocklengths') : [i[k] for k from s1 <= k <= e1], ('displacements') : [a[k] for k from s2 <= k <= e2], 
('datatypes') : oldtype}) elif combiner == MPI_COMBINER_SUBARRAY: s1 = 1; e1 = i[0] s2 = i[0]+1; e2 = 2*i[0] s3 = 2*i[0]+1; e3 = 3*i[0] return (oldtype, ('SUBARRAY'), {('sizes') : [i[k] for k from s1 <= k <= e1], ('subsizes') : [i[k] for k from s2 <= k <= e2], ('starts') : [i[k] for k from s3 <= k <= e3], ('order') : i[3*i[0]+1]}) elif combiner == MPI_COMBINER_DARRAY: s1 = 3; e1 = i[2]+2 s2 = i[2]+3; e2 = 2*i[2]+2 s3 = 2*i[2]+3; e3 = 3*i[2]+2 s4 = 3*i[2]+3; e4 = 4*i[2]+2 return (oldtype, ('DARRAY'), {('size') : i[0], ('rank') : i[1], ('gsizes') : [i[k] for k from s1 <= k <= e1], ('distribs') : [i[k] for k from s2 <= k <= e2], ('dargs') : [i[k] for k from s3 <= k <= e3], ('psizes') : [i[k] for k from s4 <= k <= e4], ('order') : i[4*i[2]+3]}) elif combiner == MPI_COMBINER_RESIZED: return (oldtype, ('RESIZED'), {('lb') : a[0], ('extent') : a[1]}) elif combiner == MPI_COMBINER_F90_INTEGER: return (DATATYPE_NULL, ('F90_INTEGER'), {('r') : i[0]}) elif combiner == MPI_COMBINER_F90_REAL: return (DATATYPE_NULL, ('F90_REAL'), {('p') : i[0], ('r') : i[1]}) elif combiner == MPI_COMBINER_F90_COMPLEX: return (DATATYPE_NULL, ('F90_COMPLEX'), {('p') : i[0], ('r') : i[1]}) else: return None property combiner: """datatype combiner""" def __get__(self) -> int: return self.Get_envelope()[3] property is_named: """is a named datatype""" def __get__(self) -> bool: cdef int combiner = self.Get_envelope()[3] return combiner == MPI_COMBINER_NAMED property is_predefined: """is a predefined datatype""" def __get__(self) -> bool: if self.ob_mpi == MPI_DATATYPE_NULL: return True cdef int combiner = self.Get_envelope()[3] return (combiner == MPI_COMBINER_NAMED or combiner == MPI_COMBINER_F90_INTEGER or combiner == MPI_COMBINER_F90_REAL or combiner == MPI_COMBINER_F90_COMPLEX) # Pack and Unpack # --------------- def Pack( self, inbuf: BufSpec, outbuf: BufSpec, int position: int, Comm comm: Comm, ) -> int: """ Pack into contiguous memory according to datatype. """ cdef MPI_Aint lb = 0, extent = 0 CHKERR( MPI_Type_get_extent(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef tmp1 = getbuffer_r(inbuf, &ibptr, &iblen) cdef tmp2 = getbuffer_w(outbuf, &obptr, &oblen) cdef int icount = downcast(iblen//extent) cdef int osize = clipcount(oblen) # CHKERR( MPI_Pack(ibptr, icount, self.ob_mpi, obptr, osize, &position, comm.ob_mpi) ) return position def Unpack( self, inbuf: BufSpec, int position: int, outbuf: BufSpec, Comm comm: Comm, ) -> int: """ Unpack from contiguous memory according to datatype. """ cdef MPI_Aint lb = 0, extent = 0 CHKERR( MPI_Type_get_extent(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef tmp1 = getbuffer_r(inbuf, &ibptr, &iblen) cdef tmp2 = getbuffer_w(outbuf, &obptr, &oblen) cdef int isize = clipcount(iblen) cdef int ocount = downcast(oblen//extent) # CHKERR( MPI_Unpack(ibptr, isize, &position, obptr, ocount, self.ob_mpi, comm.ob_mpi) ) return position def Pack_size( self, int count: int, Comm comm: Comm, ) -> int: """ Return the upper bound on the amount of space (in bytes) needed to pack a message according to datatype. """ cdef int size = 0 CHKERR( MPI_Pack_size(count, self.ob_mpi, comm.ob_mpi, &size) ) return size # Canonical Pack and Unpack # ------------------------- def Pack_external( self, datarep: str, inbuf: BufSpec, outbuf: BufSpec, Aint position: int, ) -> int: """ Pack into contiguous memory according to datatype, using a portable data representation (**external32**). 
""" cdef char *cdatarep = NULL datarep = asmpistr(datarep, &cdatarep) cdef MPI_Aint lb = 0, extent = 0 CHKERR( MPI_Type_get_extent(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef tmp1 = getbuffer_r(inbuf, &ibptr, &iblen) cdef tmp2 = getbuffer_w(outbuf, &obptr, &oblen) cdef int icount = downcast(iblen//extent) # CHKERR( MPI_Pack_external(cdatarep, ibptr, icount, self.ob_mpi, obptr, oblen, &position) ) return position def Unpack_external( self, datarep: str, inbuf: BufSpec, Aint position: int, outbuf: BufSpec, ) -> int: """ Unpack from contiguous memory according to datatype, using a portable data representation (**external32**). """ cdef char *cdatarep = NULL datarep = asmpistr(datarep, &cdatarep) cdef MPI_Aint lb = 0, extent = 0 CHKERR( MPI_Type_get_extent(self.ob_mpi, &lb, &extent) ) # cdef void *ibptr = NULL, *obptr = NULL cdef MPI_Aint iblen = 0, oblen = 0 cdef tmp1 = getbuffer_r(inbuf, &ibptr, &iblen) cdef tmp2 = getbuffer_w(outbuf, &obptr, &oblen) cdef int ocount = downcast(oblen//extent) # CHKERR( MPI_Unpack_external(cdatarep, ibptr, iblen, &position, obptr, ocount, self.ob_mpi) ) return position def Pack_external_size( self, datarep: str, int count: int, ) -> int: """ Return the upper bound on the amount of space (in bytes) needed to pack a message according to datatype, using a portable data representation (**external32**). """ cdef char *cdatarep = NULL cdef MPI_Aint size = 0 datarep = asmpistr(datarep, &cdatarep) CHKERR( MPI_Pack_external_size(cdatarep, count, self.ob_mpi, &size) ) return size # Attributes # ---------- def Get_attr(self, int keyval: int) -> Optional[Union[int, Any]]: """ Retrieve attribute value by key """ cdef void *attrval = NULL cdef int flag = 0 CHKERR( MPI_Type_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if not flag: return None if attrval == NULL: return 0 # user-defined attribute keyval return PyMPI_attr_get(self.ob_mpi, keyval, attrval) def Set_attr(self, int keyval: int, attrval: Any) -> None: """ Store attribute value associated with a key """ PyMPI_attr_set(self.ob_mpi, keyval, attrval) def Delete_attr(self, int keyval: int) -> None: """ Delete attribute value associated with a key """ CHKERR( MPI_Type_delete_attr(self.ob_mpi, keyval) ) @classmethod def Create_keyval( cls, copy_fn: Optional[Callable[[Datatype, int, Any], Any]] = None, delete_fn: Optional[Callable[[Datatype, int, Any], None]] = None, nopython: bool = False, ) -> int: """ Create a new attribute key for datatypes """ cdef object state = _p_keyval(copy_fn, delete_fn, nopython) cdef int keyval = MPI_KEYVAL_INVALID cdef MPI_Type_copy_attr_function *_copy = PyMPI_attr_copy_fn cdef MPI_Type_delete_attr_function *_del = PyMPI_attr_delete_fn cdef void *extra_state = state CHKERR( MPI_Type_create_keyval(_copy, _del, &keyval, extra_state) ) type_keyval[keyval] = state return keyval @classmethod def Free_keyval(cls, int keyval: int) -> int: """ Free an attribute key for datatypes """ cdef int keyval_save = keyval CHKERR( MPI_Type_free_keyval(&keyval) ) try: del type_keyval[keyval_save] except KeyError: pass return keyval # Naming Objects # -------------- def Get_name(self) -> str: """ Get the print name for this datatype """ cdef char name[MPI_MAX_OBJECT_NAME+1] cdef int nlen = 0 CHKERR( MPI_Type_get_name(self.ob_mpi, name, &nlen) ) return tompistr(name, nlen) def Set_name(self, name: str) -> None: """ Set the print name for this datatype """ cdef char *cname = NULL name = asmpistr(name, &cname) CHKERR( 
MPI_Type_set_name(self.ob_mpi, cname) ) property name: """datatype name""" def __get__(self) -> str: return self.Get_name() def __set__(self, value: str): self.Set_name(value) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Type_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Datatype: """ """ cdef Datatype datatype = Datatype.__new__(Datatype) datatype.ob_mpi = MPI_Type_f2c(arg) return datatype # Address Functions # ----------------- def Get_address(location: Union[Buffer, Bottom]) -> int: """ Get the address of a location in memory """ cdef void *baseptr = MPI_BOTTOM if not is_BOTTOM(location): getbuffer_r(location, &baseptr, NULL) cdef MPI_Aint address = 0 CHKERR( MPI_Get_address(baseptr, &address) ) return address def Aint_add(Aint base: int, Aint disp: int) -> int: """ Return the sum of base address and displacement """ return MPI_Aint_add(base, disp) def Aint_diff(Aint addr1: int, Aint addr2: int) -> int: """ Return the difference between absolute addresses """ return MPI_Aint_diff(addr1, addr2) cdef Datatype __DATATYPE_NULL__ = new_Datatype( MPI_DATATYPE_NULL ) cdef Datatype __UB__ = new_Datatype( MPI_UB ) cdef Datatype __LB__ = new_Datatype( MPI_LB ) cdef Datatype __PACKED__ = new_Datatype( MPI_PACKED ) cdef Datatype __BYTE__ = new_Datatype( MPI_BYTE ) cdef Datatype __AINT__ = new_Datatype( MPI_AINT ) cdef Datatype __OFFSET__ = new_Datatype( MPI_OFFSET ) cdef Datatype __COUNT__ = new_Datatype( MPI_COUNT ) cdef Datatype __CHAR__ = new_Datatype( MPI_CHAR ) cdef Datatype __WCHAR__ = new_Datatype( MPI_WCHAR ) cdef Datatype __SIGNED_CHAR__ = new_Datatype( MPI_SIGNED_CHAR ) cdef Datatype __SHORT__ = new_Datatype( MPI_SHORT ) cdef Datatype __INT__ = new_Datatype( MPI_INT ) cdef Datatype __LONG__ = new_Datatype( MPI_LONG ) cdef Datatype __LONG_LONG__ = new_Datatype( MPI_LONG_LONG ) cdef Datatype __UNSIGNED_CHAR__ = new_Datatype( MPI_UNSIGNED_CHAR ) cdef Datatype __UNSIGNED_SHORT__ = new_Datatype( MPI_UNSIGNED_SHORT ) cdef Datatype __UNSIGNED__ = new_Datatype( MPI_UNSIGNED ) cdef Datatype __UNSIGNED_LONG__ = new_Datatype( MPI_UNSIGNED_LONG ) cdef Datatype __UNSIGNED_LONG_LONG__ = new_Datatype( MPI_UNSIGNED_LONG_LONG ) cdef Datatype __FLOAT__ = new_Datatype( MPI_FLOAT ) cdef Datatype __DOUBLE__ = new_Datatype( MPI_DOUBLE ) cdef Datatype __LONG_DOUBLE__ = new_Datatype( MPI_LONG_DOUBLE ) cdef Datatype __C_BOOL__ = new_Datatype( MPI_C_BOOL ) cdef Datatype __INT8_T__ = new_Datatype( MPI_INT8_T ) cdef Datatype __INT16_T__ = new_Datatype( MPI_INT16_T ) cdef Datatype __INT32_T__ = new_Datatype( MPI_INT32_T ) cdef Datatype __INT64_T__ = new_Datatype( MPI_INT64_T ) cdef Datatype __UINT8_T__ = new_Datatype( MPI_UINT8_T ) cdef Datatype __UINT16_T__ = new_Datatype( MPI_UINT16_T ) cdef Datatype __UINT32_T__ = new_Datatype( MPI_UINT32_T ) cdef Datatype __UINT64_T__ = new_Datatype( MPI_UINT64_T ) cdef Datatype __C_COMPLEX__ = new_Datatype( MPI_C_COMPLEX ) cdef Datatype __C_FLOAT_COMPLEX__ = new_Datatype( MPI_C_FLOAT_COMPLEX ) cdef Datatype __C_DOUBLE_COMPLEX__ = new_Datatype( MPI_C_DOUBLE_COMPLEX ) cdef Datatype __C_LONG_DOUBLE_COMPLEX__ = new_Datatype( MPI_C_LONG_DOUBLE_COMPLEX ) cdef Datatype __CXX_BOOL__ = new_Datatype( MPI_CXX_BOOL ) cdef Datatype __CXX_FLOAT_COMPLEX__ = new_Datatype( MPI_CXX_FLOAT_COMPLEX ) cdef Datatype __CXX_DOUBLE_COMPLEX__ = new_Datatype( MPI_CXX_DOUBLE_COMPLEX ) cdef Datatype __CXX_LONG_DOUBLE_COMPLEX__ = new_Datatype( MPI_CXX_LONG_DOUBLE_COMPLEX ) cdef Datatype __SHORT_INT__ = new_Datatype( MPI_SHORT_INT ) cdef Datatype __TWOINT__ = 
new_Datatype( MPI_2INT ) cdef Datatype __LONG_INT__ = new_Datatype( MPI_LONG_INT ) cdef Datatype __FLOAT_INT__ = new_Datatype( MPI_FLOAT_INT ) cdef Datatype __DOUBLE_INT__ = new_Datatype( MPI_DOUBLE_INT ) cdef Datatype __LONG_DOUBLE_INT__ = new_Datatype( MPI_LONG_DOUBLE_INT ) cdef Datatype __CHARACTER__ = new_Datatype( MPI_CHARACTER ) cdef Datatype __LOGICAL__ = new_Datatype( MPI_LOGICAL ) cdef Datatype __INTEGER__ = new_Datatype( MPI_INTEGER ) cdef Datatype __REAL__ = new_Datatype( MPI_REAL ) cdef Datatype __DOUBLE_PRECISION__ = new_Datatype( MPI_DOUBLE_PRECISION ) cdef Datatype __COMPLEX__ = new_Datatype( MPI_COMPLEX ) cdef Datatype __DOUBLE_COMPLEX__ = new_Datatype( MPI_DOUBLE_COMPLEX ) cdef Datatype __LOGICAL1__ = new_Datatype( MPI_LOGICAL1 ) cdef Datatype __LOGICAL2__ = new_Datatype( MPI_LOGICAL2 ) cdef Datatype __LOGICAL4__ = new_Datatype( MPI_LOGICAL4 ) cdef Datatype __LOGICAL8__ = new_Datatype( MPI_LOGICAL8 ) cdef Datatype __INTEGER1__ = new_Datatype( MPI_INTEGER1 ) cdef Datatype __INTEGER2__ = new_Datatype( MPI_INTEGER2 ) cdef Datatype __INTEGER4__ = new_Datatype( MPI_INTEGER4 ) cdef Datatype __INTEGER8__ = new_Datatype( MPI_INTEGER8 ) cdef Datatype __INTEGER16__ = new_Datatype( MPI_INTEGER16 ) cdef Datatype __REAL2__ = new_Datatype( MPI_REAL2 ) cdef Datatype __REAL4__ = new_Datatype( MPI_REAL4 ) cdef Datatype __REAL8__ = new_Datatype( MPI_REAL8 ) cdef Datatype __REAL16__ = new_Datatype( MPI_REAL16 ) cdef Datatype __COMPLEX4__ = new_Datatype( MPI_COMPLEX4 ) cdef Datatype __COMPLEX8__ = new_Datatype( MPI_COMPLEX8 ) cdef Datatype __COMPLEX16__ = new_Datatype( MPI_COMPLEX16 ) cdef Datatype __COMPLEX32__ = new_Datatype( MPI_COMPLEX32 ) include "typemap.pxi" include "typestr.pxi" # Predefined datatype handles # --------------------------- DATATYPE_NULL = __DATATYPE_NULL__ #: Null datatype handle # Deprecated datatypes (since MPI-2) UB = __UB__ #: upper-bound marker LB = __LB__ #: lower-bound marker # MPI-specific datatypes PACKED = __PACKED__ BYTE = __BYTE__ AINT = __AINT__ OFFSET = __OFFSET__ COUNT = __COUNT__ # Elementary C datatypes CHAR = __CHAR__ WCHAR = __WCHAR__ SIGNED_CHAR = __SIGNED_CHAR__ SHORT = __SHORT__ INT = __INT__ LONG = __LONG__ LONG_LONG = __LONG_LONG__ UNSIGNED_CHAR = __UNSIGNED_CHAR__ UNSIGNED_SHORT = __UNSIGNED_SHORT__ UNSIGNED = __UNSIGNED__ UNSIGNED_LONG = __UNSIGNED_LONG__ UNSIGNED_LONG_LONG = __UNSIGNED_LONG_LONG__ FLOAT = __FLOAT__ DOUBLE = __DOUBLE__ LONG_DOUBLE = __LONG_DOUBLE__ # C99 datatypes C_BOOL = __C_BOOL__ INT8_T = __INT8_T__ INT16_T = __INT16_T__ INT32_T = __INT32_T__ INT64_T = __INT64_T__ UINT8_T = __UINT8_T__ UINT16_T = __UINT16_T__ UINT32_T = __UINT32_T__ UINT64_T = __UINT64_T__ C_COMPLEX = __C_COMPLEX__ C_FLOAT_COMPLEX = __C_FLOAT_COMPLEX__ C_DOUBLE_COMPLEX = __C_DOUBLE_COMPLEX__ C_LONG_DOUBLE_COMPLEX = __C_LONG_DOUBLE_COMPLEX__ # C++ datatypes CXX_BOOL = __CXX_BOOL__ CXX_FLOAT_COMPLEX = __CXX_FLOAT_COMPLEX__ CXX_DOUBLE_COMPLEX = __CXX_DOUBLE_COMPLEX__ CXX_LONG_DOUBLE_COMPLEX = __CXX_LONG_DOUBLE_COMPLEX__ # C Datatypes for reduction operations SHORT_INT = __SHORT_INT__ INT_INT = TWOINT = __TWOINT__ LONG_INT = __LONG_INT__ FLOAT_INT = __FLOAT_INT__ DOUBLE_INT = __DOUBLE_INT__ LONG_DOUBLE_INT = __LONG_DOUBLE_INT__ # Elementary Fortran datatypes CHARACTER = __CHARACTER__ LOGICAL = __LOGICAL__ INTEGER = __INTEGER__ REAL = __REAL__ DOUBLE_PRECISION = __DOUBLE_PRECISION__ COMPLEX = __COMPLEX__ DOUBLE_COMPLEX = __DOUBLE_COMPLEX__ # Size-specific Fortran datatypes LOGICAL1 = __LOGICAL1__ LOGICAL2 = __LOGICAL2__ LOGICAL4 = __LOGICAL4__ LOGICAL8 = 
__LOGICAL8__ INTEGER1 = __INTEGER1__ INTEGER2 = __INTEGER2__ INTEGER4 = __INTEGER4__ INTEGER8 = __INTEGER8__ INTEGER16 = __INTEGER16__ REAL2 = __REAL2__ REAL4 = __REAL4__ REAL8 = __REAL8__ REAL16 = __REAL16__ COMPLEX4 = __COMPLEX4__ COMPLEX8 = __COMPLEX8__ COMPLEX16 = __COMPLEX16__ COMPLEX32 = __COMPLEX32__ # Convenience aliases UNSIGNED_INT = __UNSIGNED__ SIGNED_SHORT = __SHORT__ SIGNED_INT = __INT__ SIGNED_LONG = __LONG__ SIGNED_LONG_LONG = __LONG_LONG__ BOOL = __C_BOOL__ SINT8_T = __INT8_T__ SINT16_T = __INT16_T__ SINT32_T = __INT32_T__ SINT64_T = __INT64_T__ F_BOOL = __LOGICAL__ F_INT = __INTEGER__ F_FLOAT = __REAL__ F_DOUBLE = __DOUBLE_PRECISION__ F_COMPLEX = __COMPLEX__ F_FLOAT_COMPLEX = __COMPLEX__ F_DOUBLE_COMPLEX = __DOUBLE_COMPLEX__ mpi4py-3.1.6/src/mpi4py/MPI/Errhandler.pyx000066400000000000000000000040131460670727200202420ustar00rootroot00000000000000cdef class Errhandler: """ Error handler """ def __cinit__(self, Errhandler errhandler: Optional[Errhandler] = None): self.ob_mpi = MPI_ERRHANDLER_NULL if errhandler is None: return self.ob_mpi = errhandler.ob_mpi def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Errhandler(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Errhandler): return NotImplemented cdef Errhandler s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_ERRHANDLER_NULL def Free(self) -> None: """ Free an error handler """ CHKERR( MPI_Errhandler_free(&self.ob_mpi) ) if self is __ERRORS_RETURN__: self.ob_mpi = MPI_ERRORS_RETURN if self is __ERRORS_ARE_FATAL__: self.ob_mpi = MPI_ERRORS_ARE_FATAL # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Errhandler_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Errhandler: """ """ cdef Errhandler errhandler = Errhandler.__new__(Errhandler) errhandler.ob_mpi = MPI_Errhandler_f2c(arg) return errhandler cdef Errhandler __ERRHANDLER_NULL__ = new_Errhandler(MPI_ERRHANDLER_NULL) cdef Errhandler __ERRORS_RETURN__ = new_Errhandler(MPI_ERRORS_RETURN) cdef Errhandler __ERRORS_ARE_FATAL__ = new_Errhandler(MPI_ERRORS_ARE_FATAL) # Predefined errhandler handles # ----------------------------- ERRHANDLER_NULL = __ERRHANDLER_NULL__ #: Null error handler ERRORS_RETURN = __ERRORS_RETURN__ #: Errors return error handler ERRORS_ARE_FATAL = __ERRORS_ARE_FATAL__ #: Errors are fatal error handler mpi4py-3.1.6/src/mpi4py/MPI/Exception.pyx000066400000000000000000000075771460670727200201340ustar00rootroot00000000000000include "ExceptionP.pyx" #include "ExceptionC.pyx" MPIException = Exception # Actually no errors SUCCESS = MPI_SUCCESS ERR_LASTCODE = MPI_ERR_LASTCODE # MPI-1 Error classes # ------------------- # MPI-1 Objects ERR_COMM = MPI_ERR_COMM ERR_GROUP = MPI_ERR_GROUP ERR_TYPE = MPI_ERR_TYPE ERR_REQUEST = MPI_ERR_REQUEST ERR_OP = MPI_ERR_OP # Communication argument parameters ERR_BUFFER = MPI_ERR_BUFFER ERR_COUNT = MPI_ERR_COUNT ERR_TAG = MPI_ERR_TAG ERR_RANK = MPI_ERR_RANK ERR_ROOT = MPI_ERR_ROOT ERR_TRUNCATE = MPI_ERR_TRUNCATE # Multiple completion ERR_IN_STATUS = MPI_ERR_IN_STATUS ERR_PENDING = MPI_ERR_PENDING # Topology argument parameters ERR_TOPOLOGY = MPI_ERR_TOPOLOGY ERR_DIMS = MPI_ERR_DIMS # Other arguments parameters ERR_ARG = MPI_ERR_ARG # Other errors ERR_OTHER = MPI_ERR_OTHER ERR_UNKNOWN = MPI_ERR_UNKNOWN 
ERR_INTERN = MPI_ERR_INTERN # MPI-2 Error classes # ------------------- # MPI-2 Objects ERR_INFO = MPI_ERR_INFO ERR_FILE = MPI_ERR_FILE ERR_WIN = MPI_ERR_WIN # Object attributes ERR_KEYVAL = MPI_ERR_KEYVAL # Info Object ERR_INFO_KEY = MPI_ERR_INFO_KEY ERR_INFO_VALUE = MPI_ERR_INFO_VALUE ERR_INFO_NOKEY = MPI_ERR_INFO_NOKEY # Input/Ouput ERR_ACCESS = MPI_ERR_ACCESS ERR_AMODE = MPI_ERR_AMODE ERR_BAD_FILE = MPI_ERR_BAD_FILE ERR_FILE_EXISTS = MPI_ERR_FILE_EXISTS ERR_FILE_IN_USE = MPI_ERR_FILE_IN_USE ERR_NO_SPACE = MPI_ERR_NO_SPACE ERR_NO_SUCH_FILE = MPI_ERR_NO_SUCH_FILE ERR_IO = MPI_ERR_IO ERR_READ_ONLY = MPI_ERR_READ_ONLY ERR_CONVERSION = MPI_ERR_CONVERSION ERR_DUP_DATAREP = MPI_ERR_DUP_DATAREP ERR_UNSUPPORTED_DATAREP = MPI_ERR_UNSUPPORTED_DATAREP ERR_UNSUPPORTED_OPERATION = MPI_ERR_UNSUPPORTED_OPERATION # Dynamic Process Management ERR_NAME = MPI_ERR_NAME ERR_NO_MEM = MPI_ERR_NO_MEM ERR_NOT_SAME = MPI_ERR_NOT_SAME ERR_PORT = MPI_ERR_PORT ERR_QUOTA = MPI_ERR_QUOTA ERR_SERVICE = MPI_ERR_SERVICE ERR_SPAWN = MPI_ERR_SPAWN # Windows ERR_BASE = MPI_ERR_BASE ERR_SIZE = MPI_ERR_SIZE ERR_DISP = MPI_ERR_DISP ERR_ASSERT = MPI_ERR_ASSERT ERR_LOCKTYPE = MPI_ERR_LOCKTYPE ERR_RMA_CONFLICT = MPI_ERR_RMA_CONFLICT ERR_RMA_SYNC = MPI_ERR_RMA_SYNC ERR_RMA_RANGE = MPI_ERR_RMA_RANGE ERR_RMA_ATTACH = MPI_ERR_RMA_ATTACH ERR_RMA_SHARED = MPI_ERR_RMA_SHARED ERR_RMA_FLAVOR = MPI_ERR_RMA_FLAVOR def Get_error_class(int errorcode: int) -> int: """ Convert an *error code* into an *error class* """ cdef int errorclass = MPI_SUCCESS CHKERR( MPI_Error_class(errorcode, &errorclass) ) return errorclass def Get_error_string(int errorcode: int) -> str: """ Return the *error string* for a given *error class* or *error code* """ cdef char string[MPI_MAX_ERROR_STRING+1] cdef int resultlen = 0 CHKERR( MPI_Error_string(errorcode, string, &resultlen) ) return tompistr(string, resultlen) def Add_error_class() -> int: """ Add an *error class* to the known error classes """ cdef int errorclass = MPI_SUCCESS CHKERR( MPI_Add_error_class(&errorclass) ) return errorclass def Add_error_code(int errorclass: int) -> int: """ Add an *error code* to an *error class* """ cdef int errorcode = MPI_SUCCESS CHKERR( MPI_Add_error_code(errorclass, &errorcode) ) return errorcode def Add_error_string(int errorcode: int, string: str) -> None: """ Associate an *error string* with an *error class* or *errorcode* """ cdef char *cstring = NULL string = asmpistr(string, &cstring) CHKERR( MPI_Add_error_string(errorcode, cstring) ) mpi4py-3.1.6/src/mpi4py/MPI/ExceptionC.pyx000066400000000000000000000042651460670727200202260ustar00rootroot00000000000000cdef extern from "Python.h": ctypedef class __builtin__.RuntimeError [object PyBaseExceptionObject]: pass cdef class Exception(RuntimeError): """ Exception class """ cdef int ob_mpi def __cinit__(self, int ierr: int = 0): if ierr < MPI_SUCCESS: ierr = MPI_ERR_UNKNOWN self.ob_mpi = ierr RuntimeError.__init__(self, ierr) def __richcmp__(Exception self, object error, int op): cdef int ierr = self.ob_mpi if op == Py_LT: return ierr < error if op == Py_LE: return ierr <= error if op == Py_EQ: return ierr == error if op == Py_NE: return ierr != error if op == Py_GT: return ierr > error if op == Py_GE: return ierr >= error def __hash__(self) -> int: return hash(self.ob_mpi) def __bool__(self) -> bool: return self.ob_mpi != MPI_SUCCESS def __int__(self) -> int: return self.ob_mpi def __repr__(self) -> str: return "MPI.Exception(%d)" % self.ob_mpi def __str__(self) -> str: if not mpi_active(): return "error code: %d" % 
self.ob_mpi return self.Get_error_string() def Get_error_code(self) -> int: """ Error code """ cdef int errorcode = MPI_SUCCESS errorcode = self.ob_mpi return errorcode property error_code: """error code""" def __get__(self) -> int: return self.Get_error_code() def Get_error_class(self) -> int: """ Error class """ cdef int errorclass = MPI_SUCCESS CHKERR( MPI_Error_class(self.ob_mpi, &errorclass) ) return errorclass property error_class: """error class""" def __get__(self) -> int: return self.Get_error_class() def Get_error_string(self) -> str: """ Error string """ cdef char string[MPI_MAX_ERROR_STRING+1] cdef int resultlen = 0 CHKERR( MPI_Error_string(self.ob_mpi, string, &resultlen) ) return tompistr(string, resultlen) property error_string: """error string""" def __get__(self) -> str: return self.Get_error_string() mpi4py-3.1.6/src/mpi4py/MPI/ExceptionP.pyx000066400000000000000000000044011460670727200202330ustar00rootroot00000000000000class Exception(RuntimeError): """ Exception class """ def __init__(self, int ierr: int = 0): if ierr < MPI_SUCCESS: ierr = MPI_ERR_UNKNOWN self.ob_mpi = ierr RuntimeError.__init__(self, self.ob_mpi) def __eq__(self, object error) -> bool: cdef int ierr = self.ob_mpi return (ierr == error) def __ne__(self, object error) -> bool: cdef int ierr = self.ob_mpi return (ierr != error) def __lt__(self, object error) -> bool: cdef int ierr = self.ob_mpi return (ierr < error) def __le__(self, object error) -> bool: cdef int ierr = self.ob_mpi return (ierr <= error) def __gt__(self, object error) -> bool: cdef int ierr = self.ob_mpi return (ierr > error) def __ge__(self, object error) -> bool: cdef int ierr = self.ob_mpi return (ierr >= error) def __hash__(self) -> int: return hash(self.ob_mpi) def __bool__(self) -> bool: cdef int ierr = self.ob_mpi return ierr != MPI_SUCCESS def __int__(self) -> int: return self.ob_mpi def __repr__(self) -> str: return "MPI.Exception(%d)" % self.ob_mpi def __str__(self) -> str: if not mpi_active(): return "error code: %d" % self.ob_mpi return self.Get_error_string() def Get_error_code(self) -> int: """ Error code """ cdef int errorcode = MPI_SUCCESS errorcode = self.ob_mpi return errorcode error_code = property(Get_error_code, doc="error code") def Get_error_class(self) -> int: """ Error class """ cdef int errorclass = MPI_SUCCESS CHKERR( MPI_Error_class(self.ob_mpi, &errorclass) ) return errorclass error_class = property(Get_error_class, doc="error class") def Get_error_string(self) -> str: """ Error string """ cdef char string[MPI_MAX_ERROR_STRING+1] cdef int resultlen = 0 CHKERR( MPI_Error_string(self.ob_mpi, string, &resultlen) ) return tompistr(string, resultlen) error_string = property(Get_error_string, doc="error string") if PY2: __nonzero__ = __bool__ mpi4py-3.1.6/src/mpi4py/MPI/File.pyx000066400000000000000000000623041460670727200170420ustar00rootroot00000000000000# Opening modes # ------------- MODE_RDONLY = MPI_MODE_RDONLY #: Read only MODE_WRONLY = MPI_MODE_WRONLY #: Write only MODE_RDWR = MPI_MODE_RDWR #: Reading and writing MODE_CREATE = MPI_MODE_CREATE #: Create the file if it does not exist MODE_EXCL = MPI_MODE_EXCL #: Error if creating file that already exists MODE_DELETE_ON_CLOSE = MPI_MODE_DELETE_ON_CLOSE #: Delete file on close MODE_UNIQUE_OPEN = MPI_MODE_UNIQUE_OPEN #: File will not be concurrently opened elsewhere MODE_SEQUENTIAL = MPI_MODE_SEQUENTIAL #: File will only be accessed sequentially MODE_APPEND = MPI_MODE_APPEND #: Set initial position of all file pointers to end of file # Positioning # ----------- 
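# Illustrative sketch (assuming `from mpi4py import MPI` and an open file
# handle `fh`): the whence values below are passed to File.Seek, e.g.
#   fh.Seek(0, MPI.SEEK_SET)    # absolute offset from the start
#   fh.Seek(16, MPI.SEEK_CUR)   # relative to the current position
#   fh.Seek(0, MPI.SEEK_END)    # relative to the end of the file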
SEEK_SET = MPI_SEEK_SET #: File pointer is set to offset SEEK_CUR = MPI_SEEK_CUR #: File pointer is set to the current position plus offset SEEK_END = MPI_SEEK_END #: File pointer is set to the end plus offset DISPLACEMENT_CURRENT = MPI_DISPLACEMENT_CURRENT #: Special displacement value for files opened in sequential mode DISP_CUR = MPI_DISPLACEMENT_CURRENT #: Convenience alias for `DISPLACEMENT_CURRENT` cdef class File: """ File handle """ def __cinit__(self, File file: Optional[File] = None): self.ob_mpi = MPI_FILE_NULL if file is None: return self.ob_mpi = file.ob_mpi def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_File(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, File): return NotImplemented cdef File s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_FILE_NULL # File Manipulation # ----------------- @classmethod def Open( cls, Intracomm comm: Intracomm, filename: str, int amode: int = MODE_RDONLY, Info info: Info = INFO_NULL, ) -> File: """ Open a file """ cdef char *cfilename = NULL filename = asmpistr(filename, &cfilename) cdef File file = File.__new__(File) with nogil: CHKERR( MPI_File_open( comm.ob_mpi, cfilename, amode, info.ob_mpi, &file.ob_mpi) ) file_set_eh(file.ob_mpi) return file def Close(self) -> None: """ Close a file """ with nogil: CHKERR( MPI_File_close(&self.ob_mpi) ) @classmethod def Delete(cls, filename: str, Info info: Info = INFO_NULL) -> None: """ Delete a file """ cdef char *cfilename = NULL filename = asmpistr(filename, &cfilename) with nogil: CHKERR( MPI_File_delete(cfilename, info.ob_mpi) ) def Set_size(self, Offset size: int) -> None: """ Sets the file size """ with nogil: CHKERR( MPI_File_set_size(self.ob_mpi, size) ) def Preallocate(self, Offset size: int) -> None: """ Preallocate storage space for a file """ with nogil: CHKERR( MPI_File_preallocate(self.ob_mpi, size) ) def Get_size(self) -> int: """ Return the file size """ cdef MPI_Offset size = 0 with nogil: CHKERR( MPI_File_get_size(self.ob_mpi, &size) ) return size property size: """file size""" def __get__(self) -> int: return self.Get_size() def Get_amode(self) -> int: """ Return the file access mode """ cdef int amode = 0 with nogil: CHKERR( MPI_File_get_amode(self.ob_mpi, &amode) ) return amode property amode: """file access mode""" def __get__(self) -> int: return self.Get_amode() # File Group # ---------- def Get_group(self) -> Group: """ Return the group of processes that opened the file """ cdef Group group = Group.__new__(Group) with nogil: CHKERR( MPI_File_get_group(self.ob_mpi, &group.ob_mpi) ) return group property group: """file group""" def __get__(self) -> Group: return self.Get_group() # File Info # --------- def Set_info(self, Info info: Info) -> None: """ Set new values for the hints associated with a file """ with nogil: CHKERR( MPI_File_set_info(self.ob_mpi, info.ob_mpi) ) def Get_info(self) -> Info: """ Return the hints for a file that that are currently in use """ cdef Info info = Info.__new__(Info) with nogil: CHKERR( MPI_File_get_info(self.ob_mpi, &info.ob_mpi) ) return info property info: """file info""" def __get__(self) -> Info: return self.Get_info() def __set__(self, value: Info): self.Set_info(value) # File Views # ---------- def Set_view( self, Offset disp: int = 0, Datatype 
etype: Datatype = BYTE, Datatype filetype: Optional[Datatype] = None, datarep: str = "native", Info info: Info = INFO_NULL, ) -> None: """ Set the file view """ cdef char *cdatarep = b"native" if datarep is not None: datarep = asmpistr(datarep, &cdatarep) cdef MPI_Datatype cetype = etype.ob_mpi cdef MPI_Datatype cftype = cetype if filetype is not None: cftype = filetype.ob_mpi with nogil: CHKERR( MPI_File_set_view( self.ob_mpi, disp, cetype, cftype, cdatarep, info.ob_mpi) ) def Get_view(self) -> Tuple[int, Datatype, Datatype, str]: """ Return the file view """ cdef MPI_Offset disp = 0 cdef Datatype etype = Datatype.__new__(Datatype) cdef Datatype ftype = Datatype.__new__(Datatype) cdef char cdatarep[MPI_MAX_DATAREP_STRING+1] cdatarep[0] = 0 # just in case with nogil: CHKERR( MPI_File_get_view( self.ob_mpi, &disp, &etype.ob_mpi, &ftype.ob_mpi, cdatarep) ) cdatarep[MPI_MAX_DATAREP_STRING] = 0 # just in case #if builtin_Datatype(etype.ob_mpi): etype.flags = 0 #if builtin_Datatype(ftype.ob_mpi): ftype.flags = 0 cdef object datarep = mpistr(cdatarep) return (disp, etype, ftype, datarep) # Data Access # ----------- # Data Access with Explicit Offsets # --------------------------------- def Read_at( self, Offset offset: int, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Read using explicit offset """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_at( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Read_at_all( self, Offset offset: int, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Collective read using explicit offset """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_at_all( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Write_at( self, Offset offset: int, buf: BufSpec, Status status:Optional[Status] = None, ) -> None: """ Write using explicit offset """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_at( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Write_at_all( self, Offset offset: int, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Collective write using explicit offset """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_at_all( self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) ) def Iread_at( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking read using explicit offset """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iread_at( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iread_at_all( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking collective read using explicit offset """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iread_at_all( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite_at( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking write using explicit offset """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iwrite_at( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m 
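# keep a reference to the message object on the request so the user
# buffer stays alive until this nonblocking write completes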
return request def Iwrite_at_all( self, Offset offset: int, buf: BufSpec, ) -> Request: """ Nonblocking collective write using explicit offset """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iwrite_at_all( self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request # Data Access with Individual File Pointers # ----------------------------------------- def Read( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Read using individual file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Read_all( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Collective read using individual file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_all( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Write using individual file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write_all( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Collective write using individual file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_all( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Iread( self, buf: BufSpec, ) -> Request: """ Nonblocking read using individual file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iread( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iread_all( self, buf: BufSpec, ) -> Request: """ Nonblocking collective read using individual file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iread_all( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite( self, buf: BufSpec, ) -> Request: """ Nonblocking write using individual file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iwrite( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite_all( self, buf: BufSpec, ) -> Request: """ Nonblocking collective write using individual file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iwrite_all( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Seek(self, Offset offset: int, int whence: int = SEEK_SET) -> None: """ Update the individual file pointer """ with nogil: CHKERR( MPI_File_seek(self.ob_mpi, offset, whence) ) def Get_position(self) -> int: """ Return the current position of the individual file pointer in etype units relative to the current view """ cdef MPI_Offset offset = 0 with nogil: CHKERR( MPI_File_get_position(self.ob_mpi, &offset) ) return offset def Get_byte_offset(self, Offset offset: int) -> int: """ Return the absolute byte position in the file corresponding to 'offset' 
etypes relative to the current view """ cdef MPI_Offset disp = 0 with nogil: CHKERR( MPI_File_get_byte_offset( self.ob_mpi, offset, &disp) ) return disp # Data Access with Shared File Pointers # ------------------------------------- def Read_shared( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Read using shared file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_shared( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write_shared( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Write using shared file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_shared( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Iread_shared( self, buf: BufSpec, ) -> Request: """ Nonblocking read using shared file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iread_shared( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Iwrite_shared( self, buf: BufSpec, ) -> Request: """ Nonblocking write using shared file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_File_iwrite_shared( self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) ) request.ob_buf = m return request def Read_ordered( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Collective read using shared file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_ordered( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Write_ordered( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Collective write using shared file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_ordered( self.ob_mpi, m.buf, m.count, m.dtype, statusp) ) def Seek_shared( self, Offset offset: int, int whence: int = SEEK_SET, ) -> None: """ Update the shared file pointer """ with nogil: CHKERR( MPI_File_seek_shared( self.ob_mpi, offset, whence) ) def Get_position_shared(self) -> int: """ Return the current position of the shared file pointer in etype units relative to the current view """ cdef MPI_Offset offset = 0 with nogil: CHKERR( MPI_File_get_position_shared( self.ob_mpi, &offset) ) return offset # Split Collective Data Access Routines # ------------------------------------- # explicit offset def Read_at_all_begin( self, Offset offset: int, buf: BufSpec, ) -> None: """ Start a split collective read using explict offset """ cdef _p_msg_io m = message_io_read(buf) with nogil: CHKERR( MPI_File_read_at_all_begin( self.ob_mpi, offset, m.buf, m.count, m.dtype) ) def Read_at_all_end( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Complete a split collective read using explict offset """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_at_all_end( self.ob_mpi, m.buf, statusp) ) def Write_at_all_begin( self, Offset offset: int, buf: BufSpec, ) -> None: """ Start a split collective write using explict offset """ cdef _p_msg_io m = message_io_write(buf) with nogil: CHKERR( MPI_File_write_at_all_begin( self.ob_mpi, offset, m.buf, m.count, m.dtype) ) def 
Write_at_all_end( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Complete a split collective write using explict offset """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_at_all_end( self.ob_mpi, m.buf, statusp) ) # individual file pointer def Read_all_begin( self, buf: BufSpec, ) -> None: """ Start a split collective read using individual file pointer """ cdef _p_msg_io m = message_io_read(buf) with nogil: CHKERR( MPI_File_read_all_begin( self.ob_mpi, m.buf, m.count, m.dtype) ) def Read_all_end( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Complete a split collective read using individual file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_all_end( self.ob_mpi, m.buf, statusp) ) def Write_all_begin( self, buf: BufSpec, ) -> None: """ Start a split collective write using individual file pointer """ cdef _p_msg_io m = message_io_write(buf) with nogil: CHKERR( MPI_File_write_all_begin( self.ob_mpi, m.buf, m.count, m.dtype) ) def Write_all_end( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Complete a split collective write using individual file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_all_end( self.ob_mpi, m.buf, statusp) ) # shared file pointer def Read_ordered_begin( self, buf: BufSpec, ) -> None: """ Start a split collective read using shared file pointer """ cdef _p_msg_io m = message_io_read(buf) with nogil: CHKERR( MPI_File_read_ordered_begin( self.ob_mpi, m.buf, m.count, m.dtype) ) def Read_ordered_end( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Complete a split collective read using shared file pointer """ cdef _p_msg_io m = message_io_read(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_read_ordered_end( self.ob_mpi, m.buf, statusp) ) def Write_ordered_begin( self, buf: BufSpec, ) -> None: """ Start a split collective write using shared file pointer """ cdef _p_msg_io m = message_io_write(buf) with nogil: CHKERR( MPI_File_write_ordered_begin( self.ob_mpi, m.buf, m.count, m.dtype) ) def Write_ordered_end( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Complete a split collective write using shared file pointer """ cdef _p_msg_io m = message_io_write(buf) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_File_write_ordered_end( self.ob_mpi, m.buf, statusp) ) # File Interoperability # --------------------- def Get_type_extent(self, Datatype datatype: Datatype) -> int: """ Return the extent of datatype in the file """ cdef MPI_Aint extent = 0 with nogil: CHKERR( MPI_File_get_type_extent( self.ob_mpi, datatype.ob_mpi, &extent) ) return extent # Consistency and Semantics # ------------------------- def Set_atomicity(self, bint flag: bool) -> None: """ Set the atomicity mode """ with nogil: CHKERR( MPI_File_set_atomicity(self.ob_mpi, flag) ) def Get_atomicity(self) -> bool: """ Return the atomicity mode """ cdef int flag = 0 with nogil: CHKERR( MPI_File_get_atomicity(self.ob_mpi, &flag) ) return flag property atomicity: """atomicity""" def __get__(self) -> bool: return self.Get_atomicity() def __set__(self, value: bool): self.Set_atomicity(value) def Sync(self) -> None: """ Causes all previous writes to be transferred to the storage device """ 
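        # Illustrative usage sketch (comment only, not executed here). With
        # atomic mode disabled, the usual way to make data written by one rank
        # visible to reads on other ranks is the Sync / Barrier / Sync pattern.
        # The file name below is a placeholder chosen for illustration:
        #
        #     from mpi4py import MPI
        #     comm = MPI.COMM_WORLD
        #     amode = MPI.MODE_CREATE | MPI.MODE_RDWR
        #     fh = MPI.File.Open(comm, "datafile.bin", amode)
        #     if comm.rank == 0:
        #         fh.Write_at(0, bytearray(b"hello"))
        #     fh.Sync(); comm.Barrier(); fh.Sync()
        #     buf = bytearray(5)
        #     fh.Read_at(0, buf)
        #     fh.Close()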
with nogil: CHKERR( MPI_File_sync(self.ob_mpi) ) # Error Handling # -------------- def Get_errhandler(self) -> Errhandler: """ Get the error handler for a file """ cdef Errhandler errhandler = Errhandler.__new__(Errhandler) CHKERR( MPI_File_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) ) return errhandler def Set_errhandler(self, Errhandler errhandler: Errhandler) -> None: """ Set the error handler for a file """ CHKERR( MPI_File_set_errhandler(self.ob_mpi, errhandler.ob_mpi) ) def Call_errhandler(self, int errorcode: int) -> None: """ Call the error handler installed on a file """ CHKERR( MPI_File_call_errhandler(self.ob_mpi, errorcode) ) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_File_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> File: """ """ cdef File file = File.__new__(File) file.ob_mpi = MPI_File_f2c(arg) return file cdef File __FILE_NULL__ = new_File(MPI_FILE_NULL) # Predefined file handles # ----------------------- FILE_NULL = __FILE_NULL__ #: Null file handle # User-defined data representations # --------------------------------- def Register_datarep( datarep: str, read_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], write_fn: Callable[[Buffer, Datatype, int, Buffer, int], None], extent_fn: Callable[[Datatype], int], ) -> None: """ Register user-defined data representations """ cdef char *cdatarep = NULL datarep = asmpistr(datarep, &cdatarep) cdef object state = _p_datarep(read_fn, write_fn, extent_fn) cdef MPI_Datarep_conversion_function *rd = MPI_CONVERSION_FN_NULL cdef MPI_Datarep_conversion_function *wr = MPI_CONVERSION_FN_NULL cdef MPI_Datarep_extent_function *ex = datarep_extent_fn cdef void* xs = state if read_fn is not None: rd = datarep_read_fn if write_fn is not None: wr = datarep_write_fn CHKERR ( MPI_Register_datarep(cdatarep, rd, wr, ex, xs) ) datarep_registry[datarep] = state mpi4py-3.1.6/src/mpi4py/MPI/Group.pyx000066400000000000000000000157471460670727200172700ustar00rootroot00000000000000cdef class Group: """ Group of processes """ def __cinit__(self, Group group: Optional[Group] = None): self.ob_mpi = MPI_GROUP_NULL if group is None: return self.ob_mpi = group.ob_mpi def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Group(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Group): return NotImplemented cdef Group s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_GROUP_NULL # Group Accessors # --------------- def Get_size(self) -> int: """ Return the size of a group """ cdef int size = -1 CHKERR( MPI_Group_size(self.ob_mpi, &size) ) return size property size: """number of processes in group""" def __get__(self) -> int: return self.Get_size() def Get_rank(self) -> int: """ Return the rank of this process in a group """ cdef int rank = -1 CHKERR( MPI_Group_rank(self.ob_mpi, &rank) ) return rank property rank: """rank of this process in group""" def __get__(self) -> int: return self.Get_rank() @classmethod def Translate_ranks( cls, Group group1: Group, ranks1: Sequence[int], Group group2: Optional[Group] = None, ) -> List[int]: """ Translate the ranks of processes in one group to those in another group """ cdef MPI_Group grp1 = MPI_GROUP_NULL cdef MPI_Group grp2 = MPI_GROUP_NULL cdef int i = 0, n = 0, *iranks1 = NULL, 
*iranks2 = NULL cdef tmp1 = getarray(ranks1, &n, &iranks1) cdef tmp2 = newarray(n, &iranks2) # grp1 = group1.ob_mpi if group2 is not None: grp2 = group2.ob_mpi else: CHKERR( MPI_Comm_group(MPI_COMM_WORLD, &grp2) ) try: CHKERR( MPI_Group_translate_ranks(grp1, n, iranks1, grp2, iranks2) ) finally: if group2 is None: CHKERR( MPI_Group_free(&grp2) ) # cdef object ranks2 = [iranks2[i] for i from 0 <= i < n] return ranks2 @classmethod def Compare(cls, Group group1: Group, Group group2: Group) -> int: """ Compare two groups """ cdef int flag = MPI_UNEQUAL CHKERR( MPI_Group_compare(group1.ob_mpi, group2.ob_mpi, &flag) ) return flag # Group Constructors # ------------------ def Dup(self) -> Group: """ Duplicate a group """ cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_union(self.ob_mpi, MPI_GROUP_EMPTY, &group.ob_mpi) ) return group @classmethod def Union(cls, Group group1: Group, Group group2: Group) -> Group: """ Produce a group by combining two existing groups """ cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_union( group1.ob_mpi, group2.ob_mpi, &group.ob_mpi) ) return group @classmethod def Intersection(cls, Group group1: Group, Group group2: Group) -> Group: """ Produce a group as the intersection of two existing groups """ cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_intersection( group1.ob_mpi, group2.ob_mpi, &group.ob_mpi) ) return group Intersect = Intersection @classmethod def Difference(cls, Group group1: Group, Group group2: Group) -> Group: """ Produce a group from the difference of two existing groups """ cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_difference( group1.ob_mpi, group2.ob_mpi, &group.ob_mpi) ) return group def Incl(self, ranks: Sequence[int]) -> Group: """ Produce a group by reordering an existing group and taking only listed members """ cdef int n = 0, *iranks = NULL ranks = getarray(ranks, &n, &iranks) cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_incl(self.ob_mpi, n, iranks, &group.ob_mpi) ) return group def Excl(self, ranks: Sequence[int]) -> Group: """ Produce a group by reordering an existing group and taking only unlisted members """ cdef int n = 0, *iranks = NULL ranks = getarray(ranks, &n, &iranks) cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_excl(self.ob_mpi, n, iranks, &group.ob_mpi) ) return group def Range_incl(self, ranks: Sequence[Tuple[int, int, int]]) -> Group: """ Create a new group from ranges of of ranks in an existing group """ cdef int *p = NULL, (*ranges)[3]# = NULL ## XXX cython fails ranges = NULL cdef int i = 0, n = len(ranks) cdef tmp1 = allocate(n, sizeof(int[3]), &ranges) for i from 0 <= i < n: p = ranges[i] p[0], p[1], p[2] = ranks[i] cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_range_incl(self.ob_mpi, n, ranges, &group.ob_mpi) ) return group def Range_excl(self, ranks: Sequence[Tuple[int, int, int]]) -> Group: """ Create a new group by excluding ranges of processes from an existing group """ cdef int *p = NULL, (*ranges)[3]# = NULL ## XXX cython fails ranges = NULL cdef int i = 0, n = len(ranks) cdef tmp1 = allocate(n, sizeof(int[3]), &ranges) for i from 0 <= i < n: p = ranges[i] p[0], p[1], p[2] = ranks[i] cdef Group group = Group.__new__(Group) CHKERR( MPI_Group_range_excl(self.ob_mpi, n, ranges, &group.ob_mpi) ) return group # Group Destructor # ---------------- def Free(self) -> None: """ Free a group """ CHKERR( MPI_Group_free(&self.ob_mpi) ) if self is __GROUP_EMPTY__: self.ob_mpi = MPI_GROUP_EMPTY # Fortran Handle # -------------- def py2f(self) 
-> int: """ """ return MPI_Group_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Group: """ """ cdef Group group = Group.__new__(Group) group.ob_mpi = MPI_Group_f2c(arg) return group cdef Group __GROUP_NULL__ = new_Group ( MPI_GROUP_NULL ) cdef Group __GROUP_EMPTY__ = new_Group ( MPI_GROUP_EMPTY ) # Predefined group handles # ------------------------ GROUP_NULL = __GROUP_NULL__ #: Null group handle GROUP_EMPTY = __GROUP_EMPTY__ #: Empty group handle mpi4py-3.1.6/src/mpi4py/MPI/Info.pyx000066400000000000000000000174171460670727200170630ustar00rootroot00000000000000cdef class Info: """ Info object """ def __cinit__(self, Info info: Optional[Info] =None): self.ob_mpi = MPI_INFO_NULL if info is None: return self.ob_mpi = info.ob_mpi def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Info(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Info): return NotImplemented cdef Info s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_INFO_NULL @classmethod def Create(cls) -> Info: """ Create a new, empty info object """ cdef Info info = Info.__new__(Info) CHKERR( MPI_Info_create(&info.ob_mpi) ) return info def Free(self) -> None: """ Free a info object """ CHKERR( MPI_Info_free(&self.ob_mpi) ) if self is __INFO_ENV__: self.ob_mpi = MPI_INFO_ENV def Dup(self) -> Info: """ Duplicate an existing info object, creating a new object, with the same (key, value) pairs and the same ordering of keys """ cdef Info info = Info.__new__(Info) CHKERR( MPI_Info_dup(self.ob_mpi, &info.ob_mpi) ) return info def Get(self, key: str, int maxlen: int = -1) -> Optional[str]: """ Retrieve the value associated with a key """ if maxlen < 0: maxlen = MPI_MAX_INFO_VAL if maxlen > MPI_MAX_INFO_VAL: maxlen = MPI_MAX_INFO_VAL cdef char *ckey = NULL cdef char *cvalue = NULL cdef int flag = 0 key = asmpistr(key, &ckey) cdef tmp = allocate((maxlen+1), sizeof(char), &cvalue) cvalue[0] = 0 # just in case CHKERR( MPI_Info_get(self.ob_mpi, ckey, maxlen, cvalue, &flag) ) cvalue[maxlen] = 0 # just in case if not flag: return None return mpistr(cvalue) def Set(self, key: str, value: str) -> None: """ Add the (key, value) pair to info, and overrides the value if a value for the same key was previously set """ cdef char *ckey = NULL cdef char *cvalue = NULL key = asmpistr(key, &ckey) value = asmpistr(value, &cvalue) CHKERR( MPI_Info_set(self.ob_mpi, ckey, cvalue) ) def Delete(self, key: str) -> None: """ Remove a (key, value) pair from info """ cdef char *ckey = NULL key = asmpistr(key, &ckey) CHKERR( MPI_Info_delete(self.ob_mpi, ckey) ) def Get_nkeys(self) -> int: """ Return the number of currently defined keys in info """ cdef int nkeys = 0 CHKERR( MPI_Info_get_nkeys(self.ob_mpi, &nkeys) ) return nkeys def Get_nthkey(self, int n: int) -> str: """ Return the nth defined key in info. 
Keys are numbered in the range [0, N) where N is the value returned by `Info.Get_nkeys()` """ cdef char ckey[MPI_MAX_INFO_KEY+1] ckey[0] = 0 # just in case CHKERR( MPI_Info_get_nthkey(self.ob_mpi, n, ckey) ) ckey[MPI_MAX_INFO_KEY] = 0 # just in case return mpistr(ckey) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Info_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Info: """ """ cdef Info info = Info.__new__(Info) info.ob_mpi = MPI_Info_f2c(arg) return info # Python mapping emulation # ------------------------ def __len__(self) -> int: if not self: return 0 return self.Get_nkeys() def __contains__(self, key: str) -> bool: if not self: return False cdef char *ckey = NULL cdef int dummy = 0 cdef int haskey = 0 key = asmpistr(key, &ckey) CHKERR( MPI_Info_get_valuelen(self.ob_mpi, ckey, &dummy, &haskey) ) return haskey def __iter__(self) -> Iterator[str]: return iter(self.keys()) def __getitem__(self, key: str) -> str: if not self: raise KeyError(key) cdef object value = self.Get(key) if value is None: raise KeyError(key) return value def __setitem__(self, key: str, value: str) -> None: if not self: raise KeyError(key) self.Set(key, value) def __delitem__(self, key: str) -> None: if not self: raise KeyError(key) if key not in self: raise KeyError(key) self.Delete(key) def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """info get""" if not self: return default cdef object value = self.Get(key) if value is None: return default return value def keys(self) -> List[str]: """info keys""" if not self: return [] cdef list keys = [] cdef int k = 0, nkeys = self.Get_nkeys() cdef object key for k from 0 <= k < nkeys: key = self.Get_nthkey(k) keys.append(key) return keys def values(self) -> List[str]: """info values""" if not self: return [] cdef list values = [] cdef int k = 0, nkeys = self.Get_nkeys() cdef object key, val for k from 0 <= k < nkeys: key = self.Get_nthkey(k) val = self.Get(key) values.append(val) return values def items(self) -> List[Tuple[str, str]]: """info items""" if not self: return [] cdef list items = [] cdef int k = 0, nkeys = self.Get_nkeys() cdef object key, value for k from 0 <= k < nkeys: key = self.Get_nthkey(k) value = self.Get(key) items.append((key, value)) return items def update( self, other: Union[Info, Mapping[str, str], Iterable[Tuple[str, str]]] = (), **kwds: str, ) -> None: """info update""" if not self: raise KeyError cdef object key, value if hasattr(other, 'keys'): for key in other.keys(): self.Set(key, other[key]) else: for key, value in other: self.Set(key, value) for key, value in kwds.items(): self.Set(key, value) def pop(self, key: str, *default: str) -> str: """info pop""" cdef object value = None if self: value = self.Get(key) if value is not None: self.Delete(key) return value if default: value, = default return value raise KeyError(key) def popitem(self) -> Tuple[str, str]: """info popitem""" if not self: raise KeyError cdef object key, value cdef int nkeys = self.Get_nkeys() if nkeys == 0: raise KeyError key = self.Get_nthkey(nkeys - 1) value = self.Get(key) self.Delete(key) return (key, value) def copy(self) -> Info: """info copy""" if not self: return Info() return self.Dup() def clear(self) -> None: """info clear""" if not self: return None cdef object key cdef int k = 0, nkeys = self.Get_nkeys() while k < nkeys: key = self.Get_nthkey(0) self.Delete(key) k += 1 cdef Info __INFO_NULL__ = new_Info(MPI_INFO_NULL) cdef Info __INFO_ENV__ = new_Info(MPI_INFO_ENV) # Predefined info handles # 
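# Illustrative usage sketch (comment only): thanks to the mapping emulation
# above, an Info object can be manipulated with dict-style operations. The
# hint keys below are examples only; which hints are honored depends on the
# underlying MPI implementation:
#
#     from mpi4py import MPI
#     info = MPI.Info.Create()
#     info["access_style"] = "read_once"
#     info.update(collective_buffering="true")
#     for key, value in info.items():
#         print(key, value)
#     info.Free()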
----------------------- INFO_NULL = __INFO_NULL__ #: Null info handle INFO_ENV = __INFO_ENV__ #: Environment info handle mpi4py-3.1.6/src/mpi4py/MPI/MPI.pyx000066400000000000000000000231451460670727200166100ustar00rootroot00000000000000__doc__ = """ Message Passing Interface. """ from mpi4py.libmpi cimport * include "stdlib.pxi" include "typing.pxi" include "atimport.pxi" bootstrap() initialize() include "asstring.pxi" include "asbuffer.pxi" include "asmemory.pxi" include "asarray.pxi" include "helpers.pxi" include "attrimpl.pxi" include "mpierrhdl.pxi" include "msgbuffer.pxi" include "msgpickle.pxi" include "CAPI.pxi" # Assorted constants # ------------------ UNDEFINED = MPI_UNDEFINED #: Undefined integer value ANY_SOURCE = MPI_ANY_SOURCE #: Wildcard source value for receives ANY_TAG = MPI_ANY_TAG #: Wildcard tag value for receives PROC_NULL = MPI_PROC_NULL #: Special process rank for send/receive ROOT = MPI_ROOT #: Root process for collective inter-communications BOTTOM = __BOTTOM__ #: Special address for buffers IN_PLACE = __IN_PLACE__ #: *In-place* option for collective communications # Predefined Attribute Keyvals # ---------------------------- KEYVAL_INVALID = MPI_KEYVAL_INVALID TAG_UB = MPI_TAG_UB HOST = MPI_HOST IO = MPI_IO WTIME_IS_GLOBAL = MPI_WTIME_IS_GLOBAL UNIVERSE_SIZE = MPI_UNIVERSE_SIZE APPNUM = MPI_APPNUM LASTUSEDCODE = MPI_LASTUSEDCODE WIN_BASE = MPI_WIN_BASE WIN_SIZE = MPI_WIN_SIZE WIN_DISP_UNIT = MPI_WIN_DISP_UNIT WIN_CREATE_FLAVOR = MPI_WIN_CREATE_FLAVOR WIN_FLAVOR = MPI_WIN_CREATE_FLAVOR WIN_MODEL = MPI_WIN_MODEL include "Exception.pyx" include "Datatype.pyx" include "Status.pyx" include "Request.pyx" include "Message.pyx" include "Op.pyx" include "Group.pyx" include "Info.pyx" include "Errhandler.pyx" include "Comm.pyx" include "Win.pyx" include "File.pyx" # Memory Allocation # ----------------- def Alloc_mem(Aint size: int, Info info: Info = INFO_NULL) -> memory: """ Allocate memory for message passing and RMA """ cdef void *base = NULL CHKERR( MPI_Alloc_mem(size, info.ob_mpi, &base) ) return tomemory(base, size) def Free_mem(mem: memory) -> None: """ Free memory allocated with `Alloc_mem()` """ cdef void *base = NULL cdef memory m = asmemory(mem, &base, NULL) CHKERR( MPI_Free_mem(base) ) m.release() # Initialization and Exit # ----------------------- def Init() -> None: """ Initialize the MPI execution environment """ CHKERR( MPI_Init(NULL, NULL) ) initialize() def Finalize() -> None: """ Terminate the MPI execution environment """ finalize() CHKERR( MPI_Finalize() ) # Levels of MPI threading support # ------------------------------- THREAD_SINGLE = MPI_THREAD_SINGLE #: Only one thread will execute THREAD_FUNNELED = MPI_THREAD_FUNNELED #: MPI calls are *funneled* to the main thread THREAD_SERIALIZED = MPI_THREAD_SERIALIZED #: MPI calls are *serialized* THREAD_MULTIPLE = MPI_THREAD_MULTIPLE #: Multiple threads may call MPI def Init_thread(int required: int = THREAD_MULTIPLE) -> int: """ Initialize the MPI execution environment """ cdef int provided = MPI_THREAD_SINGLE CHKERR( MPI_Init_thread(NULL, NULL, required, &provided) ) initialize() return provided def Query_thread() -> int: """ Return the level of thread support provided by the MPI library """ cdef int provided = MPI_THREAD_SINGLE CHKERR( MPI_Query_thread(&provided) ) return provided def Is_thread_main() -> bool: """ Indicate whether this thread called `Init` or `Init_thread` """ cdef int flag = 1 CHKERR( MPI_Is_thread_main(&flag) ) return flag def Is_initialized() -> bool: """ Indicates whether `Init` has 
been called """ cdef int flag = 0 CHKERR( MPI_Initialized(&flag) ) return flag def Is_finalized() -> bool: """ Indicates whether `Finalize` has completed """ cdef int flag = 0 CHKERR( MPI_Finalized(&flag) ) return flag # Implementation Information # -------------------------- # MPI Version Number # ----------------- VERSION = MPI_VERSION SUBVERSION = MPI_SUBVERSION def Get_version() -> Tuple[int, int]: """ Obtain the version number of the MPI standard supported by the implementation as a tuple ``(version, subversion)`` """ cdef int version = 1 cdef int subversion = 0 CHKERR( MPI_Get_version(&version, &subversion) ) return (version, subversion) def Get_library_version() -> str: """ Obtain the version string of the MPI library """ cdef char name[MPI_MAX_LIBRARY_VERSION_STRING+1] cdef int nlen = 0 CHKERR( MPI_Get_library_version(name, &nlen) ) return tompistr(name, nlen) # Environmental Inquires # ---------------------- def Get_processor_name() -> str: """ Obtain the name of the calling processor """ cdef char name[MPI_MAX_PROCESSOR_NAME+1] cdef int nlen = 0 CHKERR( MPI_Get_processor_name(name, &nlen) ) return tompistr(name, nlen) # Timers and Synchronization # -------------------------- def Wtime() -> float: """ Return an elapsed time on the calling processor """ return MPI_Wtime() def Wtick() -> float: """ Return the resolution of `Wtime` """ return MPI_Wtick() # Control of Profiling # -------------------- def Pcontrol(int level: int) -> None: """ Control profiling """ if level < 0 or level > 2: CHKERR( MPI_ERR_ARG ) CHKERR( MPI_Pcontrol(level) ) # Maximum string sizes # -------------------- # MPI-1 MAX_PROCESSOR_NAME = MPI_MAX_PROCESSOR_NAME MAX_ERROR_STRING = MPI_MAX_ERROR_STRING # MPI-2 MAX_PORT_NAME = MPI_MAX_PORT_NAME MAX_INFO_KEY = MPI_MAX_INFO_KEY MAX_INFO_VAL = MPI_MAX_INFO_VAL MAX_OBJECT_NAME = MPI_MAX_OBJECT_NAME MAX_DATAREP_STRING = MPI_MAX_DATAREP_STRING # MPI-3 MAX_LIBRARY_VERSION_STRING = MPI_MAX_LIBRARY_VERSION_STRING # -------------------------------------------------------------------- cdef extern from *: int PyMPI_Get_vendor(const char**,int*,int*,int*) nogil def get_vendor() -> Tuple[str, Tuple[int, int, int]]: """ Infomation about the underlying MPI implementation Returns: - a string with the name of the MPI implementation - an integer 3-tuple version ``(major, minor, micro)`` """ cdef const char *name=NULL cdef int major=0, minor=0, micro=0 CHKERR( PyMPI_Get_vendor(&name, &major, &minor, µ) ) return (mpistr(name), (major, minor, micro)) # -------------------------------------------------------------------- cdef extern from "Python.h": ctypedef ssize_t Py_intptr_t ctypedef size_t Py_uintptr_t cdef inline int _mpi_type(object arg, type cls) except -1: if isinstance(arg, type): if issubclass(arg, cls): return 1 else: if isinstance(arg, cls): return 1 return 0 def _sizeof(arg: Any) -> int: """ Size in bytes of the underlying MPI handle """ if _mpi_type(arg, Status): return sizeof(MPI_Status) if _mpi_type(arg, Datatype): return sizeof(MPI_Datatype) if _mpi_type(arg, Request): return sizeof(MPI_Request) if _mpi_type(arg, Message): return sizeof(MPI_Message) if _mpi_type(arg, Op): return sizeof(MPI_Op) if _mpi_type(arg, Group): return sizeof(MPI_Group) if _mpi_type(arg, Info): return sizeof(MPI_Info) if _mpi_type(arg, Errhandler): return sizeof(MPI_Errhandler) if _mpi_type(arg, Comm): return sizeof(MPI_Comm) if _mpi_type(arg, Win): return sizeof(MPI_Win) if _mpi_type(arg, File): return sizeof(MPI_File) raise TypeError("expecting an MPI type or instance") def _addressof(arg: 
Any) -> int: """ Memory address of the underlying MPI handle """ cdef void *ptr = NULL if isinstance(arg, Status): ptr = &(arg).ob_mpi elif isinstance(arg, Datatype): ptr = &(arg).ob_mpi elif isinstance(arg, Request): ptr = &(arg).ob_mpi elif isinstance(arg, Message): ptr = &(arg).ob_mpi elif isinstance(arg, Op): ptr = &(arg).ob_mpi elif isinstance(arg, Group): ptr = &(arg).ob_mpi elif isinstance(arg, Info): ptr = &(arg).ob_mpi elif isinstance(arg, Errhandler): ptr = &(arg).ob_mpi elif isinstance(arg, Comm): ptr = &(arg).ob_mpi elif isinstance(arg, Win): ptr = &(arg).ob_mpi elif isinstance(arg, File): ptr = &(arg).ob_mpi else: raise TypeError("expecting an MPI instance") return PyLong_FromVoidPtr(ptr) def _handleof(arg: Any) -> int: """ Unsigned integer value with the underlying MPI handle """ if isinstance(arg, Status): raise NotImplementedError elif isinstance(arg, Datatype): return ((arg).ob_mpi) elif isinstance(arg, Request): return ((arg).ob_mpi) elif isinstance(arg, Message): return ((arg).ob_mpi) elif isinstance(arg, Op): return ((arg).ob_mpi) elif isinstance(arg, Group): return ((arg).ob_mpi) elif isinstance(arg, Info): return ((arg).ob_mpi) elif isinstance(arg, Errhandler): return ((arg).ob_mpi) elif isinstance(arg, Comm): return ((arg).ob_mpi) elif isinstance(arg, Win): return ((arg).ob_mpi) elif isinstance(arg, File): return ((arg).ob_mpi) else: raise TypeError("expecting an MPI instance") # -------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/Message.pyx000066400000000000000000000143551460670727200175520ustar00rootroot00000000000000cdef class Message: """ Matched message handle """ def __cinit__(self, Message message: Optional[Message] = None): self.ob_mpi = MPI_MESSAGE_NULL if message is None: return self.ob_mpi = message.ob_mpi self.ob_buf = message.ob_buf def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Message(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Message): return NotImplemented cdef Message s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_MESSAGE_NULL # Matching Probe # -------------- @classmethod def Probe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Message: """ Blocking test for a matched message """ cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Mprobe( source, tag, comm.ob_mpi, &cmessage, statusp) ) cdef Message message = Message.__new__(cls) message.ob_mpi = cmessage return message @classmethod def Iprobe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Optional[Message]: """ Nonblocking test for a matched message """ cdef int flag = 0 cdef MPI_Message cmessage = MPI_MESSAGE_NULL cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Improbe( source, tag, comm.ob_mpi, &flag, &cmessage, statusp) ) if flag == 0: return None cdef Message message = Message.__new__(cls) message.ob_mpi = cmessage return message # Matched receives # ---------------- def Recv( self, buf: BufSpec, Status status: Optional[Status] = None, ) -> None: """ Blocking receive of matched message """ cdef MPI_Message message = 
self.ob_mpi cdef int source = MPI_ANY_SOURCE if message == MPI_MESSAGE_NO_PROC: source = MPI_PROC_NULL cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Mrecv( rmsg.buf, rmsg.count, rmsg.dtype, &message, statusp) ) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message def Irecv(self, buf: BufSpec) -> Request: """ Nonblocking receive of matched message """ cdef MPI_Message message = self.ob_mpi cdef int source = MPI_ANY_SOURCE if message == MPI_MESSAGE_NO_PROC: source = MPI_PROC_NULL cdef _p_msg_p2p rmsg = message_p2p_recv(buf, source) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Imrecv( rmsg.buf, rmsg.count, rmsg.dtype, &message, &request.ob_mpi) ) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message request.ob_buf = rmsg return request # Python Communication # -------------------- # @classmethod def probe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Message: """Blocking test for a matched message""" cdef Message message = Message.__new__(cls) cdef MPI_Status *statusp = arg_Status(status) message.ob_buf = PyMPI_mprobe(source, tag, comm.ob_mpi, &message.ob_mpi, statusp) return message # @classmethod def iprobe( cls, Comm comm: Comm, int source: int = ANY_SOURCE, int tag: int = ANY_TAG, Status status: Optional[Status] = None, ) -> Optional[Message]: """Nonblocking test for a matched message""" cdef int flag = 0 cdef Message message = Message.__new__(cls) cdef MPI_Status *statusp = arg_Status(status) message.ob_buf = PyMPI_improbe(source, tag, comm.ob_mpi, &flag, &message.ob_mpi, statusp) if flag == 0: return None return message # def recv(self, Status status: Optional[Status] = None) -> Any: """Blocking receive of matched message""" cdef object rmsg = self.ob_buf cdef MPI_Message message = self.ob_mpi cdef MPI_Status *statusp = arg_Status(status) rmsg = PyMPI_mrecv(rmsg, &message, statusp) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message if self.ob_mpi == MPI_MESSAGE_NULL: self.ob_buf = None return rmsg # def irecv(self) -> Request: """Nonblocking receive of matched message""" cdef object rmsg = self.ob_buf cdef MPI_Message message = self.ob_mpi cdef Request request = Request.__new__(Request) request.ob_buf = PyMPI_imrecv(rmsg, &message, &request.ob_mpi) if self is not __MESSAGE_NO_PROC__: self.ob_mpi = message if self.ob_mpi == MPI_MESSAGE_NULL: self.ob_buf = None return request # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Message_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Message: """ """ cdef Message message = Message.__new__(Message) message.ob_mpi = MPI_Message_f2c(arg) return message cdef Message __MESSAGE_NULL__ = new_Message ( MPI_MESSAGE_NULL ) cdef Message __MESSAGE_NO_PROC__ = new_Message ( MPI_MESSAGE_NO_PROC ) # Predefined message handles # -------------------------- MESSAGE_NULL = __MESSAGE_NULL__ #: Null message handle MESSAGE_NO_PROC = __MESSAGE_NO_PROC__ #: No-proc message handle mpi4py-3.1.6/src/mpi4py/MPI/Op.pyx000066400000000000000000000135711460670727200165430ustar00rootroot00000000000000cdef class Op: """ Operation object """ def __cinit__(self, Op op: Optional[Op] = None): self.ob_mpi = MPI_OP_NULL if op is None: return self.ob_mpi = op.ob_mpi self.ob_func = op.ob_func self.ob_usrid = op.ob_usrid def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Op(&self.ob_mpi) ) op_user_del(&self.ob_usrid) def 
__richcmp__(self, other, int op): if not isinstance(other, Op): return NotImplemented cdef Op s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_OP_NULL def __call__(self, x: Any, y: Any) -> Any: if self.ob_func != NULL: return self.ob_func(x, y) else: return op_user_py(self.ob_usrid, x, y, None) @classmethod def Create( cls, function: Callable[[Buffer, Buffer, Datatype], None], bint commute: bool = False, ) -> Op: """ Create a user-defined operation """ cdef Op op = Op.__new__(Op) cdef MPI_User_function *cfunction = NULL op.ob_usrid = op_user_new(function, &cfunction) CHKERR( MPI_Op_create(cfunction, commute, &op.ob_mpi) ) return op def Free(self) -> None: """ Free the operation """ CHKERR( MPI_Op_free(&self.ob_mpi) ) op_user_del(&self.ob_usrid) if self is __MAX__ : self.ob_mpi = MPI_MAX elif self is __MIN__ : self.ob_mpi = MPI_MIN elif self is __SUM__ : self.ob_mpi = MPI_SUM elif self is __PROD__ : self.ob_mpi = MPI_PROD elif self is __LAND__ : self.ob_mpi = MPI_LAND elif self is __BAND__ : self.ob_mpi = MPI_BAND elif self is __LOR__ : self.ob_mpi = MPI_LOR elif self is __BOR__ : self.ob_mpi = MPI_BOR elif self is __LXOR__ : self.ob_mpi = MPI_LXOR elif self is __BXOR__ : self.ob_mpi = MPI_BXOR elif self is __MAXLOC__ : self.ob_mpi = MPI_MAXLOC elif self is __MINLOC__ : self.ob_mpi = MPI_MINLOC elif self is __REPLACE__ : self.ob_mpi = MPI_REPLACE elif self is __NO_OP__ : self.ob_mpi = MPI_NO_OP # Process-local reduction # ----------------------- def Is_commutative(self) -> bool: """ Query reduction operations for their commutativity """ cdef int flag = 0 CHKERR( MPI_Op_commutative(self.ob_mpi, &flag) ) return flag property is_commutative: """is commutative""" def __get__(self) -> bool: return self.Is_commutative() def Reduce_local(self, inbuf: BufSpec, inoutbuf: BufSpec) -> None: """ Apply a reduction operator to local data """ # get *in* and *inout* buffers cdef _p_msg_cco m = message_cco() m.for_cro_send(inbuf, 0) m.for_cro_recv(inoutbuf, 0) # check counts and datatypes if m.scount != m.rcount: raise ValueError( "mismatch in inbuf count %d and inoutbuf count %d" % (m.scount, m.rcount)) if (m.stype != m.rtype): raise ValueError( "mismatch in inbuf and inoutbuf MPI datatypes") # do local reduction with nogil: CHKERR( MPI_Reduce_local( m.sbuf, m.rbuf, m.rcount, m.rtype, self.ob_mpi) ) property is_predefined: """is a predefined operation""" def __get__(self) -> bool: cdef MPI_Op op = self.ob_mpi return (op == MPI_OP_NULL or op == MPI_MAX or op == MPI_MIN or op == MPI_SUM or op == MPI_PROD or op == MPI_LAND or op == MPI_BAND or op == MPI_LOR or op == MPI_BOR or op == MPI_LXOR or op == MPI_BXOR or op == MPI_MAXLOC or op == MPI_MINLOC or op == MPI_REPLACE or op == MPI_NO_OP) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Op_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Op: """ """ cdef Op op = Op.__new__(Op) op.ob_mpi = MPI_Op_f2c(arg) return op cdef Op __OP_NULL__ = new_Op( MPI_OP_NULL ) cdef Op __MAX__ = new_Op( MPI_MAX ) cdef Op __MIN__ = new_Op( MPI_MIN ) cdef Op __SUM__ = new_Op( MPI_SUM ) cdef Op __PROD__ = new_Op( MPI_PROD ) cdef Op __LAND__ = new_Op( MPI_LAND ) cdef Op __BAND__ = new_Op( MPI_BAND ) cdef Op __LOR__ = new_Op( MPI_LOR ) cdef Op __BOR__ = new_Op( MPI_BOR ) cdef Op __LXOR__ = new_Op( MPI_LXOR ) cdef 
Op __BXOR__ = new_Op( MPI_BXOR ) cdef Op __MAXLOC__ = new_Op( MPI_MAXLOC ) cdef Op __MINLOC__ = new_Op( MPI_MINLOC ) cdef Op __REPLACE__ = new_Op( MPI_REPLACE ) cdef Op __NO_OP__ = new_Op( MPI_NO_OP ) # Predefined operation handles # ---------------------------- OP_NULL = __OP_NULL__ #: Null MAX = __MAX__ #: Maximum MIN = __MIN__ #: Minimum SUM = __SUM__ #: Sum PROD = __PROD__ #: Product LAND = __LAND__ #: Logical and BAND = __BAND__ #: Bit-wise and LOR = __LOR__ #: Logical or BOR = __BOR__ #: Bit-wise or LXOR = __LXOR__ #: Logical xor BXOR = __BXOR__ #: Bit-wise xor MAXLOC = __MAXLOC__ #: Maximum and location MINLOC = __MINLOC__ #: Minimum and location REPLACE = __REPLACE__ #: Replace (for RMA) NO_OP = __NO_OP__ #: No-op (for RMA) mpi4py-3.1.6/src/mpi4py/MPI/Request.pyx000066400000000000000000000332751460670727200176200ustar00rootroot00000000000000cdef class Request: """ Request handle """ def __cinit__(self, Request request: Optional[Request] = None): self.ob_mpi = MPI_REQUEST_NULL if request is None: return self.ob_mpi = request.ob_mpi self.ob_buf = request.ob_buf def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Request(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Request): return NotImplemented cdef Request s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_REQUEST_NULL # Completion Operations # --------------------- def Wait(self, Status status: Optional[Status] = None) -> Literal[True]: """ Wait for a send or receive to complete """ cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Wait( &self.ob_mpi, statusp) ) if self.ob_mpi == MPI_REQUEST_NULL: self.ob_buf = None return True def Test(self, Status status: Optional[Status] = None) -> bool: """ Test for the completion of a send or receive """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Test( &self.ob_mpi, &flag, statusp) ) if self.ob_mpi == MPI_REQUEST_NULL: self.ob_buf = None return flag def Free(self) -> None: """ Free a communication request """ with nogil: CHKERR( MPI_Request_free(&self.ob_mpi) ) def Get_status(self, Status status: Optional[Status] = None) -> bool: """ Non-destructive test for the completion of a request """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Request_get_status( self.ob_mpi, &flag, statusp) ) return flag # Multiple Completions # -------------------- @classmethod def Waitany( cls, requests: Sequence[Request], Status status: Optional[Status] = None, ) -> int: """ Wait for any previously initiated request to complete """ cdef int count = 0 cdef MPI_Request *irequests = NULL cdef int index = MPI_UNDEFINED cdef MPI_Status *statusp = arg_Status(status) # cdef tmp = acquire_rs(requests, None, &count, &irequests, NULL) try: with nogil: CHKERR( MPI_Waitany( count, irequests, &index, statusp) ) finally: release_rs(requests, None, count, irequests, 0, NULL) return index @classmethod def Testany( cls, requests: Sequence[Request], Status status: Optional[Status] = None, ) -> Tuple[int, bool]: """ Test for completion of any previously initiated request """ cdef int count = 0 cdef MPI_Request *irequests = NULL cdef int index = MPI_UNDEFINED cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) # cdef tmp = 
acquire_rs(requests, None, &count, &irequests, NULL) try: with nogil: CHKERR( MPI_Testany( count, irequests, &index, &flag, statusp) ) finally: release_rs(requests, None, count, irequests, 0, NULL) # return (index, flag) @classmethod def Waitall( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None, ) -> Literal[True]: """ Wait for all previously initiated requests to complete """ cdef int count = 0 cdef MPI_Request *irequests = NULL cdef MPI_Status *istatuses = MPI_STATUSES_IGNORE # cdef tmp = acquire_rs(requests, statuses, &count, &irequests, &istatuses) try: with nogil: CHKERR( MPI_Waitall( count, irequests, istatuses) ) finally: release_rs(requests, statuses, count, irequests, count, istatuses) return True @classmethod def Testall( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None, ) -> bool: """ Test for completion of all previously initiated requests """ cdef int count = 0 cdef MPI_Request *irequests = NULL cdef int flag = 0 cdef MPI_Status *istatuses = MPI_STATUSES_IGNORE # cdef tmp = acquire_rs(requests, statuses, &count, &irequests, &istatuses) try: with nogil: CHKERR( MPI_Testall( count, irequests, &flag, istatuses) ) finally: release_rs(requests, statuses,count, irequests, count, istatuses) return flag @classmethod def Waitsome( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None, ) -> Optional[List[int]]: """ Wait for some previously initiated requests to complete """ cdef int incount = 0 cdef MPI_Request *irequests = NULL cdef int outcount = MPI_UNDEFINED, *iindices = NULL cdef MPI_Status *istatuses = MPI_STATUSES_IGNORE # cdef tmp1 = acquire_rs(requests, statuses, &incount, &irequests, &istatuses) cdef tmp2 = newarray(incount, &iindices) try: with nogil: CHKERR( MPI_Waitsome( incount, irequests, &outcount, iindices, istatuses) ) finally: release_rs(requests, statuses, incount, irequests, outcount, istatuses) # cdef int i = 0 cdef object indices = None if outcount != MPI_UNDEFINED: indices = [iindices[i] for i from 0 <= i < outcount] return indices @classmethod def Testsome( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None, ) -> Optional[List[int]]: """ Test for completion of some previously initiated requests """ cdef int incount = 0 cdef MPI_Request *irequests = NULL cdef int outcount = MPI_UNDEFINED, *iindices = NULL cdef MPI_Status *istatuses = MPI_STATUSES_IGNORE # cdef tmp1 = acquire_rs(requests, statuses, &incount, &irequests, &istatuses) cdef tmp2 = newarray(incount, &iindices) try: with nogil: CHKERR( MPI_Testsome( incount, irequests, &outcount, iindices, istatuses) ) finally: release_rs(requests, statuses, incount, irequests, outcount, istatuses) # cdef int i = 0 cdef object indices = None if outcount != MPI_UNDEFINED: indices = [iindices[i] for i from 0 <= i < outcount] return indices # Cancel # ------ def Cancel(self) -> None: """ Cancel a communication request """ with nogil: CHKERR( MPI_Cancel(&self.ob_mpi) ) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Request_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Request: """ """ cdef Request request = Request.__new__(Request) if issubclass(cls, Prequest): request = Prequest.__new__(Prequest) if issubclass(cls, Grequest): request = Grequest.__new__(Grequest) request.ob_mpi = MPI_Request_f2c(arg) return request # Python Communication # -------------------- # def wait( self, Status status: Optional[Status] = None, ) -> Any: """ Wait for a send or receive to complete """ cdef msg = 
PyMPI_wait(self, status) return msg # def test( self, Status status: Optional[Status] = None, ) -> Tuple[bool, Optional[Any]]: """ Test for the completion of a send or receive """ cdef int flag = 0 cdef msg = PyMPI_test(self, &flag, status) return (flag, msg) # def get_status( self, Status status: Optional[Status] = None, ) -> bool: """ Non-destructive test for the completion of a request """ cdef int flag = 0 cdef MPI_Status *statusp = arg_Status(status) with nogil: CHKERR( MPI_Request_get_status( self.ob_mpi, &flag, statusp) ) return flag # @classmethod def waitany( cls, requests: Sequence[Request], Status status: Optional[Status] = None ) -> Tuple[int, Any]: """ Wait for any previously initiated request to complete """ cdef int index = MPI_UNDEFINED cdef msg = PyMPI_waitany(requests, &index, status) return (index, msg) # @classmethod def testany( cls, requests: Sequence[Request], Status status: Optional[Status] = None, ) -> Tuple[int, bool, Optional[Any]]: """ Test for completion of any previously initiated request """ cdef int index = MPI_UNDEFINED cdef int flag = 0 cdef msg = PyMPI_testany(requests, &index, &flag, status) return (index, flag, msg) # @classmethod def waitall( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None, ) -> List[Any]: """ Wait for all previously initiated requests to complete """ cdef msg = PyMPI_waitall(requests, statuses) return msg # @classmethod def testall( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None ) -> Tuple[bool, Optional[List[Any]]]: """ Test for completion of all previously initiated requests """ cdef int flag = 0 cdef msg = PyMPI_testall(requests, &flag, statuses) return (flag, msg) # @classmethod def waitsome( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None, ) -> Tuple[Optional[List[int]], Optional[List[Any]]]: """ Wait for some previously initiated requests to complete """ return PyMPI_waitsome(requests, statuses) # @classmethod def testsome( cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None, ) -> Tuple[Optional[List[int]], Optional[List[Any]]]: """ Test for completion of some previously initiated requests """ return PyMPI_testsome(requests, statuses) # def cancel(self) -> None: """ Cancel a communication request """ with nogil: CHKERR( MPI_Cancel(&self.ob_mpi) ) cdef class Prequest(Request): """ Persistent request handle """ def __cinit__(self, Request request: Optional[Request] = None): if self.ob_mpi == MPI_REQUEST_NULL: return (request) def Start(self) -> None: """ Initiate a communication with a persistent request """ with nogil: CHKERR( MPI_Start(&self.ob_mpi) ) @classmethod def Startall(cls, requests: List[Prequest]) -> None: """ Start a collection of persistent requests """ cdef int count = 0 cdef MPI_Request *irequests = NULL cdef tmp = acquire_rs(requests, None, &count, &irequests, NULL) # try: with nogil: CHKERR( MPI_Startall(count, irequests) ) finally: release_rs(requests, None, count, irequests, 0, NULL) cdef class Grequest(Request): """ Generalized request handle """ def __cinit__(self, Request request: Optional[Request] = None): self.ob_grequest = self.ob_mpi if self.ob_mpi == MPI_REQUEST_NULL: return (request) @classmethod def Start( cls, query_fn: Callable[..., None], free_fn: Callable[..., None], cancel_fn: Callable[..., None], args: Optional[Tuple[Any]] = None, kargs: Optional[Dict[str, Any]] = None, ) -> Grequest: """ Create and return a user-defined request """ cdef Grequest request = Grequest.__new__(Grequest) cdef _p_greq 
state = \ _p_greq(query_fn, free_fn, cancel_fn, args, kargs) with nogil: CHKERR( MPI_Grequest_start( greq_query_fn, greq_free_fn, greq_cancel_fn, state, &request.ob_mpi) ) Py_INCREF(state) request.ob_grequest = request.ob_mpi return request def Complete(self) -> None: """ Notify that a user-defined request is complete """ if self.ob_mpi != MPI_REQUEST_NULL: if self.ob_mpi != self.ob_grequest: raise MPIException(MPI_ERR_REQUEST) cdef MPI_Request grequest = self.ob_grequest self.ob_grequest = self.ob_mpi ## or MPI_REQUEST_NULL ?? with nogil: CHKERR( MPI_Grequest_complete(grequest) ) self.ob_grequest = self.ob_mpi ## or MPI_REQUEST_NULL ?? cdef Request __REQUEST_NULL__ = new_Request(MPI_REQUEST_NULL) # Predefined request handles # -------------------------- REQUEST_NULL = __REQUEST_NULL__ #: Null request handle mpi4py-3.1.6/src/mpi4py/MPI/Status.pyx000066400000000000000000000125741460670727200174520ustar00rootroot00000000000000cdef class Status: """ Status object """ def __cinit__(self, Status status: Optional[Status] = None): cdef MPI_Status *s = &self.ob_mpi CHKERR( PyMPI_Status_set_source (s, MPI_ANY_SOURCE ) ) CHKERR( PyMPI_Status_set_tag (s, MPI_ANY_TAG ) ) CHKERR( PyMPI_Status_set_error (s, MPI_SUCCESS ) ) if status is None: return self.ob_mpi = status.ob_mpi def __richcmp__(self, other, int op): if not isinstance(other, Status): return NotImplemented cdef Status s = self, o = other cdef int ne = memcmp(&s.ob_mpi, &o.ob_mpi, sizeof(MPI_Status)) if op == Py_EQ: return (ne == 0) elif op == Py_NE: return (ne != 0) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def Get_source(self) -> int: """ Get message source """ cdef int source = MPI_ANY_SOURCE CHKERR( PyMPI_Status_get_source(&self.ob_mpi, &source) ) return source def Set_source(self, int source: int) -> None: """ Set message source """ CHKERR( PyMPI_Status_set_source(&self.ob_mpi, source) ) property source: """source""" def __get__(self) -> int: return self.Get_source() def __set__(self, value: int): self.Set_source(value) def Get_tag(self) -> int: """ Get message tag """ cdef int tag = MPI_ANY_TAG CHKERR( PyMPI_Status_get_tag(&self.ob_mpi, &tag) ) return tag def Set_tag(self, int tag: int) -> None: """ Set message tag """ CHKERR( PyMPI_Status_set_tag(&self.ob_mpi, tag) ) property tag: """tag""" def __get__(self) -> int: return self.Get_tag() def __set__(self, value: int): self.Set_tag(value) def Get_error(self) -> int: """ Get message error """ cdef int error = MPI_SUCCESS CHKERR( PyMPI_Status_get_error(&self.ob_mpi, &error) ) return error def Set_error(self, int error: int) -> None: """ Set message error """ CHKERR( PyMPI_Status_set_error(&self.ob_mpi, error) ) property error: """error""" def __get__(self) -> int: return self.Get_error() def __set__(self, value: int): self.Set_error(value) def Get_count(self, Datatype datatype: Datatype = BYTE) -> int: """ Get the number of *top level* elements """ cdef MPI_Datatype dtype = datatype.ob_mpi cdef int count = MPI_UNDEFINED CHKERR( MPI_Get_count(&self.ob_mpi, dtype, &count) ) return count property count: """byte count""" def __get__(self) -> int: return self.Get_count(__BYTE__) def Get_elements(self, Datatype datatype: Datatype) -> int: """ Get the number of basic elements in a datatype """ cdef MPI_Datatype dtype = datatype.ob_mpi cdef MPI_Count elements = MPI_UNDEFINED CHKERR( MPI_Get_elements_x(&self.ob_mpi, dtype, &elements) ) return elements def Set_elements( self, Datatype datatype: Datatype, Count count: 
int, ) -> None: """ Set the number of elements in a status .. note:: This should be only used when implementing query callback functions for generalized requests """ cdef MPI_Datatype dtype = datatype.ob_mpi CHKERR( MPI_Status_set_elements_x(&self.ob_mpi, dtype, count) ) def Is_cancelled(self) -> bool: """ Test to see if a request was cancelled """ cdef int flag = 0 CHKERR( MPI_Test_cancelled(&self.ob_mpi, &flag) ) return flag def Set_cancelled(self, bint flag: bool) -> None: """ Set the cancelled state associated with a status .. note:: This should be only used when implementing query callback functions for generalized requests """ CHKERR( MPI_Status_set_cancelled(&self.ob_mpi, flag) ) property cancelled: """ cancelled state """ def __get__(self) -> bool: return self.Is_cancelled() def __set__(self, value: int): self.Set_cancelled(value) # Fortran Handle # -------------- def py2f(self) -> List[int]: """ """ cdef Status status = self cdef Py_ssize_t i = 0 cdef Py_ssize_t n = (sizeof(MPI_Status)/sizeof(int)) cdef MPI_Status *c_status = &status.ob_mpi cdef MPI_Fint *f_status = NULL cdef tmp = allocate(n+1, sizeof(MPI_Fint), &f_status) CHKERR( MPI_Status_c2f(c_status, f_status) ) return [f_status[i] for i from 0 <= i < n] @classmethod def f2py(cls, arg: List[int]) -> Status: """ """ cdef Status status = Status.__new__(Status) cdef Py_ssize_t i = 0 cdef Py_ssize_t n = (sizeof(MPI_Status)/sizeof(int)) cdef MPI_Status *c_status = &status.ob_mpi cdef MPI_Fint *f_status = NULL cdef tmp = allocate(n+1, sizeof(MPI_Fint), &f_status) for i from 0 <= i < n: f_status[i] = arg[i] CHKERR( MPI_Status_f2c(f_status, c_status) ) return status mpi4py-3.1.6/src/mpi4py/MPI/Win.pyx000066400000000000000000000546631460670727200167310ustar00rootroot00000000000000# Create flavors # -------------- WIN_FLAVOR_CREATE = MPI_WIN_FLAVOR_CREATE WIN_FLAVOR_ALLOCATE = MPI_WIN_FLAVOR_ALLOCATE WIN_FLAVOR_DYNAMIC = MPI_WIN_FLAVOR_DYNAMIC WIN_FLAVOR_SHARED = MPI_WIN_FLAVOR_SHARED # Memory model # ------------ WIN_SEPARATE = MPI_WIN_SEPARATE WIN_UNIFIED = MPI_WIN_UNIFIED # Assertion modes # --------------- MODE_NOCHECK = MPI_MODE_NOCHECK MODE_NOSTORE = MPI_MODE_NOSTORE MODE_NOPUT = MPI_MODE_NOPUT MODE_NOPRECEDE = MPI_MODE_NOPRECEDE MODE_NOSUCCEED = MPI_MODE_NOSUCCEED # Lock types # ---------- LOCK_EXCLUSIVE = MPI_LOCK_EXCLUSIVE LOCK_SHARED = MPI_LOCK_SHARED cdef class Win: """ Window handle """ def __cinit__(self, Win win: Optional[Win] = None): self.ob_mpi = MPI_WIN_NULL if win is None: return self.ob_mpi = win.ob_mpi self.ob_mem = win.ob_mem def __dealloc__(self): if not (self.flags & PyMPI_OWNED): return CHKERR( del_Win(&self.ob_mpi) ) def __richcmp__(self, other, int op): if not isinstance(other, Win): return NotImplemented cdef Win s = self, o = other if op == Py_EQ: return (s.ob_mpi == o.ob_mpi) elif op == Py_NE: return (s.ob_mpi != o.ob_mpi) cdef mod = type(self).__module__ cdef cls = type(self).__name__ raise TypeError("unorderable type: '%s.%s'" % (mod, cls)) def __bool__(self) -> bool: return self.ob_mpi != MPI_WIN_NULL # Window Creation # --------------- @classmethod def Create( cls, memory: Union[Buffer, Bottom, None], int disp_unit: int = 1, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Win: """ Create an window object for one-sided communication """ cdef void *base = NULL cdef MPI_Aint size = 0 if is_BOTTOM(memory): base = MPI_BOTTOM memory = None elif memory is not None: memory = getbuffer_w(memory, &base, &size) cdef Win win = Win.__new__(Win) with nogil: CHKERR( MPI_Win_create( base, 
size, disp_unit, info.ob_mpi, comm.ob_mpi, &win.ob_mpi) ) win_set_eh(win.ob_mpi) win.ob_mem = memory return win @classmethod def Allocate( cls, Aint size: int, int disp_unit: int = 1, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Win: """ Create an window object for one-sided communication """ cdef void *base = NULL cdef Win win = Win.__new__(Win) with nogil: CHKERR( MPI_Win_allocate( size, disp_unit, info.ob_mpi, comm.ob_mpi, &base, &win.ob_mpi) ) win_set_eh(win.ob_mpi) return win @classmethod def Allocate_shared( cls, Aint size: int, int disp_unit: int = 1, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Win: """ Create an window object for one-sided communication """ cdef void *base = NULL cdef Win win = Win.__new__(Win) with nogil: CHKERR( MPI_Win_allocate_shared( size, disp_unit, info.ob_mpi, comm.ob_mpi, &base, &win.ob_mpi) ) win_set_eh(win.ob_mpi) return win def Shared_query(self, int rank: int) -> Tuple[memory, int]: """ Query the process-local address for remote memory segments created with `Win.Allocate_shared()` """ cdef void *base = NULL cdef MPI_Aint size = 0 cdef int disp_unit = 1 with nogil: CHKERR( MPI_Win_shared_query( self.ob_mpi, rank, &size, &disp_unit, &base) ) return (asbuffer(self, base, size, 0), disp_unit) @classmethod def Create_dynamic( cls, Info info: Info = INFO_NULL, Intracomm comm: Intracomm = COMM_SELF, ) -> Win: """ Create an window object for one-sided communication """ cdef Win win = Win.__new__(Win) with nogil: CHKERR( MPI_Win_create_dynamic( info.ob_mpi, comm.ob_mpi, &win.ob_mpi) ) win_set_eh(win.ob_mpi) win.ob_mem = {} return win def Attach(self, memory: Buffer) -> None: """ Attach a local memory region """ cdef void *base = NULL cdef MPI_Aint size = 0 memory = getbuffer_w(memory, &base, &size) with nogil: CHKERR( MPI_Win_attach(self.ob_mpi, base, size) ) try: (self.ob_mem)[base] = memory except: pass def Detach(self, memory: Buffer) -> None: """ Detach a local memory region """ cdef void *base = NULL memory = getbuffer_w(memory, &base, NULL) with nogil: CHKERR( MPI_Win_detach(self.ob_mpi, base) ) try: del (self.ob_mem)[base] except: pass def Free(self) -> None: """ Free a window """ with nogil: CHKERR( MPI_Win_free(&self.ob_mpi) ) self.ob_mem = None # Window Info # ----------- def Set_info(self, Info info: Info) -> None: """ Set new values for the hints associated with a window """ with nogil: CHKERR( MPI_Win_set_info(self.ob_mpi, info.ob_mpi) ) def Get_info(self) -> Info: """ Return the hints for a windows that are currently in use """ cdef Info info = Info.__new__(Info) with nogil: CHKERR( MPI_Win_get_info( self.ob_mpi, &info.ob_mpi) ) return info property info: """window info""" def __get__(self) -> Info: return self.Get_info() def __set__(self, value: Info): self.Set_info(value) # Window Group # ------------- def Get_group(self) -> Group: """ Return a duplicate of the group of the communicator used to create the window """ cdef Group group = Group() with nogil: CHKERR( MPI_Win_get_group(self.ob_mpi, &group.ob_mpi) ) return group property group: """window group""" def __get__(self) -> Group: return self.Get_group() # Window Attributes # ----------------- def Get_attr(self, int keyval: int) -> Optional[Union[int, Any]]: """ Retrieve attribute value by key """ cdef void *attrval = NULL cdef int flag = 0 CHKERR( MPI_Win_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if flag == 0: return None if attrval == NULL: return 0 # MPI-2 predefined attribute keyvals if keyval == MPI_WIN_BASE: return attrval 
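        # The branches below cover the remaining predefined window keyvals:
        # unlike WIN_BASE (whose value is the base address itself), these
        # attributes store a pointer to an integer quantity, so the value is
        # dereferenced before being returned (WIN_SIZE: window size in bytes,
        # WIN_DISP_UNIT: displacement unit, WIN_CREATE_FLAVOR / WIN_MODEL:
        # create flavor and memory model).
        # Illustrative query from user code (sketch only; the 1 KiB size is an
        # arbitrary example value):
        #
        #     from mpi4py import MPI
        #     win = MPI.Win.Allocate(1024, comm=MPI.COMM_WORLD)
        #     model = win.Get_attr(MPI.WIN_MODEL)   # e.g. MPI.WIN_UNIFIED
        #     win.Free()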
elif keyval == MPI_WIN_SIZE: return (attrval)[0] elif keyval == MPI_WIN_DISP_UNIT: return (attrval)[0] # MPI-3 predefined attribute keyvals elif keyval == MPI_WIN_CREATE_FLAVOR: return (attrval)[0] elif keyval == MPI_WIN_MODEL: return (attrval)[0] # user-defined attribute keyval return PyMPI_attr_get(self.ob_mpi, keyval, attrval) def Set_attr(self, int keyval: int, attrval: Any) -> None: """ Store attribute value associated with a key """ PyMPI_attr_set(self.ob_mpi, keyval, attrval) def Delete_attr(self, int keyval: int) -> None: """ Delete attribute value associated with a key """ CHKERR( MPI_Win_delete_attr(self.ob_mpi, keyval) ) @classmethod def Create_keyval( cls, copy_fn: Optional[Callable[[Win, int, Any], Any]] = None, delete_fn: Optional[Callable[[Win, int, Any], None]] = None, nopython: bool = False, ) -> int: """ Create a new attribute key for windows """ cdef object state = _p_keyval(copy_fn, delete_fn, nopython) cdef int keyval = MPI_KEYVAL_INVALID cdef MPI_Win_copy_attr_function *_copy = PyMPI_attr_copy_fn cdef MPI_Win_delete_attr_function *_del = PyMPI_attr_delete_fn cdef void *extra_state = state CHKERR( MPI_Win_create_keyval(_copy, _del, &keyval, extra_state) ) win_keyval[keyval] = state return keyval @classmethod def Free_keyval(cls, int keyval: int) -> int: """ Free an attribute key for windows """ cdef int keyval_save = keyval CHKERR( MPI_Win_free_keyval(&keyval) ) try: del win_keyval[keyval_save] except KeyError: pass return keyval property attrs: "window attributes" def __get__(self) -> Tuple[int, int, int]: cdef void *base = NULL cdef MPI_Aint size = 0 cdef int disp_unit = 1 win_get_base(self.ob_mpi, &base) win_get_size(self.ob_mpi, &size) win_get_unit(self.ob_mpi, &disp_unit) return (base, size, disp_unit) property flavor: """window create flavor""" def __get__(self) -> int: cdef int keyval = MPI_WIN_CREATE_FLAVOR cdef int *attrval = NULL cdef int flag = 0 if keyval != MPI_KEYVAL_INVALID: CHKERR( MPI_Win_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if flag and attrval != NULL: return attrval[0] return MPI_WIN_FLAVOR_CREATE property model: """window memory model""" def __get__(self) -> int: cdef int keyval = MPI_WIN_MODEL cdef int *attrval = NULL cdef int flag = 0 if keyval != MPI_KEYVAL_INVALID: CHKERR( MPI_Win_get_attr(self.ob_mpi, keyval, &attrval, &flag) ) if flag and attrval != NULL: return attrval[0] return MPI_WIN_SEPARATE def tomemory(self) -> memory: """ Return window memory buffer """ return getbuffer(self, 0, 1) # buffer interface (PEP 3118) def __getbuffer__(self, Py_buffer *view, int flags): if view.obj == Py_None: Py_CLEAR(view.obj) cdef void *base = NULL cdef MPI_Aint size = 0 win_get_base(self.ob_mpi, &base) win_get_size(self.ob_mpi, &size) PyBuffer_FillInfo(view, self, base, size, 0, flags) # buffer interface (legacy) def __getsegcount__(self, Py_ssize_t *lenp): if lenp == NULL: return 1 cdef MPI_Aint size = 0 win_get_size(self.ob_mpi, &size) lenp[0] = size return 1 def __getreadbuffer__(self, Py_ssize_t idx, void **bufp): if idx != 0: raise SystemError("accessing non-existent buffer segment") cdef MPI_Aint size = 0 win_get_base(self.ob_mpi, bufp) win_get_size(self.ob_mpi, &size) return size def __getwritebuffer__(self, Py_ssize_t idx, void **bufp): if idx != 0: raise SystemError("accessing non-existent buffer segment") cdef MPI_Aint size = 0 win_get_base(self.ob_mpi, bufp) win_get_size(self.ob_mpi, &size) return size # Communication Operations # ------------------------ def Put( self, origin: BufSpec, int target_rank: int, target: 
Optional[TargetSpec] = None, ) -> None: """ Put data into a memory window on a remote process. """ cdef _p_msg_rma msg = message_rma() msg.for_put(origin, target_rank, target) with nogil: CHKERR( MPI_Put( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi) ) def Get( self, origin: BufSpec, int target_rank: int, target: Optional[TargetSpec] = None, ) -> None: """ Get data from a memory window on a remote process. """ cdef _p_msg_rma msg = message_rma() msg.for_get(origin, target_rank, target) with nogil: CHKERR( MPI_Get( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi) ) def Accumulate( self, origin: BufSpec, int target_rank: int, target: Optional[TargetSpec] = None, Op op: Op = SUM, ) -> None: """ Accumulate data into the target process """ cdef _p_msg_rma msg = message_rma() msg.for_acc(origin, target_rank, target) with nogil: CHKERR( MPI_Accumulate( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi) ) def Get_accumulate( self, origin: BufSpec, result: BufSpec, int target_rank: int, target: Optional[TargetSpec] = None, Op op: Op = SUM, ) -> None: """ Fetch-and-accumulate data into the target process """ cdef _p_msg_rma msg = message_rma() msg.for_get_acc(origin, result, target_rank, target) with nogil: CHKERR( MPI_Get_accumulate( msg.oaddr, msg.ocount, msg.otype, msg.raddr, msg.rcount, msg.rtype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi) ) def Fetch_and_op( self, origin: BufSpec, result: BufSpec, int target_rank: int, Aint target_disp: int = 0, Op op: Op = SUM, ) -> None: """ Perform one-sided read-modify-write """ cdef _p_msg_rma msg = message_rma() msg.for_fetch_op(origin, result, target_rank, target_disp) with nogil: CHKERR( MPI_Fetch_and_op( msg.oaddr, msg.raddr, msg.ttype, target_rank, target_disp, op.ob_mpi, self.ob_mpi) ) def Compare_and_swap( self, origin: BufSpec, compare: BufSpec, result: BufSpec, int target_rank: int, Aint target_disp: int = 0, ) -> None: """ Perform one-sided atomic compare-and-swap """ cdef _p_msg_rma msg = message_rma() msg.for_cmp_swap(origin, compare, result, target_rank, target_disp) with nogil: CHKERR( MPI_Compare_and_swap( msg.oaddr, msg.caddr, msg.raddr, msg.ttype, target_rank, target_disp, self.ob_mpi) ) # Request-based RMA Communication Operations # ------------------------------------------ def Rput( self, origin: BufSpec, int target_rank: int, target: Optional[TargetSpec] = None, ) -> Request: """ Put data into a memory window on a remote process. """ cdef _p_msg_rma msg = message_rma() msg.for_put(origin, target_rank, target) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Rput( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request def Rget( self, origin: BufSpec, int target_rank: int, target: Optional[TargetSpec] = None, ) -> Request: """ Get data from a memory window on a remote process. 
""" cdef _p_msg_rma msg = message_rma() msg.for_get(origin, target_rank, target) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Rget( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request def Raccumulate( self, origin: BufSpec, int target_rank: int, target: Optional[TargetSpec] = None, Op op: Op = SUM, ) -> Request: """ Fetch-and-accumulate data into the target process """ cdef _p_msg_rma msg = message_rma() msg.for_acc(origin, target_rank, target) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Raccumulate( msg.oaddr, msg.ocount, msg.otype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request def Rget_accumulate( self, origin: BufSpec, result: BufSpec, int target_rank: int, target: Optional[TargetSpec] = None, Op op: Op = SUM, ) -> Request: """ Accumulate data into the target process using remote memory access. """ cdef _p_msg_rma msg = message_rma() msg.for_get_acc(origin, result, target_rank, target) cdef Request request = Request.__new__(Request) with nogil: CHKERR( MPI_Rget_accumulate( msg.oaddr, msg.ocount, msg.otype, msg.raddr, msg.rcount, msg.rtype, target_rank, msg.tdisp, msg.tcount, msg.ttype, op.ob_mpi, self.ob_mpi, &request.ob_mpi) ) request.ob_buf = msg return request # Synchronization Calls # --------------------- # Fence # ----- def Fence(self, int assertion: int = 0) -> None: """ Perform an MPI fence synchronization on a window """ with nogil: CHKERR( MPI_Win_fence(assertion, self.ob_mpi) ) # General Active Target Synchronization # ------------------------------------- def Start(self, Group group: Group, int assertion: int = 0) -> None: """ Start an RMA access epoch for MPI """ with nogil: CHKERR( MPI_Win_start( group.ob_mpi, assertion, self.ob_mpi) ) def Complete(self) -> None: """ Completes an RMA operations begun after an `Win.Start()` """ with nogil: CHKERR( MPI_Win_complete(self.ob_mpi) ) def Post(self, Group group: Group, int assertion: int = 0) -> None: """ Start an RMA exposure epoch """ with nogil: CHKERR( MPI_Win_post( group.ob_mpi, assertion, self.ob_mpi) ) def Wait(self) -> Literal[True]: """ Complete an RMA exposure epoch begun with `Win.Post()` """ with nogil: CHKERR( MPI_Win_wait(self.ob_mpi) ) return True def Test(self) -> bool: """ Test whether an RMA exposure epoch has completed """ cdef int flag = 0 with nogil: CHKERR( MPI_Win_test(self.ob_mpi, &flag) ) return flag # Lock # ---- def Lock( self, int rank: int, int lock_type: int = LOCK_EXCLUSIVE, int assertion: int = 0, ) -> None: """ Begin an RMA access epoch at the target process """ with nogil: CHKERR( MPI_Win_lock( lock_type, rank, assertion, self.ob_mpi) ) def Unlock(self, int rank: int) -> None: """ Complete an RMA access epoch at the target process """ with nogil: CHKERR( MPI_Win_unlock(rank, self.ob_mpi) ) def Lock_all(self, int assertion: int = 0) -> None: """ Begin an RMA access epoch at all processes """ with nogil: CHKERR( MPI_Win_lock_all(assertion, self.ob_mpi) ) def Unlock_all(self) -> None: """ Complete an RMA access epoch at all processes """ with nogil: CHKERR( MPI_Win_unlock_all(self.ob_mpi) ) # Flush and Sync # -------------- def Flush(self, int rank: int) -> None: """ Complete all outstanding RMA operations at the given target """ with nogil: CHKERR( MPI_Win_flush(rank, self.ob_mpi) ) def Flush_all(self) -> None: """ Complete all outstanding RMA operations at all targets 
""" with nogil: CHKERR( MPI_Win_flush_all(self.ob_mpi) ) def Flush_local(self, int rank: int) -> None: """ Complete locally all outstanding RMA operations at the given target """ with nogil: CHKERR( MPI_Win_flush_local(rank, self.ob_mpi) ) def Flush_local_all(self) -> None: """ Complete locally all outstanding RMA opera- tions at all targets """ with nogil: CHKERR( MPI_Win_flush_local_all(self.ob_mpi) ) def Sync(self) -> None: """ Synchronize public and private copies of the given window """ with nogil: CHKERR( MPI_Win_sync(self.ob_mpi) ) # Error Handling # -------------- def Get_errhandler(self) -> Errhandler: """ Get the error handler for a window """ cdef Errhandler errhandler = Errhandler.__new__(Errhandler) CHKERR( MPI_Win_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) ) return errhandler def Set_errhandler(self, Errhandler errhandler: Errhandler) -> None: """ Set the error handler for a window """ CHKERR( MPI_Win_set_errhandler(self.ob_mpi, errhandler.ob_mpi) ) def Call_errhandler(self, int errorcode: int) -> None: """ Call the error handler installed on a window """ CHKERR( MPI_Win_call_errhandler(self.ob_mpi, errorcode) ) # Naming Objects # -------------- def Get_name(self) -> str: """ Get the print name associated with the window """ cdef char name[MPI_MAX_OBJECT_NAME+1] cdef int nlen = 0 CHKERR( MPI_Win_get_name(self.ob_mpi, name, &nlen) ) return tompistr(name, nlen) def Set_name(self, name: str) -> None: """ Set the print name associated with the window """ cdef char *cname = NULL name = asmpistr(name, &cname) CHKERR( MPI_Win_set_name(self.ob_mpi, cname) ) property name: """window name""" def __get__(self) -> str: return self.Get_name() def __set__(self, value: str): self.Set_name(value) # Fortran Handle # -------------- def py2f(self) -> int: """ """ return MPI_Win_c2f(self.ob_mpi) @classmethod def f2py(cls, arg: int) -> Win: """ """ cdef Win win = Win.__new__(Win) win.ob_mpi = MPI_Win_f2c(arg) return win cdef Win __WIN_NULL__ = new_Win(MPI_WIN_NULL) # Predefined window handles # ------------------------- WIN_NULL = __WIN_NULL__ #: Null window handle mpi4py-3.1.6/src/mpi4py/MPI/asarray.pxi000066400000000000000000000113541460670727200176040ustar00rootroot00000000000000# ----------------------------------------------------------------------------- ctypedef fused integral_t: int MPI_Aint cdef inline object newarray(int n, integral_t **p): return allocate(n, sizeof(integral_t), p) cdef inline object getarray(object ob, int *n, integral_t **p): cdef Py_ssize_t olen = len(ob) cdef integral_t *base = NULL cdef int i = 0, size = downcast(olen) cdef object mem = newarray(size, &base) for i from 0 <= i < size: base[i] = ob[i] n[0] = size p[0] = base return mem cdef inline object chkarray(object ob, int n, integral_t **p): cdef int size = 0 cdef object mem = getarray(ob, &size, p) if n != size: raise ValueError( "expecting %d items, got %d" % (n, size)) return mem # ----------------------------------------------------------------------------- cdef inline object asarray_Datatype(object sequence, int size, MPI_Datatype **p): cdef int i = 0 cdef MPI_Datatype *array = NULL if size != len(sequence): raise ValueError( "expecting %d items, got %d" % (size, len(sequence))) cdef object ob = allocate(size, sizeof(MPI_Datatype), &array) for i from 0 <= i < size: array[i] = (sequence[i]).ob_mpi p[0] = array return ob cdef inline object asarray_Info(object sequence, int size, MPI_Info **p): cdef int i = 0 cdef MPI_Info *array = NULL cdef MPI_Info info = MPI_INFO_NULL cdef object ob if sequence is 
None or isinstance(sequence, Info): if sequence is not None: info = (sequence).ob_mpi ob = allocate(size, sizeof(MPI_Info), &array) for i from 0 <= i < size: array[i] = info else: if size != len(sequence): raise ValueError( "expecting %d items, got %d" % (size, len(sequence))) ob = allocate(size, sizeof(MPI_Datatype), &array) for i from 0 <= i < size: array[i] = (sequence[i]).ob_mpi p[0] = array return ob # ----------------------------------------------------------------------------- cdef inline int is_string(object obj): return (isinstance(obj, str) or isinstance(obj, bytes) or isinstance(obj, unicode)) cdef inline object asstring(object ob, char *s[]): cdef Py_ssize_t n = 0 cdef char *p = NULL, *q = NULL ob = asmpistr(ob, &p) PyBytes_AsStringAndSize(ob, &p, &n) cdef object mem = allocate(n+1, sizeof(char), &q) memcpy(q, p, n) q[n] = 0; s[0] = q; return mem cdef inline object asarray_str(object sequence, char ***p): cdef char** array = NULL cdef Py_ssize_t i = 0, size = len(sequence) cdef object ob = allocate(size+1, sizeof(char*), &array) for i from 0 <= i < size: sequence[i] = asstring(sequence[i], &array[i]) array[size] = NULL p[0] = array return (sequence, ob) cdef inline object asarray_argv(object sequence, char ***p): if sequence is None: p[0] = MPI_ARGV_NULL return None if is_string(sequence): sequence = [sequence] else: sequence = list(sequence) return asarray_str(sequence, p) cdef inline object asarray_cmds(object sequence, int *count, char ***p): if is_string(sequence): raise ValueError("expecting a sequence of strings") sequence = list(sequence) count[0] = len(sequence) return asarray_str(sequence, p) cdef inline object asarray_argvs(object sequence, int size, char ****p): if sequence is None: p[0] = MPI_ARGVS_NULL return None if is_string(sequence): sequence = [sequence] * size else: sequence = list(sequence) if size != len(sequence): raise ValueError( "expecting %d items, got %d" % (size, len(sequence))) cdef int i = 0 cdef char*** array = NULL cdef object ob = allocate(size+1, sizeof(char**), &array) cdef object argv for i from 0 <= i < size: argv = sequence[i] if argv is None: argv = [] sequence[i] = asarray_argv(argv, &array[i]) array[size] = NULL p[0] = array return (sequence, ob) cdef inline object asarray_nprocs(object sequence, int size, int **p): cdef object ob cdef int *array = NULL cdef int i = 0, value = 1 if sequence is None or is_integral(sequence): if sequence is not None: value = sequence ob = newarray(size, &array) for i from 0 <= i < size: array[i] = value else: ob = chkarray(sequence, size, &array) p[0] = array return ob # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/asbuffer.pxi000066400000000000000000000341671460670727200177460ustar00rootroot00000000000000#------------------------------------------------------------------------------ cdef extern from "Python.h": int PyIndex_Check(object) int PySlice_Check(object) int PySlice_GetIndicesEx(object, Py_ssize_t, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *) except -1 Py_ssize_t PyNumber_AsSsize_t(object, object) except? 
-1 #------------------------------------------------------------------------------ # Python 3 buffer interface (PEP 3118) cdef extern from "Python.h": ctypedef struct Py_buffer: PyObject *obj void *buf Py_ssize_t len Py_ssize_t itemsize bint readonly char *format #int ndim #Py_ssize_t *shape #Py_ssize_t *strides #Py_ssize_t *suboffsets cdef enum: PyBUF_SIMPLE PyBUF_WRITABLE PyBUF_FORMAT PyBUF_ND PyBUF_STRIDES PyBUF_ANY_CONTIGUOUS int PyObject_CheckBuffer(object) int PyObject_GetBuffer(object, Py_buffer *, int) except -1 void PyBuffer_Release(Py_buffer *) int PyBuffer_FillInfo(Py_buffer *, object, void *, Py_ssize_t, bint, int) except -1 # Python 2 buffer interface (legacy) cdef extern from *: int _Py2_IsBuffer(object) int _Py2_AsBuffer(object, bint *, void **, Py_ssize_t *) except -1 cdef extern from "Python.h": object PyLong_FromVoidPtr(void*) void* PyLong_AsVoidPtr(object) except? NULL cdef extern from *: void *emptybuffer '((void*)"")' cdef char BYTE_FMT[2] BYTE_FMT[0] = c'B' BYTE_FMT[1] = 0 #------------------------------------------------------------------------------ cdef extern from *: char* PyByteArray_AsString(object) except NULL Py_ssize_t PyByteArray_Size(object) except -1 cdef type array_array cdef type numpy_array cdef int pypy_have_numpy = 0 if PYPY: from array import array as array_array try: from _numpypy.multiarray import ndarray as numpy_array pypy_have_numpy = 1 except ImportError: try: from numpypy import ndarray as numpy_array pypy_have_numpy = 1 except ImportError: try: from numpy import ndarray as numpy_array pypy_have_numpy = 1 except ImportError: pass cdef int PyPy_GetBuffer(object obj, Py_buffer *view, int flags) except -1: cdef object addr cdef void *buf = NULL cdef Py_ssize_t size = 0 cdef bint readonly = 0 try: if not isinstance(obj, bytes): if PyObject_CheckBuffer(obj): return PyObject_GetBuffer(obj, view, flags) except SystemError: pass except TypeError: pass if isinstance(obj, bytes): buf = PyBytes_AsString(obj) size = PyBytes_Size(obj) readonly = 1 elif isinstance(obj, bytearray): buf = PyByteArray_AsString(obj) size = PyByteArray_Size(obj) readonly = 0 elif isinstance(obj, array_array): addr, size = obj.buffer_info() buf = PyLong_AsVoidPtr(addr) size *= obj.itemsize readonly = 0 elif pypy_have_numpy and isinstance(obj, numpy_array): addr, readonly = obj.__array_interface__['data'] buf = PyLong_AsVoidPtr(addr) size = obj.nbytes else: _Py2_AsBuffer(obj, &readonly, &buf, &size) if buf == NULL and size == 0: buf = emptybuffer PyBuffer_FillInfo(view, obj, buf, size, readonly, flags) if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: view.format = BYTE_FMT return 0 #------------------------------------------------------------------------------ cdef int Py27_GetBuffer(object obj, Py_buffer *view, int flags) except -1: # Python 3 buffer interface (PEP 3118) if PyObject_CheckBuffer(obj): return PyObject_GetBuffer(obj, view, flags) # Python 2 buffer interface (legacy) _Py2_AsBuffer(obj, &view.readonly, &view.buf, &view.len) if view.buf == NULL and view.len == 0: view.buf = emptybuffer PyBuffer_FillInfo(view, obj, view.buf, view.len, view.readonly, flags) if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: view.format = BYTE_FMT return 0 #------------------------------------------------------------------------------ include "asdlpack.pxi" include "ascaibuf.pxi" cdef int PyMPI_GetBuffer(object obj, Py_buffer *view, int flags) except -1: try: if PYPY: return PyPy_GetBuffer(obj, view, flags) if PY2: return Py27_GetBuffer(obj, view, flags) return PyObject_GetBuffer(obj, view, flags) 
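# --- illustrative usage sketch (not part of the original source) --------------
# The buffer-acquisition helpers in this file let any object exposing the
# PEP 3118 buffer interface (and, via the included asdlpack/ascaibuf helpers,
# the DLPack protocol or the CUDA array interface) be used directly as a
# message buffer. A minimal sketch with two common buffer providers; the tag
# and message size are arbitrary example values.
from array import array
from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_size() >= 2:
    if comm.Get_rank() == 0:
        comm.Send([bytearray(b"0123456789"), MPI.BYTE], dest=1, tag=7)
        comm.Send([array('d', [3.14] * 4), MPI.DOUBLE], dest=1, tag=8)
    elif comm.Get_rank() == 1:
        rbuf1 = bytearray(10)               # bytearray exports a writable buffer
        rbuf2 = array('d', [0.0] * 4)       # so does array.array
        comm.Recv([rbuf1, MPI.BYTE], source=0, tag=7)
        comm.Recv([rbuf2, MPI.DOUBLE], source=0, tag=8)
# --- end sketch ----------------------------------------------------------------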
except BaseException: try: return Py_GetDLPackBuffer(obj, view, flags) except NotImplementedError: pass except BaseException: raise try: return Py_GetCAIBuffer(obj, view, flags) except NotImplementedError: pass except BaseException: raise raise #------------------------------------------------------------------------------ @cython.final cdef class memory: """ Memory buffer """ cdef Py_buffer view def __cinit__(self, *args): if args: PyMPI_GetBuffer(args[0], &self.view, PyBUF_SIMPLE) else: PyBuffer_FillInfo(&self.view, NULL, NULL, 0, 0, PyBUF_SIMPLE) def __dealloc__(self): PyBuffer_Release(&self.view) @staticmethod def allocate( Aint nbytes: int, bint clear: bool = False, ) -> memory: """Memory allocation""" cdef void *buf = NULL cdef Py_ssize_t size = nbytes if size < 0: raise ValueError("expecting non-negative size") cdef object ob = rawalloc(size, 1, clear, &buf) cdef memory mem = memory.__new__(memory) PyBuffer_FillInfo(&mem.view, ob, buf, size, 0, PyBUF_SIMPLE) return mem @staticmethod def frombuffer( obj: Buffer, bint readonly: bool = False, ) -> memory: """Memory from buffer-like object""" cdef int flags = PyBUF_SIMPLE if not readonly: flags |= PyBUF_WRITABLE cdef memory mem = memory.__new__(memory) PyMPI_GetBuffer(obj, &mem.view, flags) mem.view.readonly = readonly return mem @staticmethod def fromaddress( address: int, Aint nbytes: int, bint readonly: bool = False, ) -> memory: """Memory from address and size in bytes""" cdef void *buf = PyLong_AsVoidPtr(address) cdef Py_ssize_t size = nbytes if size < 0: raise ValueError("expecting non-negative buffer length") elif size > 0 and buf == NULL: raise ValueError("expecting non-NULL address") cdef memory mem = memory.__new__(memory) PyBuffer_FillInfo(&mem.view, NULL, buf, size, readonly, PyBUF_SIMPLE) return mem # properties property address: """Memory address""" def __get__(self) -> int: return PyLong_FromVoidPtr(self.view.buf) property obj: """The underlying object of the memory""" def __get__(self) -> Optional[Buffer]: if self.view.obj == NULL: return None return self.view.obj property nbytes: """Memory size (in bytes)""" def __get__(self) -> int: return self.view.len property readonly: """Boolean indicating whether the memory is read-only""" def __get__(self) -> bool: return self.view.readonly property format: """A string with the format of each element""" def __get__(self) -> str: if self.view.format != NULL: return pystr(self.view.format) return pystr(BYTE_FMT) property itemsize: """The size in bytes of each element""" def __get__(self) -> int: return self.view.itemsize # convenience methods def tobytes(self, order: Optional[str] = None) -> bytes: """Return the data in the buffer as a byte string""" return PyBytes_FromStringAndSize(self.view.buf, self.view.len) def toreadonly(self) -> memory: """Return a readonly version of the memory object""" cdef void *buf = self.view.buf cdef Py_ssize_t size = self.view.len cdef object obj = self if self.view.obj != NULL: obj = self.view.obj cdef memory mem = memory.__new__(memory) PyBuffer_FillInfo(&mem.view, obj, buf, size, 1, PyBUF_SIMPLE) return mem def release(self) -> None: """Release the underlying buffer exposed by the memory object""" PyBuffer_Release(&self.view) PyBuffer_FillInfo(&self.view, NULL, NULL, 0, 0, PyBUF_SIMPLE) # buffer interface (PEP 3118) def __getbuffer__(self, Py_buffer *view, int flags): if view.obj == Py_None: Py_CLEAR(view.obj) PyBuffer_FillInfo(view, self, self.view.buf, self.view.len, self.view.readonly, flags) # buffer interface (legacy) def 
__getsegcount__(self, Py_ssize_t *lenp): if lenp != NULL: lenp[0] = self.view.len return 1 def __getreadbuffer__(self, Py_ssize_t idx, void **p): if idx != 0: raise SystemError("accessing non-existent buffer segment") p[0] = self.view.buf return self.view.len def __getwritebuffer__(self, Py_ssize_t idx, void **p): if self.view.readonly: raise TypeError("memory buffer is read-only") if idx != 0: raise SystemError("accessing non-existent buffer segment") p[0] = self.view.buf return self.view.len # sequence interface (basic) def __len__(self): return self.view.len def __getitem__(self, object item): cdef Py_ssize_t start=0, stop=0, step=1, slen=0 cdef unsigned char *buf = self.view.buf cdef Py_ssize_t blen = self.view.len if PyIndex_Check(item): start = PyNumber_AsSsize_t(item, IndexError) if start < 0: start += blen if start < 0 or start >= blen: raise IndexError("index out of range") return buf[start] elif PySlice_Check(item): PySlice_GetIndicesEx(item, blen, &start, &stop, &step, &slen) if step != 1: raise IndexError("slice with step not supported") return asbuffer(self, buf+start, slen, self.view.readonly) else: raise TypeError("index must be integer or slice") def __setitem__(self, object item, object value): if self.view.readonly: raise TypeError("memory buffer is read-only") cdef Py_ssize_t start=0, stop=0, step=1, slen=0 cdef unsigned char *buf = self.view.buf cdef Py_ssize_t blen = self.view.len cdef memory inmem if PyIndex_Check(item): start = PyNumber_AsSsize_t(item, IndexError) if start < 0: start += blen if start < 0 or start >= blen: raise IndexError("index out of range") buf[start] = value elif PySlice_Check(item): PySlice_GetIndicesEx(item, blen, &start, &stop, &step, &slen) if step != 1: raise IndexError("slice with step not supported") if PyIndex_Check(value): memset(buf+start, value, slen) else: inmem = getbuffer(value, 1, 0) if inmem.view.len != slen: raise ValueError("slice length does not match buffer") memmove(buf+start, inmem.view.buf, slen) else: raise TypeError("index must be integer or slice") #------------------------------------------------------------------------------ cdef inline memory newbuffer(): return memory.__new__(memory) cdef inline memory getbuffer(object ob, bint readonly, bint format): cdef memory buf = newbuffer() cdef int flags = PyBUF_ANY_CONTIGUOUS if not readonly: flags |= PyBUF_WRITABLE if format: flags |= PyBUF_FORMAT PyMPI_GetBuffer(ob, &buf.view, flags) return buf cdef inline object getformat(memory buf): cdef Py_buffer *view = &buf.view # if view.obj == NULL: if view.format != NULL: return pystr(view.format) else: return "B" elif view.format != NULL: # XXX this is a hack if view.format != BYTE_FMT: return pystr(view.format) # cdef object ob = view.obj cdef object format = None try: # numpy.ndarray format = ob.dtype.char except (AttributeError, TypeError): try: # array.array format = ob.typecode except (AttributeError, TypeError): if view.format != NULL: format = pystr(view.format) return format cdef inline memory getbuffer_r(object ob, void **base, MPI_Aint *size): cdef memory buf = getbuffer(ob, 1, 0) if base != NULL: base[0] = buf.view.buf if size != NULL: size[0] = buf.view.len return buf cdef inline memory getbuffer_w(object ob, void **base, MPI_Aint *size): cdef memory buf = getbuffer(ob, 0, 0) if base != NULL: base[0] = buf.view.buf if size != NULL: size[0] = buf.view.len return buf cdef inline memory asbuffer(object ob, void *base, MPI_Aint size, bint ro): cdef memory buf = newbuffer() PyBuffer_FillInfo(&buf.view, ob, base, size, ro, 
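# --- illustrative usage sketch (not part of the original source) --------------
# The `memory` type implemented above is exposed to users as `MPI.memory`.
# A minimal sketch of its allocation, buffer wrapping, indexing and slicing
# behaviour; all sizes and values are arbitrary example values.
from mpi4py import MPI

mem = MPI.memory.allocate(16, clear=True)     # zero-initialized raw buffer
mem[0] = 0xFF                                 # item assignment (0..255)
view = mem[4:8]                               # slicing returns another memory view
assert view.nbytes == 4

wrapped = MPI.memory.frombuffer(bytearray(b"abcd"))
assert wrapped.tobytes() == b"abcd"
ro = wrapped.toreadonly()
assert ro.readonly
# --- end sketch ----------------------------------------------------------------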
PyBUF_SIMPLE) return buf #------------------------------------------------------------------------------ cdef inline memory asmemory(object ob, void **base, MPI_Aint *size): cdef memory mem if type(ob) is memory: mem = ob else: mem = getbuffer(ob, 1, 0) if base != NULL: base[0] = mem.view.buf if size != NULL: size[0] = mem.view.len return mem cdef inline memory tomemory(void *base, MPI_Aint size): cdef memory mem = memory.__new__(memory) PyBuffer_FillInfo(&mem.view, NULL, base, size, 0, PyBUF_SIMPLE) return mem #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/ascaibuf.pxi000066400000000000000000000114371460670727200177210ustar00rootroot00000000000000#------------------------------------------------------------------------------ # CUDA array interface for interoperating Python CUDA GPU libraries # See https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html cdef inline int cuda_is_contig(tuple shape, tuple strides, Py_ssize_t itemsize, char order) except -1: cdef Py_ssize_t i, ndim = len(shape) cdef Py_ssize_t start, step, index, dim, size = itemsize if order == c'F': start = 0 step = 1 else: start = ndim - 1 step = -1 for i from 0 <= i < ndim: index = start + step * i dim = shape[index] if dim > 1 and size != strides[index]: return 0 size *= dim return 1 cdef inline char* cuda_get_format(char typekind, Py_ssize_t itemsize) nogil: if typekind == c'b': if itemsize == sizeof(char): return b"?" if typekind == c'i': if itemsize == sizeof(char): return b"b" if itemsize == sizeof(short): return b"h" if itemsize == sizeof(int): return b"i" if itemsize == sizeof(long): return b"l" if itemsize == sizeof(long long): return b"q" if typekind == c'u': if itemsize == sizeof(char): return b"B" if itemsize == sizeof(short): return b"H" if itemsize == sizeof(int): return b"I" if itemsize == sizeof(long): return b"L" if itemsize == sizeof(long long): return b"Q" if typekind == c'f': if itemsize == sizeof(float)//2: return b"e" if itemsize == sizeof(float): return b"f" if itemsize == sizeof(double): return b"d" if itemsize == sizeof(long double): return b"g" if typekind == c'c': if itemsize == 2*sizeof(float)//2: return b"Ze" if itemsize == 2*sizeof(float): return b"Zf" if itemsize == 2*sizeof(double): return b"Zd" if itemsize == 2*sizeof(long double): return b"Zg" return BYTE_FMT #------------------------------------------------------------------------------ cdef int Py_CheckCAIBuffer(object obj): try: return hasattr(obj, '__cuda_array_interface__') except: return 0 cdef int Py_GetCAIBuffer(object obj, Py_buffer *view, int flags) except -1: cdef dict cuda_array_interface cdef tuple data cdef str typestr cdef tuple shape cdef tuple strides cdef list descr cdef object dev_ptr, mask cdef void *buf = NULL cdef bint readonly = 0 cdef Py_ssize_t s, size = 1 cdef Py_ssize_t itemsize = 1 cdef char typekind = c'u' cdef bint fixnull = 0 try: cuda_array_interface = obj.__cuda_array_interface__ except AttributeError: raise NotImplementedError("missing CUDA array interface") # mandatory data = cuda_array_interface['data'] typestr = cuda_array_interface['typestr'] shape = cuda_array_interface['shape'] # optional strides = cuda_array_interface.get('strides') descr = cuda_array_interface.get('descr') mask = cuda_array_interface.get('mask') dev_ptr, readonly = data for s in shape: size *= s if dev_ptr is None and size == 0: dev_ptr = 0 # XXX buf = PyLong_AsVoidPtr(dev_ptr) typekind = ord(typestr[1]) itemsize = int(typestr[2:]) if mask is not None: 
raise BufferError( "__cuda_array_interface__: " "cannot handle masked arrays" ) if size < 0: raise BufferError( "__cuda_array_interface__: " "buffer with negative size (shape:%s, size:%d)" % (shape, size) ) if (strides is not None and not cuda_is_contig(shape, strides, itemsize, c'C') and not cuda_is_contig(shape, strides, itemsize, c'F')): raise BufferError( "__cuda_array_interface__: " "buffer is not contiguous (shape:%s, strides:%s, itemsize:%d)" % (shape, strides, itemsize) ) if descr is not None and (len(descr) != 1 or descr[0] != ('', typestr)): PyErr_WarnEx(RuntimeWarning, b"__cuda_array_interface__: " b"ignoring 'descr' key", 1) if PYPY and readonly and ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE): raise BufferError("Object is not writable") fixnull = (buf == NULL and size == 0) if fixnull: buf = &fixnull PyBuffer_FillInfo(view, obj, buf, size*itemsize, readonly, flags) if fixnull: view.buf = NULL if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: view.format = cuda_get_format(typekind, itemsize) if view.format != BYTE_FMT: view.itemsize = itemsize return 0 #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/asdlpack.pxi000066400000000000000000000165531460670727200177320ustar00rootroot00000000000000#------------------------------------------------------------------------------ # From dlpack.h (as of v0.6) cdef extern from * nogil: ctypedef unsigned char uint8_t ctypedef unsigned short uint16_t ctypedef signed long long int64_t ctypedef unsigned long long uint64_t ctypedef enum DLDeviceType: kDLCPU = 1 kDLCUDA = 2 kDLCUDAHost = 3 kDLOpenCL = 4 kDLVulkan = 7 kDLMetal = 8 kDLVPI = 9 kDLROCM = 10 kDLROCMHost = 11 kDLExtDev = 12 kDLCUDAManaged = 13 ctypedef struct DLDevice: DLDeviceType device_type int device_id ctypedef enum DLDataTypeCode: kDLInt = 0 kDLUInt = 1 kDLFloat = 2 kDLOpaqueHandle = 3 kDLBfloat = 4 kDLComplex = 5 ctypedef struct DLDataType: uint8_t code uint8_t bits uint16_t lanes ctypedef struct DLTensor: void *data DLDevice device int ndim DLDataType dtype int64_t *shape int64_t *strides uint64_t byte_offset ctypedef struct DLManagedTensor: DLTensor dl_tensor void *manager_ctx void (*deleter)(DLManagedTensor *) #------------------------------------------------------------------------------ cdef extern from "Python.h": void* PyCapsule_GetPointer(object, const char[]) except? 
NULL int PyCapsule_SetName(object, const char[]) except -1 int PyCapsule_IsValid(object, const char[]) #------------------------------------------------------------------------------ cdef inline int dlpack_is_contig(const DLTensor *dltensor, char order) nogil: cdef int i, ndim = dltensor.ndim cdef int64_t *shape = dltensor.shape cdef int64_t *strides = dltensor.strides cdef int64_t start, step, index, dim, size = 1 if strides == NULL: if ndim > 1 and order == c'F': return 0 return 1 if order == c'F': start = 0 step = 1 else: start = ndim - 1 step = -1 for i from 0 <= i < ndim: index = start + step * i dim = shape[index] if dim > 1 and size != strides[index]: return 0 size *= dim return 1 cdef inline int dlpack_check_shape(const DLTensor *dltensor) except -1: cdef int i, ndim = dltensor.ndim if ndim < 0: raise BufferError("dlpack: number of dimensions is negative") if ndim > 0 and dltensor.shape == NULL: raise BufferError("dlpack: shape is NULL") for i from 0 <= i < ndim: if dltensor.shape[i] < 0: raise BufferError("dlpack: shape item is negative") if dltensor.strides != NULL: for i from 0 <= i < ndim: if dltensor.strides[i] < 0: raise BufferError("dlpack: strides item is negative") return 0 cdef inline int dlpack_check_contig(const DLTensor *dltensor) except -1: if dltensor.strides == NULL: return 0 if dlpack_is_contig(dltensor, c'C'): return 0 if dlpack_is_contig(dltensor, c'F'): return 0 raise BufferError("dlpack: buffer is not contiguous") cdef inline void *dlpack_get_data(const DLTensor *dltensor) nogil: return dltensor.data + dltensor.byte_offset cdef inline Py_ssize_t dlpack_get_size(const DLTensor *dltensor) nogil: cdef int i, ndim = dltensor.ndim cdef int64_t *shape = dltensor.shape cdef Py_ssize_t bits = dltensor.dtype.bits cdef Py_ssize_t lanes = dltensor.dtype.lanes cdef Py_ssize_t size = 1 for i from 0 <= i < ndim: size *= shape[i] size *= (bits * lanes + 7) // 8 return size cdef inline char *dlpack_get_format(const DLTensor *dltensor) nogil: cdef unsigned int code = dltensor.dtype.code cdef unsigned int bits = dltensor.dtype.bits if dltensor.dtype.lanes != 1: if code == kDLFloat and dltensor.dtype.lanes == 2: if bits == 8*sizeof(float): return b"Zf" if bits == 8*sizeof(double): return b"Zd" if bits == 8*sizeof(long double): return b"Zg" return BYTE_FMT if code == kDLInt: if bits == 8*sizeof(char): return b"b" if bits == 8*sizeof(short): return b"h" if bits == 8*sizeof(int): return b"i" if bits == 8*sizeof(long): return b"l" if bits == 8*sizeof(long long): return b"q" if code == kDLUInt: if bits == 8*sizeof(char): return b"B" if bits == 8*sizeof(short): return b"H" if bits == 8*sizeof(int): return b"I" if bits == 8*sizeof(long): return b"L" if bits == 8*sizeof(long long): return b"Q" if code == kDLFloat: if bits == 8*sizeof(float)//2: return b"e" if bits == 8*sizeof(float): return b"f" if bits == 8*sizeof(double): return b"d" if bits == 8*sizeof(long double): return b"g" if code == kDLComplex: if bits == 8*2*sizeof(float)//2: return b"Ze" if bits == 8*2*sizeof(float): return b"Zf" if bits == 8*2*sizeof(double): return b"Zd" if bits == 8*2*sizeof(long double): return b"Zg" return BYTE_FMT cdef inline Py_ssize_t dlpack_get_itemsize(const DLTensor *dltensor) nogil: cdef unsigned int code = dltensor.dtype.code cdef unsigned int bits = dltensor.dtype.bits if dltensor.dtype.lanes != 1: if code == kDLFloat and dltensor.dtype.lanes == 2: if (bits == 8*sizeof(float) or bits == 8*sizeof(double) or bits == 8*sizeof(long double)): return bits // 8 * 2 return 1 return (bits + 7) // 8 
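# --- illustrative usage sketch (not part of the original source) --------------
# Objects implementing __dlpack__/__dlpack_device__ (handled here) or
# __cuda_array_interface__ (handled in ascaibuf.pxi) can be passed directly as
# message buffers. The sketch below assumes CuPy is installed and the
# underlying MPI library is CUDA-aware; both are assumptions of this sketch,
# not requirements of this file.
import cupy as cp
from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_size() >= 2:
    if comm.Get_rank() == 0:
        sbuf = cp.arange(8, dtype=cp.float64)     # device array, exported via DLPack/CAI
        comm.Send([sbuf, MPI.DOUBLE], dest=1, tag=0)
    elif comm.Get_rank() == 1:
        rbuf = cp.empty(8, dtype=cp.float64)
        comm.Recv([rbuf, MPI.DOUBLE], source=0, tag=0)
# --- end sketch ----------------------------------------------------------------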
#------------------------------------------------------------------------------ cdef int Py_CheckDLPackBuffer(object obj): try: return hasattr(obj, '__dlpack__') except: return 0 cdef int Py_GetDLPackBuffer(object obj, Py_buffer *view, int flags) except -1: cdef object dlpack cdef object dlpack_device cdef unsigned device_type cdef int device_id cdef object capsule cdef DLManagedTensor *managed cdef const DLTensor *dltensor cdef void *buf cdef Py_ssize_t size cdef bint readonly cdef bint fixnull try: dlpack = obj.__dlpack__ dlpack_device = obj.__dlpack_device__ except AttributeError: raise NotImplementedError("dlpack: missing support") device_type, device_id = dlpack_device() if device_type == kDLCPU: capsule = dlpack() else: capsule = dlpack(stream=-1) if not PyCapsule_IsValid(capsule, b"dltensor"): raise BufferError("dlpack: invalid capsule object") managed = PyCapsule_GetPointer(capsule, b"dltensor") dltensor = &managed.dl_tensor try: dlpack_check_shape(dltensor) dlpack_check_contig(dltensor) buf = dlpack_get_data(dltensor) size = dlpack_get_size(dltensor) readonly = 0 fixnull = (buf == NULL and size == 0) if fixnull: buf = &fixnull PyBuffer_FillInfo(view, obj, buf, size, readonly, flags) if fixnull: view.buf = NULL if (flags & PyBUF_FORMAT) == PyBUF_FORMAT: view.format = dlpack_get_format(dltensor) if view.format != BYTE_FMT: view.itemsize = dlpack_get_itemsize(dltensor) finally: if managed.deleter != NULL: managed.deleter(managed) PyCapsule_SetName(capsule, b"used_dltensor") del capsule return 0 #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/asmemory.pxi000066400000000000000000000045751460670727200200050ustar00rootroot00000000000000#------------------------------------------------------------------------------ cdef extern from "Python.h": enum: PY_SSIZE_T_MAX void *PyMem_Malloc(size_t) void *PyMem_Calloc(size_t, size_t) void *PyMem_Realloc(void*, size_t) void PyMem_Free(void*) cdef extern from * nogil: """ #if PY_VERSION_HEX < 0x03050000 # define PyMPI_RawMalloc(n) malloc(((n)!=0)?(n):1) # define PyMPI_RawCalloc(n,s) ((n)&&(s))?calloc((n),(s)):calloc(1,1) # define PyMPI_RawRealloc(p,n) realloc((p),(n)?(n):1)) # define PyMPI_RawFree free #else # define PyMPI_RawMalloc PyMem_RawMalloc # define PyMPI_RawCalloc PyMem_RawCalloc # define PyMPI_RawRealloc PyMem_RawRealloc # define PyMPI_RawFree PyMem_RawFree #endif """ void *PyMPI_RawMalloc(size_t) void *PyMPI_RawCalloc(size_t, size_t) void *PyMPI_RawRealloc(void*, size_t) void PyMPI_RawFree(void*) #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_mem: cdef void *buf cdef size_t len cdef void (*free)(void*) def __cinit__(self): self.buf = NULL self.len = 0 self.free = NULL def __dealloc__(self): if self.free: self.free(self.buf) cdef inline _p_mem allocate(Py_ssize_t m, size_t b, void *buf): if m > PY_SSIZE_T_MAX/b: raise MemoryError("memory allocation size too large") if m < 0: raise RuntimeError("memory allocation with negative size") cdef _p_mem ob = _p_mem.__new__(_p_mem) ob.len = m * b ob.free = PyMem_Free ob.buf = PyMem_Malloc(m * b) if ob.buf == NULL: raise MemoryError if buf != NULL: (buf)[0] = ob.buf return ob cdef inline _p_mem rawalloc(Py_ssize_t m, size_t b, bint clear, void *buf): if m > PY_SSIZE_T_MAX/b: raise MemoryError("memory allocation size too large") if m < 0: raise RuntimeError("memory allocation with negative size") cdef _p_mem ob = _p_mem.__new__(_p_mem) ob.len = m * b ob.free = 
PyMPI_RawFree if clear: ob.buf = PyMPI_RawCalloc(m, b) else: ob.buf = PyMPI_RawMalloc(m * b) if ob.buf == NULL: raise MemoryError if buf != NULL: (buf)[0] = ob.buf return ob #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/asstring.pxi000066400000000000000000000024571460670727200200000ustar00rootroot00000000000000#------------------------------------------------------------------------------ cdef extern from "Python.h": int PyUnicode_Check(object) object PyUnicode_AsUTF8String(object) object PyUnicode_AsASCIIString(object) object PyUnicode_FromString(const char[]) object PyUnicode_FromStringAndSize(const char[],Py_ssize_t) object PyBytes_FromString(const char[]) object PyBytes_FromStringAndSize(const char[],Py_ssize_t) int PyBytes_AsStringAndSize(object,char*[],Py_ssize_t*) except -1 #------------------------------------------------------------------------------ cdef inline object asmpistr(object ob, char *s[]): if PyUnicode_Check(ob): if PY3: ob = PyUnicode_AsUTF8String(ob); else: ob = PyUnicode_AsASCIIString(ob); PyBytes_AsStringAndSize(ob, s, NULL) return ob cdef inline object tompistr(const char s[], int n): if PY3: return PyUnicode_FromStringAndSize(s, n) else: return PyBytes_FromStringAndSize(s, n) cdef inline object mpistr(const char s[]): if PY3: return PyUnicode_FromString(s) else: return PyBytes_FromString(s) cdef inline object pystr(const char s[]): if PY3: return PyUnicode_FromString(s) else: return PyBytes_FromString(s) #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/atimport.pxi000066400000000000000000000206721460670727200200040ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "atimport.h": pass # ----------------------------------------------------------------------------- cdef extern from "Python.h": enum: PY3 "(PY_MAJOR_VERSION>=3)" enum: PY2 "(PY_MAJOR_VERSION==2)" enum: PYPY "PyMPI_RUNTIME_PYPY" void PySys_WriteStderr(char*,...) 
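# --- illustrative usage sketch (not part of the original source) --------------
# The string helpers above (asmpistr/tompistr) convert Python str objects to
# the encoded C strings MPI expects (UTF-8 on Python 3, ASCII on Python 2).
# They back naming calls such as Set_name/Get_name; a minimal sketch:
from mpi4py import MPI

comm = MPI.COMM_WORLD.Dup()
comm.Set_name("workers")
assert comm.Get_name() == "workers"
comm.name = "workers-2"        # the `name` property wraps the same calls
comm.Free()
# --- end sketch ----------------------------------------------------------------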
int Py_AtExit(void (*)()) ctypedef struct PyObject PyObject *Py_None void Py_CLEAR(PyObject*) void Py_INCREF(object) void Py_DECREF(object) # ----------------------------------------------------------------------------- cdef extern from *: const char *Py_GETENV(const char[]) enum: USE_MATCHED_RECV "PyMPI_USE_MATCHED_RECV" ctypedef struct Options: int initialize int threads int thread_level int finalize int fast_reduce int recv_mprobe int errors cdef Options options options.initialize = 1 options.threads = 1 options.thread_level = MPI_THREAD_MULTIPLE options.finalize = 1 options.fast_reduce = 1 options.recv_mprobe = 1 options.errors = 1 cdef object getEnv(object rc, const char name[], object value): cdef bytes oname = b"MPI4PY_RC_" + name.upper() cdef const char *cvalue = Py_GETENV(oname) if cvalue == NULL: return value cdef object ovalue = pystr(cvalue) cdef bytes bvalue = PyBytes_FromString(cvalue).lower() if bvalue in (b'true', b'yes', b'on', b'y', b'1'): ovalue = True if bvalue in (b'false', b'no', b'off', b'n', b'0'): ovalue = False try: setattr(rc, pystr(name), ovalue) except: pass return ovalue cdef int warnOpt(object name, object value) except -1: cdef object warn from warnings import warn warn("mpi4py.rc: '%s': unexpected value %r" % (name, value)) cdef int getOptions(Options* opts) except -1: cdef object rc opts.initialize = 1 opts.threads = 1 opts.thread_level = MPI_THREAD_MULTIPLE opts.finalize = 1 opts.fast_reduce = 1 opts.recv_mprobe = 1 opts.errors = 1 try: from mpi4py import rc except: return 0 # cdef object initialize = True cdef object threads = True cdef object thread_level = 'multiple' cdef object finalize = None cdef object fast_reduce = True cdef object recv_mprobe = True cdef object errors = 'exception' try: initialize = rc.initialize except: pass try: threads = rc.threads except: pass try: threads = rc.threaded # backward except: pass # compatibility try: thread_level = rc.thread_level except: pass try: finalize = rc.finalize except: pass try: fast_reduce = rc.fast_reduce except: pass try: recv_mprobe = rc.recv_mprobe except: pass try: errors = rc.errors except: pass initialize = getEnv(rc, b"initialize", initialize) threads = getEnv(rc, b"threads", threads) thread_level = getEnv(rc, b"thread_level", thread_level) finalize = getEnv(rc, b"finalize", finalize) fast_reduce = getEnv(rc, b"fast_reduce", fast_reduce) recv_mprobe = getEnv(rc, b"recv_mprobe", recv_mprobe) errors = getEnv(rc, b"errors", errors) # if initialize in (True, 'yes'): opts.initialize = 1 elif initialize in (False, 'no'): opts.initialize = 0 else: warnOpt("initialize", initialize) # if threads in (True, 'yes'): opts.threads = 1 elif threads in (False, 'no'): opts.threads = 0 else: warnOpt("threads", threads) # if thread_level == 'single': opts.thread_level = MPI_THREAD_SINGLE elif thread_level == 'funneled': opts.thread_level = MPI_THREAD_FUNNELED elif thread_level == 'serialized': opts.thread_level = MPI_THREAD_SERIALIZED elif thread_level == 'multiple': opts.thread_level = MPI_THREAD_MULTIPLE else: warnOpt("thread_level", thread_level) # if finalize is None: opts.finalize = opts.initialize elif finalize in (True, 'yes'): opts.finalize = 1 elif finalize in (False, 'no'): opts.finalize = 0 else: warnOpt("finalize", finalize) # if fast_reduce in (True, 'yes'): opts.fast_reduce = 1 elif fast_reduce in (False, 'no'): opts.fast_reduce = 0 else: warnOpt("fast_reduce", fast_reduce) # if recv_mprobe in (True, 'yes'): opts.recv_mprobe = 1 and USE_MATCHED_RECV elif recv_mprobe in (False, 'no'): 
opts.recv_mprobe = 0 else: warnOpt("recv_mprobe", recv_mprobe) # if errors == 'default': opts.errors = 0 elif errors == 'exception': opts.errors = 1 elif errors == 'fatal': opts.errors = 2 else: warnOpt("errors", errors) # return 0 # ----------------------------------------------------------------------------- cdef extern from *: int PyMPI_Commctx_finalize() nogil cdef int bootstrap() except -1: # Get options from 'mpi4py.rc' module getOptions(&options) # Cleanup at (the very end of) Python exit if Py_AtExit(atexit) < 0: PySys_WriteStderr(b"warning: could not register " b"cleanup with Py_AtExit()%s", b"\n") # Do we have to initialize MPI? cdef int initialized = 1 MPI_Initialized(&initialized) if initialized: options.finalize = 0 return 0 if not options.initialize: return 0 # MPI initialization cdef int ierr = MPI_SUCCESS cdef int required = MPI_THREAD_SINGLE cdef int provided = MPI_THREAD_SINGLE if options.threads: required = options.thread_level ierr = MPI_Init_thread(NULL, NULL, required, &provided) if ierr != MPI_SUCCESS: raise RuntimeError( "MPI_Init_thread() failed [error code: %d]" % ierr) else: ierr = MPI_Init(NULL, NULL) if ierr != MPI_SUCCESS: raise RuntimeError( "MPI_Init() failed [error code: %d]" % ierr) return 0 cdef inline int mpi_active() nogil: cdef int ierr = MPI_SUCCESS # MPI initialized ? cdef int initialized = 0 ierr = MPI_Initialized(&initialized) if not initialized or ierr != MPI_SUCCESS: return 0 # MPI finalized ? cdef int finalized = 1 ierr = MPI_Finalized(&finalized) if finalized or ierr != MPI_SUCCESS: return 0 # MPI should be active ... return 1 cdef int initialize() nogil except -1: if not mpi_active(): return 0 comm_set_eh(MPI_COMM_SELF) comm_set_eh(MPI_COMM_WORLD) return 0 cdef void finalize() nogil: if not mpi_active(): return PyMPI_Commctx_finalize() cdef int abort_status = 0 cdef void atexit() nogil: if not mpi_active(): return if abort_status: MPI_Abort(MPI_COMM_WORLD, abort_status) finalize() if options.finalize: MPI_Finalize() def _set_abort_status(object status: Any) -> None: "Helper for ``python -m mpi4py.run ...``" global abort_status try: abort_status = status except: abort_status = 1 if status else 0 # ----------------------------------------------------------------------------- # Vile hack for raising a exception and not contaminate the traceback cdef extern from *: enum: PyMPI_ERR_UNAVAILABLE cdef extern from "Python.h": void PyErr_SetObject(object, object) void *PyExc_RuntimeError void *PyExc_NotImplementedError cdef object MPIException = PyExc_RuntimeError cdef int PyMPI_Raise(int ierr) except -1 with gil: if ierr == PyMPI_ERR_UNAVAILABLE: PyErr_SetObject(PyExc_NotImplementedError, None) return 0 if (MPIException) != NULL: PyErr_SetObject(MPIException, ierr) else: PyErr_SetObject(PyExc_RuntimeError, ierr) return 0 cdef inline int CHKERR(int ierr) nogil except -1: if ierr == MPI_SUCCESS: return 0 PyMPI_Raise(ierr) return -1 cdef inline void print_traceback(): cdef object sys, traceback import sys, traceback traceback.print_exc() try: sys.stderr.flush() except: pass # ----------------------------------------------------------------------------- # PyPy: Py_IsInitialized() cannot be called without the GIL cdef extern from "Python.h": int _Py_IsInitialized"Py_IsInitialized"() nogil cdef object _pypy_sentinel = None cdef inline int Py_IsInitialized() nogil: if PYPY and (_pypy_sentinel) == NULL: return 0 return _Py_IsInitialized() # ----------------------------------------------------------------------------- 
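# --- illustrative usage sketch (not part of the original source) --------------
# The options parsed above come from the `mpi4py.rc` module and the matching
# MPI4PY_RC_* environment variables, and must be set *before* importing
# mpi4py.MPI. A minimal sketch of the user-facing side:
import mpi4py
mpi4py.rc.initialize = True            # call MPI_Init_thread at import time
mpi4py.rc.threads = True
mpi4py.rc.thread_level = "funneled"    # 'single' | 'funneled' | 'serialized' | 'multiple'
mpi4py.rc.finalize = None              # finalize at exit iff we initialized
mpi4py.rc.errors = "exception"         # 'default' | 'exception' | 'fatal'

from mpi4py import MPI
assert MPI.Is_initialized()

# The same options can be set from the environment, for example:
#   MPI4PY_RC_THREAD_LEVEL=serialized python script.py
# --- end sketch ----------------------------------------------------------------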
mpi4py-3.1.6/src/mpi4py/MPI/attrimpl.pxi000066400000000000000000000146251460670727200200020ustar00rootroot00000000000000#------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_keyval: cdef public object copy_fn cdef public object delete_fn cdef public bint nopython def __cinit__(self, copy_fn, delete_fn, nopython): if copy_fn is False: copy_fn = None if delete_fn is False: delete_fn = None if delete_fn is True: delete_fn = None self.copy_fn = copy_fn self.delete_fn = delete_fn self.nopython = nopython cdef dict type_keyval = {} cdef dict comm_keyval = {} cdef dict win_keyval = {} _keyval_registry = { 'Datatype' : type_keyval, 'Comm' : comm_keyval, 'Win' : win_keyval, } #------------------------------------------------------------------------------ ctypedef fused PyMPI_attr_type: MPI_Datatype MPI_Comm MPI_Win cdef inline object PyMPI_attr_call( object function, PyMPI_attr_type hdl, int keyval, object attrval): cdef object ob cdef object result if PyMPI_attr_type is MPI_Datatype: ob = new_Datatype(hdl) if PyMPI_attr_type is MPI_Comm: ob = new_Comm(hdl) if PyMPI_attr_type is MPI_Win: ob = new_Win (hdl) try: result = function(ob, keyval, attrval) finally: if PyMPI_attr_type is MPI_Datatype: (ob).ob_mpi = MPI_DATATYPE_NULL if PyMPI_attr_type is MPI_Comm: (ob).ob_mpi = MPI_COMM_NULL if PyMPI_attr_type is MPI_Win: (ob).ob_mpi = MPI_WIN_NULL return result cdef inline int PyMPI_attr_copy( PyMPI_attr_type hdl, int keyval, void *extra_state, void *attrval_in, void *attrval_out, int *flag) except -1: if flag != NULL: flag[0] = 0 cdef _p_keyval state = <_p_keyval>extra_state if state.copy_fn is None: return 0 cdef int p = not state.nopython if p: assert attrval_in != NULL cdef object attrval if p: attrval = attrval_in else: attrval = PyLong_FromVoidPtr(attrval_in) if state.copy_fn is not True: attrval = PyMPI_attr_call(state.copy_fn, hdl, keyval, attrval) if attrval is NotImplemented: return 0 cdef void **outval = attrval_out if p: outval[0] = attrval else: outval[0] = PyLong_AsVoidPtr(attrval) if flag != NULL: flag[0] = 1 if p: Py_INCREF(attrval) Py_INCREF(state) return 0 cdef inline int PyMPI_attr_delete( PyMPI_attr_type hdl, int keyval, void *attrval_in, void *extra_state) except -1: cdef _p_keyval state = <_p_keyval>extra_state cdef int p = not state.nopython if p: assert attrval_in != NULL cdef object attrval if p: attrval = attrval_in else: attrval = PyLong_FromVoidPtr(attrval_in) if state.delete_fn is not None: PyMPI_attr_call(state.delete_fn, hdl, keyval, attrval) if p: Py_DECREF(attrval) Py_DECREF(state) return 0 cdef inline int PyMPI_attr_copy_cb( PyMPI_attr_type hdl, int keyval, void *extra_state, void *attrval_in, void *attrval_out, int *flag, ) except MPI_ERR_UNKNOWN with gil: cdef int ierr = MPI_SUCCESS cdef object exc try: PyMPI_attr_copy(hdl, keyval, extra_state, attrval_in, attrval_out, flag) except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER return ierr cdef inline int PyMPI_attr_delete_cb( PyMPI_attr_type hdl, int keyval, void *attrval, void *extra_state, ) except MPI_ERR_UNKNOWN with gil: cdef int ierr = MPI_SUCCESS cdef object exc try: PyMPI_attr_delete(hdl, keyval, attrval, extra_state) except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER return ierr @cython.callspec("MPIAPI") cdef int PyMPI_attr_copy_fn(PyMPI_attr_type hdl, int keyval, void *extra_state, void *attrval_in, void 
*attrval_out, int *flag) nogil: if flag != NULL: flag[0] = 0 if extra_state == NULL: return MPI_ERR_INTERN if attrval_out == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_SUCCESS return PyMPI_attr_copy_cb(hdl, keyval, extra_state, attrval_in, attrval_out, flag) @cython.callspec("MPIAPI") cdef int PyMPI_attr_delete_fn(PyMPI_attr_type hdl, int keyval, void *attrval, void *extra_state) nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_SUCCESS return PyMPI_attr_delete_cb(hdl, keyval, attrval, extra_state) #------------------------------------------------------------------------------ cdef inline _p_keyval PyMPI_attr_state( PyMPI_attr_type hdl, int keyval): hdl # unused if PyMPI_attr_type is MPI_Datatype: return <_p_keyval>type_keyval.get(keyval) elif PyMPI_attr_type is MPI_Comm: return <_p_keyval>comm_keyval.get(keyval) elif PyMPI_attr_type is MPI_Win: return <_p_keyval>win_keyval.get(keyval) cdef inline object PyMPI_attr_get( PyMPI_attr_type hdl, int keyval, void *attrval): cdef _p_keyval state = PyMPI_attr_state(hdl, keyval) if state is not None and not state.nopython: return attrval else: return PyLong_FromVoidPtr(attrval) cdef inline int PyMPI_attr_set( PyMPI_attr_type hdl, int keyval, object attrval, ) except -1: cdef _p_keyval state = PyMPI_attr_state(hdl, keyval) cdef void *valptr = NULL if state is not None and not state.nopython: valptr = attrval else: valptr = PyLong_AsVoidPtr(attrval) if PyMPI_attr_type is MPI_Datatype: CHKERR( MPI_Type_set_attr(hdl, keyval, valptr) ) if PyMPI_attr_type is MPI_Comm: CHKERR( MPI_Comm_set_attr(hdl, keyval, valptr) ) if PyMPI_attr_type is MPI_Win: CHKERR( MPI_Win_set_attr(hdl, keyval, valptr) ) if state is not None: if not state.nopython: Py_INCREF(attrval) Py_INCREF(state) return 0 #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/commimpl.pxi000066400000000000000000000122641460670727200177600ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef memory _buffer = None cdef inline int attach_buffer(ob, void **p, int *n) except -1: global _buffer cdef void *bptr = NULL cdef MPI_Aint blen = 0 _buffer = getbuffer_w(ob, &bptr, &blen) p[0] = bptr n[0] = clipcount(blen) return 0 cdef inline object detach_buffer(void *p, int n): global _buffer cdef object ob = None try: if (_buffer is not None and _buffer.view.buf == p and _buffer.view.obj != NULL): ob = _buffer.view.obj else: ob = tomemory(p, n) finally: _buffer = None return ob # ----------------------------------------------------------------------------- cdef object __UNWEIGHTED__ = MPI_UNWEIGHTED cdef object __WEIGHTS_EMPTY__ = MPI_WEIGHTS_EMPTY cdef inline bint is_weights(object obj, object CONST): if PYPY: return type(obj) is type(CONST) and obj == CONST else: return obj is CONST cdef inline bint is_UNWEIGHTED(object weights): return is_weights(weights, __UNWEIGHTED__) cdef inline bint is_WEIGHTS_EMPTY(object weights): return is_weights(weights, __WEIGHTS_EMPTY__) cdef object asarray_weights(object weights, int nweight, int **iweight): if weights is None: iweight[0] = MPI_UNWEIGHTED return None if is_UNWEIGHTED(weights): iweight[0] = MPI_UNWEIGHTED return None if is_WEIGHTS_EMPTY(weights): if nweight > 0: raise ValueError("empty weights but nonzero degree") iweight[0] = MPI_WEIGHTS_EMPTY return None return chkarray(weights, nweight, iweight) # ----------------------------------------------------------------------------- cdef 
inline int comm_neighbors_count(MPI_Comm comm, int *incoming, int *outgoing, ) except -1: cdef int topo = MPI_UNDEFINED cdef int size=0, ndims=0, rank=0, nneighbors=0 cdef int indegree=0, outdegree=0, weighted=0 CHKERR( MPI_Topo_test(comm, &topo) ) if topo == MPI_UNDEFINED: # XXX CHKERR( MPI_Comm_size(comm, &size) ) indegree = outdegree = size elif topo == MPI_CART: CHKERR( MPI_Cartdim_get(comm, &ndims) ) indegree = outdegree = 2*ndims elif topo == MPI_GRAPH: CHKERR( MPI_Comm_rank(comm, &rank) ) CHKERR( MPI_Graph_neighbors_count( comm, rank, &nneighbors) ) indegree = outdegree = nneighbors elif topo == MPI_DIST_GRAPH: CHKERR( MPI_Dist_graph_neighbors_count( comm, &indegree, &outdegree, &weighted) ) if incoming != NULL: incoming[0] = indegree if outgoing != NULL: outgoing[0] = outdegree return 0 # ----------------------------------------------------------------------------- cdef object Lock = None if PY3: try: from _thread import allocate_lock as Lock except ImportError: from _dummy_thread import allocate_lock as Lock else: try: from thread import allocate_lock as Lock except ImportError: from dummy_thread import allocate_lock as Lock cdef int lock_keyval = MPI_KEYVAL_INVALID cdef dict lock_registry = {} cdef inline int lock_free_cb(MPI_Comm comm) \ except MPI_ERR_UNKNOWN with gil: try: del lock_registry[comm] except KeyError: pass return MPI_SUCCESS @cython.callspec("MPIAPI") cdef int lock_free_fn(MPI_Comm comm, int keyval, void *attrval, void *xstate) nogil: if comm == MPI_COMM_SELF: return MPI_Comm_free_keyval(&lock_keyval) if not Py_IsInitialized(): return MPI_SUCCESS if lock_registry == NULL: return MPI_SUCCESS return lock_free_cb(comm) cdef inline dict PyMPI_Lock_table(MPI_Comm comm): cdef dict table cdef int found = 0 cdef void *attrval = NULL if lock_keyval == MPI_KEYVAL_INVALID: CHKERR( MPI_Comm_create_keyval( MPI_COMM_NULL_COPY_FN, lock_free_fn, &lock_keyval, NULL) ) lock_registry[MPI_COMM_SELF] = table = {} CHKERR( MPI_Comm_set_attr(MPI_COMM_SELF, lock_keyval, table) ) CHKERR( MPI_Comm_get_attr(comm, lock_keyval, &attrval, &found) ) if not found: lock_registry[comm] = table = {} CHKERR( MPI_Comm_set_attr(comm, lock_keyval, table) ) else: if PYPY: table = lock_registry[comm] else: table = attrval return table cdef inline object PyMPI_Lock(MPI_Comm comm, object key): cdef dict table = PyMPI_Lock_table(comm) cdef object lock try: lock = table[key] except KeyError: lock = table[key] = Lock() return lock def _comm_lock(Comm comm: Comm, object key: Hashable = None) -> Lock: "Create/get communicator lock" return PyMPI_Lock(comm.ob_mpi, key) def _comm_lock_table(Comm comm: Comm) -> Dict[Hashable, Lock]: "Internal communicator lock table" return PyMPI_Lock_table(comm.ob_mpi) _lock_table = _comm_lock_table # backward-compatibility # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/drepimpl.pxi000066400000000000000000000122241460670727200177530ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef dict datarep_registry = {} @cython.final @cython.internal cdef class _p_datarep: cdef object read_fn cdef object write_fn cdef object extent_fn def __cinit__(self, read_fn, write_fn, extent_fn): self.read_fn = read_fn self.write_fn = write_fn self.extent_fn = extent_fn cdef int read(self, void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, ) except -1: cdef MPI_Aint lb=0, extent=0 cdef int ierr = MPI_Type_get_extent(datatype, &lb, &extent) if ierr != 
MPI_SUCCESS: return ierr cdef MPI_Aint ulen = (position+count) * extent cdef MPI_Aint flen = PY_SSIZE_T_MAX # XXX cdef object ubuf = tomemory(userbuf, ulen) cdef object fbuf = tomemory(filebuf, flen) cdef Datatype dtype = Datatype.__new__(Datatype) dtype.ob_mpi = datatype try: self.read_fn(ubuf, dtype, count, fbuf, position) finally: dtype.ob_mpi = MPI_DATATYPE_NULL return MPI_SUCCESS cdef int write(self, void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, ) except -1: cdef MPI_Aint lb=0, extent=0 cdef int ierr = MPI_Type_get_extent(datatype, &lb, &extent) if ierr != MPI_SUCCESS: return ierr cdef MPI_Aint ulen = (position+count) * extent cdef MPI_Aint flen = PY_SSIZE_T_MAX # XXX cdef object ubuf = tomemory(userbuf, ulen) cdef object fbuf = tomemory(filebuf, flen) cdef Datatype dtype = Datatype.__new__(Datatype) dtype.ob_mpi = datatype try: self.write_fn(ubuf, dtype, count, fbuf, position) finally: dtype.ob_mpi = MPI_DATATYPE_NULL return MPI_SUCCESS cdef int extent(self, MPI_Datatype datatype, MPI_Aint *file_extent, ) except -1: cdef Datatype dtype = Datatype.__new__(Datatype) dtype.ob_mpi = datatype try: file_extent[0] = self.extent_fn(dtype) finally: dtype.ob_mpi = MPI_DATATYPE_NULL return MPI_SUCCESS # --- cdef int datarep_read( void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, void *extra_state, ) except MPI_ERR_UNKNOWN with gil: cdef _p_datarep state = <_p_datarep>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.read(userbuf, datatype, count, filebuf, position) except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER return ierr cdef int datarep_write( void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, void *extra_state, ) except MPI_ERR_UNKNOWN with gil: cdef _p_datarep state = <_p_datarep>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.write(userbuf, datatype, count, filebuf, position) except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER return ierr cdef int datarep_extent( MPI_Datatype datatype, MPI_Aint *file_extent, void *extra_state, ) except MPI_ERR_UNKNOWN with gil: cdef _p_datarep state = <_p_datarep>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.extent(datatype, file_extent) except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER return ierr # --- @cython.callspec("MPIAPI") cdef int datarep_read_fn( void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, void *extra_state ) nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN return datarep_read(userbuf, datatype, count, filebuf, position, extra_state) @cython.callspec("MPIAPI") cdef int datarep_write_fn( void *userbuf, MPI_Datatype datatype, int count, void *filebuf, MPI_Offset position, void *extra_state ) nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN return datarep_write(userbuf, datatype, count, filebuf, position, extra_state) @cython.callspec("MPIAPI") cdef int datarep_extent_fn( MPI_Datatype datatype, MPI_Aint *file_extent, void *extra_state ) nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN return datarep_extent(datatype, file_extent, extra_state) # 
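# A minimal sketch of the user-defined data-representation callbacks driven by
# the machinery above. The callback signatures follow the calls made here
# (read_fn(userbuf, datatype, count, filebuf, position), write_fn(...) with the
# same arguments, extent_fn(datatype)); the byte-for-byte copy bodies and the
# Register_datarep entry point are illustrative assumptions, not part of this
# file:
#
#     from mpi4py import MPI
#
#     def extent_fn(datatype):
#         # file extent of one item, as for a native-like representation
#         return datatype.extent
#
#     def read_fn(userbuf, datatype, count, filebuf, position):
#         # copy `count` items from the file buffer into the user buffer,
#         # starting at item offset `position`
#         nbytes = count * datatype.extent
#         start = position * datatype.extent
#         userbuf[start:start + nbytes] = filebuf[:nbytes]
#
#     def write_fn(userbuf, datatype, count, filebuf, position):
#         nbytes = count * datatype.extent
#         start = position * datatype.extent
#         filebuf[:nbytes] = userbuf[start:start + nbytes]
#
#     MPI.Register_datarep("native-copy", read_fn, write_fn, extent_fn)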
----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/helpers.pxi000066400000000000000000000270511460670727200176050ustar00rootroot00000000000000#------------------------------------------------------------------------------ cdef extern from "Python.h": enum: Py_LT enum: Py_LE enum: Py_EQ enum: Py_NE enum: Py_GT enum: Py_GE #------------------------------------------------------------------------------ cdef enum PyMPI_OBJECT_FLAGS: PyMPI_OWNED = 1<<1 #------------------------------------------------------------------------------ # Status cdef extern from * nogil: int PyMPI_Status_get_source(MPI_Status*, int*) int PyMPI_Status_set_source(MPI_Status*, int) int PyMPI_Status_get_tag(MPI_Status*, int*) int PyMPI_Status_set_tag(MPI_Status*, int) int PyMPI_Status_get_error(MPI_Status*, int*) int PyMPI_Status_set_error(MPI_Status*, int) cdef inline MPI_Status *arg_Status(object status): if status is None: return MPI_STATUS_IGNORE return &((status).ob_mpi) #------------------------------------------------------------------------------ # Datatype cdef inline int builtin_Datatype(MPI_Datatype ob): cdef int ni = 0, na = 0, nt = 0, combiner = MPI_UNDEFINED if ob == MPI_DATATYPE_NULL: return 1 cdef int ierr = MPI_Type_get_envelope(ob, &ni, &na, &nt, &combiner) if ierr != MPI_SUCCESS: return 0 # XXX return (combiner == MPI_COMBINER_NAMED or combiner == MPI_COMBINER_F90_INTEGER or combiner == MPI_COMBINER_F90_REAL or combiner == MPI_COMBINER_F90_COMPLEX) cdef inline Datatype new_Datatype(MPI_Datatype ob): cdef Datatype datatype = Datatype.__new__(Datatype) datatype.ob_mpi = ob return datatype cdef inline Datatype ref_Datatype(MPI_Datatype ob): cdef Datatype datatype = Datatype.__new__(Datatype) datatype.ob_mpi = ob return datatype cdef inline int del_Datatype(MPI_Datatype* ob): if ob == NULL : return 0 if ob[0] == MPI_DATATYPE_NULL : return 0 if ob[0] == MPI_UB : return 0 if ob[0] == MPI_LB : return 0 if ob[0] == MPI_PACKED : return 0 if ob[0] == MPI_BYTE : return 0 if ob[0] == MPI_AINT : return 0 if ob[0] == MPI_OFFSET : return 0 if ob[0] == MPI_COUNT : return 0 if ob[0] == MPI_CHAR : return 0 if ob[0] == MPI_WCHAR : return 0 if ob[0] == MPI_SIGNED_CHAR : return 0 if ob[0] == MPI_SHORT : return 0 if ob[0] == MPI_INT : return 0 if ob[0] == MPI_LONG : return 0 if ob[0] == MPI_LONG_LONG : return 0 if ob[0] == MPI_UNSIGNED_CHAR : return 0 if ob[0] == MPI_UNSIGNED_SHORT : return 0 if ob[0] == MPI_UNSIGNED : return 0 if ob[0] == MPI_UNSIGNED_LONG : return 0 if ob[0] == MPI_UNSIGNED_LONG_LONG : return 0 if ob[0] == MPI_FLOAT : return 0 if ob[0] == MPI_DOUBLE : return 0 if ob[0] == MPI_LONG_DOUBLE : return 0 if ob[0] == MPI_C_BOOL : return 0 if ob[0] == MPI_INT8_T : return 0 if ob[0] == MPI_INT16_T : return 0 if ob[0] == MPI_INT32_T : return 0 if ob[0] == MPI_INT64_T : return 0 if ob[0] == MPI_UINT8_T : return 0 if ob[0] == MPI_UINT16_T : return 0 if ob[0] == MPI_UINT32_T : return 0 if ob[0] == MPI_UINT64_T : return 0 if ob[0] == MPI_C_COMPLEX : return 0 if ob[0] == MPI_C_FLOAT_COMPLEX : return 0 if ob[0] == MPI_C_DOUBLE_COMPLEX : return 0 if ob[0] == MPI_C_LONG_DOUBLE_COMPLEX : return 0 if ob[0] == MPI_CXX_BOOL : return 0 if ob[0] == MPI_CXX_FLOAT_COMPLEX : return 0 if ob[0] == MPI_CXX_DOUBLE_COMPLEX : return 0 if ob[0] == MPI_CXX_LONG_DOUBLE_COMPLEX : return 0 if ob[0] == MPI_SHORT_INT : return 0 if ob[0] == MPI_2INT : return 0 if ob[0] == MPI_LONG_INT : return 0 if ob[0] == MPI_FLOAT_INT : return 0 if ob[0] == MPI_DOUBLE_INT : return 0 if ob[0] == 
MPI_LONG_DOUBLE_INT : return 0 if ob[0] == MPI_CHARACTER : return 0 if ob[0] == MPI_LOGICAL : return 0 if ob[0] == MPI_INTEGER : return 0 if ob[0] == MPI_REAL : return 0 if ob[0] == MPI_DOUBLE_PRECISION : return 0 if ob[0] == MPI_COMPLEX : return 0 if ob[0] == MPI_DOUBLE_COMPLEX : return 0 if ob[0] == MPI_LOGICAL1 : return 0 if ob[0] == MPI_LOGICAL2 : return 0 if ob[0] == MPI_LOGICAL4 : return 0 if ob[0] == MPI_LOGICAL8 : return 0 if ob[0] == MPI_INTEGER1 : return 0 if ob[0] == MPI_INTEGER2 : return 0 if ob[0] == MPI_INTEGER4 : return 0 if ob[0] == MPI_INTEGER8 : return 0 if ob[0] == MPI_INTEGER16 : return 0 if ob[0] == MPI_REAL2 : return 0 if ob[0] == MPI_REAL4 : return 0 if ob[0] == MPI_REAL8 : return 0 if ob[0] == MPI_REAL16 : return 0 if ob[0] == MPI_COMPLEX4 : return 0 if ob[0] == MPI_COMPLEX8 : return 0 if ob[0] == MPI_COMPLEX16 : return 0 if ob[0] == MPI_COMPLEX32 : return 0 # if not mpi_active(): return 0 if builtin_Datatype(ob[0]): return 0 # return MPI_Type_free(ob) #------------------------------------------------------------------------------ # Request include "reqimpl.pxi" cdef inline Request new_Request(MPI_Request ob): cdef Request request = Request.__new__(Request) request.ob_mpi = ob return request cdef inline int del_Request(MPI_Request* ob): if ob == NULL : return 0 if ob[0] == MPI_REQUEST_NULL : return 0 # if not mpi_active(): return 0 return MPI_Request_free(ob) #------------------------------------------------------------------------------ # Message cdef inline Message new_Message(MPI_Message ob): cdef Message message = Message.__new__(Message) message.ob_mpi = ob return message cdef inline int del_Message(MPI_Message* ob): if ob == NULL : return 0 if ob[0] == MPI_MESSAGE_NULL : return 0 if ob[0] == MPI_MESSAGE_NO_PROC : return 0 # if not mpi_active(): return 0 # ob[0] = MPI_MESSAGE_NULL return 0 #------------------------------------------------------------------------------ # Op include "opimpl.pxi" cdef inline Op new_Op(MPI_Op ob): cdef Op op = Op.__new__(Op) op.ob_mpi = ob if ob == MPI_OP_NULL : op.ob_func = NULL elif ob == MPI_MAX : op.ob_func = _op_MAX elif ob == MPI_MIN : op.ob_func = _op_MIN elif ob == MPI_SUM : op.ob_func = _op_SUM elif ob == MPI_PROD : op.ob_func = _op_PROD elif ob == MPI_LAND : op.ob_func = _op_LAND elif ob == MPI_BAND : op.ob_func = _op_BAND elif ob == MPI_LOR : op.ob_func = _op_LOR elif ob == MPI_BOR : op.ob_func = _op_BOR elif ob == MPI_LXOR : op.ob_func = _op_LXOR elif ob == MPI_BXOR : op.ob_func = _op_BXOR elif ob == MPI_MAXLOC : op.ob_func = _op_MAXLOC elif ob == MPI_MINLOC : op.ob_func = _op_MINLOC elif ob == MPI_REPLACE : op.ob_func = _op_REPLACE elif ob == MPI_NO_OP : op.ob_func = _op_NO_OP return op cdef inline int del_Op(MPI_Op* ob): if ob == NULL : return 0 if ob[0] == MPI_OP_NULL : return 0 if ob[0] == MPI_MAX : return 0 if ob[0] == MPI_MIN : return 0 if ob[0] == MPI_SUM : return 0 if ob[0] == MPI_PROD : return 0 if ob[0] == MPI_LAND : return 0 if ob[0] == MPI_BAND : return 0 if ob[0] == MPI_LOR : return 0 if ob[0] == MPI_BOR : return 0 if ob[0] == MPI_LXOR : return 0 if ob[0] == MPI_BXOR : return 0 if ob[0] == MPI_MAXLOC : return 0 if ob[0] == MPI_MINLOC : return 0 if ob[0] == MPI_REPLACE : return 0 if ob[0] == MPI_NO_OP : return 0 # if not mpi_active(): return 0 return MPI_Op_free(ob) #------------------------------------------------------------------------------ # Info cdef inline Info new_Info(MPI_Info ob): cdef Info info = Info.__new__(Info) info.ob_mpi = ob return info cdef inline int del_Info(MPI_Info* ob): if ob == NULL 
: return 0 if ob[0] == MPI_INFO_NULL : return 0 if ob[0] == MPI_INFO_ENV : return 0 # if not mpi_active(): return 0 return MPI_Info_free(ob) cdef inline MPI_Info arg_Info(object info): if info is None: return MPI_INFO_NULL return (info).ob_mpi #------------------------------------------------------------------------------ # Group cdef inline Group new_Group(MPI_Group ob): cdef Group group = Group.__new__(Group) group.ob_mpi = ob return group cdef inline int del_Group(MPI_Group* ob): if ob == NULL : return 0 if ob[0] == MPI_GROUP_NULL : return 0 if ob[0] == MPI_GROUP_EMPTY : return 0 # if not mpi_active(): return 0 return MPI_Group_free(ob) #------------------------------------------------------------------------------ # Comm include "commimpl.pxi" cdef inline Comm new_Comm(MPI_Comm ob): cdef Comm comm = Comm.__new__(Comm) comm.ob_mpi = ob return comm cdef inline Intracomm new_Intracomm(MPI_Comm ob): cdef Intracomm comm = Intracomm.__new__(Intracomm) comm.ob_mpi = ob return comm cdef inline Intercomm new_Intercomm(MPI_Comm ob): cdef Intercomm comm = Intercomm.__new__(Intercomm) comm.ob_mpi = ob return comm cdef inline int del_Comm(MPI_Comm* ob): if ob == NULL : return 0 if ob[0] == MPI_COMM_NULL : return 0 if ob[0] == MPI_COMM_SELF : return 0 if ob[0] == MPI_COMM_WORLD : return 0 # if not mpi_active(): return 0 return MPI_Comm_free(ob) #------------------------------------------------------------------------------ # Win include "winimpl.pxi" cdef inline Win new_Win(MPI_Win ob): cdef Win win = Win.__new__(Win) win.ob_mpi = ob return win cdef inline int del_Win(MPI_Win* ob): if ob == NULL : return 0 if ob[0] == MPI_WIN_NULL : return 0 # if not mpi_active(): return 0 return MPI_Win_free(ob) #------------------------------------------------------------------------------ # File include "drepimpl.pxi" cdef inline File new_File(MPI_File ob): cdef File file = File.__new__(File) file.ob_mpi = ob return file cdef inline int del_File(MPI_File* ob): if ob == NULL : return 0 if ob[0] == MPI_FILE_NULL : return 0 # if not mpi_active(): return 0 return MPI_File_close(ob) #------------------------------------------------------------------------------ # Errhandler cdef inline Errhandler new_Errhandler(MPI_Errhandler ob): cdef Errhandler errhandler = Errhandler.__new__(Errhandler) errhandler.ob_mpi = ob return errhandler cdef inline int del_Errhandler(MPI_Errhandler* ob): if ob == NULL : return 0 if ob[0] == MPI_ERRHANDLER_NULL : return 0 if ob[0] == MPI_ERRORS_RETURN : return 0 if ob[0] == MPI_ERRORS_ARE_FATAL : return 0 # if not mpi_active(): return 0 return MPI_Errhandler_free(ob) #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/mpierrhdl.pxi000066400000000000000000000021421460670727200201230ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef inline int comm_set_eh(MPI_Comm ob) nogil except -1: if ob == MPI_COMM_NULL: return 0 cdef int opt = options.errors if opt == 0: pass elif opt == 1: CHKERR( MPI_Comm_set_errhandler(ob, MPI_ERRORS_RETURN) ) elif opt == 2: CHKERR( MPI_Comm_set_errhandler(ob, MPI_ERRORS_ARE_FATAL) ) return 0 cdef inline int win_set_eh(MPI_Win ob) nogil except -1: if ob == MPI_WIN_NULL: return 0 cdef int opt = options.errors if opt == 0: pass elif opt == 1: CHKERR( MPI_Win_set_errhandler(ob, MPI_ERRORS_RETURN) ) elif opt == 2: CHKERR( MPI_Win_set_errhandler(ob, MPI_ERRORS_ARE_FATAL) ) return 0 cdef inline int file_set_eh(MPI_File ob) nogil except -1: if ob == 
MPI_FILE_NULL: return 0 cdef int opt = options.errors if opt == 0: pass elif opt == 1: CHKERR( MPI_File_set_errhandler(ob, MPI_ERRORS_RETURN) ) elif opt == 2: CHKERR( MPI_File_set_errhandler(ob, MPI_ERRORS_ARE_FATAL) ) return 0 # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/msgbuffer.pxi000066400000000000000000001211431460670727200201200ustar00rootroot00000000000000#------------------------------------------------------------------------------ cdef extern from "Python.h": int is_list "PyList_Check" (object) int is_tuple "PyTuple_Check" (object) cdef extern from "Python.h": int PyIndex_Check(object) int PySequence_Check(object) object PyNumber_Index(object) Py_ssize_t PySequence_Size(object) except -1 cdef inline int is_integral(object ob): if not PyIndex_Check(ob): return 0 if not PySequence_Check(ob): return 1 try: PySequence_Size(ob) except: pass else: return 0 try: PyNumber_Index(ob) except: return 0 else: return 1 cdef inline int is_buffer(object ob): if PY3: return PyObject_CheckBuffer(ob) else: return PyObject_CheckBuffer(ob) or _Py2_IsBuffer(ob) cdef inline int is_dlpack_buffer(object ob): return Py_CheckDLPackBuffer(ob) cdef inline int is_cai_buffer(object ob): return Py_CheckCAIBuffer(ob) cdef inline int is_datatype(object ob): if isinstance(ob, Datatype): return 1 if PY3: if isinstance(ob, unicode): return 1 else: if isinstance(ob, bytes): return 1 return 0 #------------------------------------------------------------------------------ cdef extern from *: int INT_MAX cdef inline int downcast(MPI_Aint value) except? -1: if value > INT_MAX: raise OverflowError("integer %d does not fit in 'int'" % value) else: return value cdef inline int clipcount(MPI_Aint value): if value > INT_MAX: return INT_MAX else: return value #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class Bottom(int): """ Type of `BOTTOM` """ def __cinit__(self): cdef MPI_Aint a = self, b = MPI_BOTTOM if a != b : raise ValueError("cannot create instance") def __repr__(self): return 'BOTTOM' @cython.final @cython.internal cdef class InPlace(int): """ Type of `IN_PLACE` """ def __cinit__(self): cdef MPI_Aint a = self, b = MPI_IN_PLACE if a != b : raise ValueError("cannot create instance") def __repr__(self): return 'IN_PLACE' cdef object __BOTTOM__ = Bottom(MPI_BOTTOM) cdef object __IN_PLACE__ = InPlace(MPI_IN_PLACE) cdef inline bint is_BOTTOM(object obj): if PYPY: return type(obj) is type(__BOTTOM__) and obj == __BOTTOM__ else: return obj is __BOTTOM__ cdef inline bint is_IN_PLACE(object obj): if PYPY: return type(obj) is type(__IN_PLACE__) and obj == __IN_PLACE__ else: return obj is __IN_PLACE__ #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_message: cdef memory buf cdef object count cdef object displ cdef Datatype type cdef _p_message message_basic(object o_buf, object o_type, bint readonly, # void **baddr, MPI_Aint *bsize, MPI_Datatype *btype, ): cdef _p_message m = _p_message.__new__(_p_message) # special-case for BOTTOM or None, # an explicit MPI datatype is required if is_BOTTOM(o_buf) or o_buf is None: if isinstance(o_type, Datatype): m.type = o_type else: m.type = TypeDict[o_type] m.buf = newbuffer() baddr[0] = MPI_BOTTOM bsize[0] = 0 btype[0] = m.type.ob_mpi return m # get buffer base address and length cdef bint fmt = (o_type is None) m.buf = getbuffer(o_buf, readonly, fmt) baddr[0] = m.buf.view.buf 
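# Note on the buffer argument handled above: any object exporting the PEP 3118
# buffer protocol can supply the base address and byte length, and the checks
# earlier in this file also admit DLPack and __cuda_array_interface__
# exporters; MPI.BOTTOM (or None) instead requires an explicit MPI datatype.
# A sketch of the BOTTOM case, assuming `comm` is a communicator and `x`, `y`
# are caller-allocated NumPy arrays of float64 and int32 respectively:
#
#     blocks = [x.size, y.size]
#     displs = [MPI.Get_address(x), MPI.Get_address(y)]
#     types  = [MPI.DOUBLE, MPI.INT]
#     abstype = MPI.Datatype.Create_struct(blocks, displs, types).Commit()
#     comm.Send([MPI.BOTTOM, 1, abstype], dest=1, tag=0)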
bsize[0] = m.buf.view.len # lookup datatype if not provided or not a Datatype if isinstance(o_type, Datatype): m.type = o_type elif o_type is None: m.type = TypeDict[getformat(m.buf)] else: m.type = TypeDict[o_type] btype[0] = m.type.ob_mpi # and we are done ... return m cdef _p_message message_simple(object msg, int readonly, int rank, int blocks, # void **_addr, int *_count, MPI_Datatype *_type, ): # special-case PROC_NULL target rank if rank == MPI_PROC_NULL: _addr[0] = NULL _count[0] = 0 _type[0] = MPI_BYTE return None # unpack message list/tuple cdef Py_ssize_t nargs = 0 cdef object o_buf = None cdef object o_count = None cdef object o_displ = None cdef object o_type = None if is_buffer(msg): o_buf = msg elif is_list(msg) or is_tuple(msg): nargs = len(msg) if nargs == 2: (o_buf, o_count) = msg if is_datatype(o_count): (o_count, o_type) = None, o_count elif is_tuple(o_count) or is_list(o_count): (o_count, o_displ) = o_count elif nargs == 3: (o_buf, o_count, o_type) = msg if is_tuple(o_count) or is_list(o_count): (o_count, o_displ) = o_count elif nargs == 4: (o_buf, o_count, o_displ, o_type) = msg else: raise ValueError("message: expecting 2 to 4 items") elif is_dlpack_buffer(msg): o_buf = msg elif is_cai_buffer(msg): o_buf = msg elif PYPY: o_buf = msg else: raise TypeError("message: expecting buffer or list/tuple") # buffer: address, length, and datatype cdef void *baddr = NULL cdef MPI_Aint bsize = 0 cdef MPI_Datatype btype = MPI_DATATYPE_NULL cdef _p_message m = message_basic(o_buf, o_type, readonly, &baddr, &bsize, &btype) # buffer: count and displacement cdef int count = 0 # number of datatype entries cdef int displ = 0 # from base buffer, in datatype entries cdef MPI_Aint extent = 0, lb = 0 # datatype extent cdef MPI_Aint length = bsize # in bytes cdef MPI_Aint offset = 0 # from base buffer, in bytes if o_displ is not None: displ = o_displ if displ < 0: raise ValueError( "message: negative diplacement %d" % displ) if displ > 0: if btype == MPI_DATATYPE_NULL: raise ValueError( "message: cannot handle diplacement, datatype is null") CHKERR( MPI_Type_get_extent(btype, &lb, &extent) ) if extent <= 0: raise ValueError( ("message: cannot handle diplacement, " "datatype extent %d (lb:%d, ub:%d)" ) % (extent, lb, lb+extent)) if displ*extent > length: raise ValueError( ("message: displacement %d out of bounds, " "number of datatype entries %d" ) % (displ, length//extent)) offset = displ*extent length -= offset if o_count is not None: count = o_count if count < 0: raise ValueError( "message: negative count %d" % count) if count > 0 and o_buf is None: raise ValueError( "message: buffer is None but count is %d" % count) elif length > 0: if extent == 0: if btype == MPI_DATATYPE_NULL: raise ValueError( "message: cannot infer count, datatype is null") CHKERR( MPI_Type_get_extent(btype, &lb, &extent) ) if extent <= 0: raise ValueError( ("message: cannot infer count, " "datatype extent %d (lb:%d, ub:%d)" ) % (extent, lb, lb+extent)) if (length % extent) != 0: raise ValueError( ("message: cannot infer count, " "buffer length %d is not a multiple of " "datatype extent %d (lb:%d, ub:%d)" ) % (length, extent, lb, lb+extent)) if blocks < 2: count = downcast(length // extent) else: if ((length // extent) % blocks) != 0: raise ValueError( ("message: cannot infer count, " "number of entries %d is not a multiple of " "required number of blocks %d" ) % (length//extent, blocks)) count = downcast((length // extent) // blocks) # return collected message data m.count = o_count if o_count is not None else count 
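# The message specifications accepted by this parsing, illustrated (assuming
# `a` is a contiguous NumPy array of doubles and `comm` is a communicator):
#
#     comm.Send(a, dest=1)                      # bare buffer, datatype inferred
#     comm.Send([a, MPI.DOUBLE], dest=1)        # (buffer, datatype)
#     comm.Send([a, 4, MPI.DOUBLE], dest=1)     # (buffer, count, datatype)
#     comm.Send([a, 4, 2, MPI.DOUBLE], dest=1)  # (buffer, count, displ, datatype)
#
# The displacement is counted in datatype entries from the buffer base, and
# the count/displacement pair may also be given as a nested (count, displ)
# tuple in the two- and three-item forms.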
m.displ = o_displ if o_displ is not None else displ _addr[0] = (baddr + offset) _count[0] = count _type[0] = btype return m cdef _p_message message_vector(object msg, int readonly, int rank, int blocks, # void **_addr, int **_counts, int **_displs, MPI_Datatype *_type, ): # special-case PROC_NULL target rank if rank == MPI_PROC_NULL: _addr[0] = NULL _counts[0] = NULL _displs[0] = NULL _type[0] = MPI_BYTE return None # unpack message list/tuple cdef Py_ssize_t nargs = 0 cdef object o_buf = None cdef object o_counts = None cdef object o_displs = None cdef object o_type = None if is_buffer(msg): o_buf = msg elif is_list(msg) or is_tuple(msg): nargs = len(msg) if nargs == 2: (o_buf, o_counts) = msg if is_datatype(o_counts): (o_counts, o_type) = None, o_counts elif is_tuple(o_counts): (o_counts, o_displs) = o_counts elif nargs == 3: (o_buf, o_counts, o_type) = msg if is_tuple(o_counts): (o_counts, o_displs) = o_counts elif nargs == 4: (o_buf, o_counts, o_displs, o_type) = msg else: raise ValueError("message: expecting 2 to 4 items") elif is_dlpack_buffer(msg): o_buf = msg elif is_cai_buffer(msg): o_buf = msg elif PYPY: o_buf = msg else: raise TypeError("message: expecting buffer or list/tuple") # buffer: address, length, and datatype cdef void *baddr = NULL cdef MPI_Aint bsize = 0 cdef MPI_Datatype btype = MPI_DATATYPE_NULL cdef _p_message m = message_basic(o_buf, o_type, readonly, &baddr, &bsize, &btype) # counts and displacements cdef int *counts = NULL cdef int *displs = NULL cdef int i=0, val=0 cdef MPI_Aint extent=0, lb=0 cdef MPI_Aint asize=0, aval=0 if o_counts is None: if bsize > 0: if btype == MPI_DATATYPE_NULL: raise ValueError( "message: cannot infer count, " "datatype is null") CHKERR( MPI_Type_get_extent(btype, &lb, &extent) ) if extent <= 0: raise ValueError( ("message: cannot infer count, " "datatype extent %d (lb:%d, ub:%d)" ) % (extent, lb, lb+extent)) if (bsize % extent) != 0: raise ValueError( ("message: cannot infer count, " "buffer length %d is not a multiple of " "datatype extent %d (lb:%d, ub:%d)" ) % (bsize, extent, lb, lb+extent)) asize = bsize // extent o_counts = newarray(blocks, &counts) for i from 0 <= i < blocks: aval = (asize // blocks) + (asize % blocks > i) counts[i] = downcast(aval) elif is_integral(o_counts): val = o_counts o_counts = newarray(blocks, &counts) for i from 0 <= i < blocks: counts[i] = val else: o_counts = chkarray(o_counts, blocks, &counts) if o_displs is None: # contiguous val = 0 o_displs = newarray(blocks, &displs) for i from 0 <= i < blocks: displs[i] = val val += counts[i] elif is_integral(o_displs): # strided val = o_displs o_displs = newarray(blocks, &displs) for i from 0 <= i < blocks: displs[i] = val * i else: # general o_displs = chkarray(o_displs, blocks, &displs) # return collected message data m.count = o_counts m.displ = o_displs _addr[0] = baddr _counts[0] = counts _displs[0] = displs _type[0] = btype return m cdef tuple message_vector_w(object msg, int readonly, int blocks, # void **_addr, int **_counts, integral_t **_displs, MPI_Datatype **_types, ): cdef int i = 0 cdef Py_ssize_t nargs = len(msg) cdef object o_buffer, o_counts, o_displs, o_types if nargs == 2: o_buffer, o_types = msg o_counts = o_displs = None elif nargs == 3: o_buffer, (o_counts, o_displs), o_types = msg elif nargs == 4: o_buffer, o_counts, o_displs, o_types = msg else: raise ValueError("message: expecting 2 to 4 items") if is_BOTTOM(o_buffer): if o_counts is None: raise ValueError("message: BOTTOM requires counts") if o_displs is None: raise 
ValueError("message: BOTTOM requires displs") _addr[0] = MPI_BOTTOM elif readonly: o_buffer = getbuffer_r(o_buffer, _addr, NULL) else: o_buffer = getbuffer_w(o_buffer, _addr, NULL) if o_counts is None and o_displs is None: o_counts = newarray(blocks, _counts) o_displs = newarray(blocks, _displs) for i from 0 <= i < blocks: _counts[0][i] = 1 _displs[0][i] = 0 else: o_counts = chkarray(o_counts, blocks, _counts) o_displs = chkarray(o_displs, blocks, _displs) o_types = asarray_Datatype(o_types, blocks, _types) return (o_buffer, o_counts, o_displs, o_types) #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_msg_p2p: # raw C-side arguments cdef void *buf cdef int count cdef MPI_Datatype dtype # python-side argument cdef object _msg def __cinit__(self): self.buf = NULL self.count = 0 self.dtype = MPI_DATATYPE_NULL cdef int for_send(self, object msg, int rank) except -1: self._msg = message_simple(msg, 1, # readonly rank, 0, &self.buf, &self.count, &self.dtype) return 0 cdef int for_recv(self, object msg, int rank) except -1: self._msg = message_simple(msg, 0, # writable rank, 0, &self.buf, &self.count, &self.dtype) return 0 cdef inline _p_msg_p2p message_p2p_send(object sendbuf, int dest): cdef _p_msg_p2p msg = _p_msg_p2p.__new__(_p_msg_p2p) msg.for_send(sendbuf, dest) return msg cdef inline _p_msg_p2p message_p2p_recv(object recvbuf, int source): cdef _p_msg_p2p msg = _p_msg_p2p.__new__(_p_msg_p2p) msg.for_recv(recvbuf, source) return msg #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_msg_cco: # raw C-side arguments cdef void *sbuf, *rbuf cdef int scount, rcount cdef int *scounts, *rcounts cdef int *sdispls, *rdispls cdef MPI_Datatype stype, rtype # python-side arguments cdef object _smsg, _rmsg cdef object _rcnt def __cinit__(self): self.sbuf = self.rbuf = NULL self.scount = self.rcount = 0 self.scounts = self.rcounts = NULL self.sdispls = self.rdispls = NULL self.stype = self.rtype = MPI_DATATYPE_NULL # Collective Communication Operations # ----------------------------------- # sendbuf arguments cdef int for_cco_send(self, bint VECTOR, object amsg, int rank, int blocks) except -1: cdef bint readonly = 1 if not VECTOR: # block variant self._smsg = message_simple( amsg, readonly, rank, blocks, &self.sbuf, &self.scount, &self.stype) else: # vector variant self._smsg = message_vector( amsg, readonly, rank, blocks, &self.sbuf, &self.scounts, &self.sdispls, &self.stype) return 0 # recvbuf arguments cdef int for_cco_recv(self, bint VECTOR, object amsg, int rank, int blocks) except -1: cdef bint readonly = 0 if not VECTOR: # block variant self._rmsg = message_simple( amsg, readonly, rank, blocks, &self.rbuf, &self.rcount, &self.rtype) else: # vector variant self._rmsg = message_vector( amsg, readonly, rank, blocks, &self.rbuf, &self.rcounts, &self.rdispls, &self.rtype) return 0 # bcast cdef int for_bcast(self, object msg, int root, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, rank=0, sending=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cco_send(0, msg, root, 0) sending = 1 else: self.for_cco_recv(0, msg, root, 0) sending = 0 else: # inter-communication if (root == MPI_ROOT or root == MPI_PROC_NULL): self.for_cco_send(0, msg, root, 0) sending = 1 else: self.for_cco_recv(0, msg, root, 0) sending = 0 if sending: self.rbuf 
= self.sbuf self.rcount = self.scount self.rtype = self.stype else: self.sbuf = self.rbuf self.scount = self.rcount self.stype = self.rtype return 0 # gather/gatherv cdef int for_gather(self, int v, object smsg, object rmsg, int root, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0, rank=0, null=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cco_recv(v, rmsg, root, size) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scount = self.rcount self.stype = self.rtype else: self.for_cco_send(0, smsg, 0, 0) else: self.for_cco_recv(v, rmsg, null, size) self.for_cco_send(0, smsg, root, 0) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) if (root == MPI_ROOT or root == MPI_PROC_NULL): self.for_cco_recv(v, rmsg, root, size) self.for_cco_send(0, smsg, null, 0) else: self.for_cco_recv(v, rmsg, null, size) self.for_cco_send(0, smsg, root, 0) return 0 # scatter/scatterv cdef int for_scatter(self, int v, object smsg, object rmsg, int root, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0, rank=0, null=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cco_send(v, smsg, root, size) if is_IN_PLACE(rmsg): self.rbuf = MPI_IN_PLACE self.rcount = self.scount self.rtype = self.stype else: self.for_cco_recv(0, rmsg, root, 0) else: self.for_cco_send(v, smsg, null, size) self.for_cco_recv(0, rmsg, root, 0) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) if (root == MPI_ROOT or root == MPI_PROC_NULL): self.for_cco_send(v, smsg, root, size) self.for_cco_recv(0, rmsg, null, 0) else: self.for_cco_send(v, smsg, null, size) self.for_cco_recv(0, rmsg, root, 0) return 0 # allgather/allgatherv cdef int for_allgather(self, int v, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) # self.for_cco_recv(v, rmsg, 0, size) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scount = self.rcount self.stype = self.rtype else: self.for_cco_send(0, smsg, 0, 0) return 0 # alltoall/alltoallv cdef int for_alltoall(self, int v, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) # self.for_cco_recv(v, rmsg, 0, size) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scount = self.rcount self.scounts = self.rcounts self.sdispls = self.rdispls self.stype = self.rtype else: self.for_cco_send(v, smsg, 0, size) return 0 # Neighbor Collectives # -------------------- # neighbor allgather/allgatherv cdef int for_neighbor_allgather(self, int v, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int recvsize=0 comm_neighbors_count(comm, &recvsize, NULL) self.for_cco_send(0, smsg, 0, 0) self.for_cco_recv(v, rmsg, 0, recvsize) return 0 # neighbor alltoall/alltoallv cdef int 
for_neighbor_alltoall(self, int v, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int sendsize=0, recvsize=0 comm_neighbors_count(comm, &recvsize, &sendsize) self.for_cco_send(v, smsg, 0, sendsize) self.for_cco_recv(v, rmsg, 0, recvsize) return 0 # Collective Reductions Operations # -------------------------------- # sendbuf cdef int for_cro_send(self, object amsg, int root) except -1: self._smsg = message_simple(amsg, 1, # readonly root, 0, &self.sbuf, &self.scount, &self.stype) return 0 # recvbuf cdef int for_cro_recv(self, object amsg, int root) except -1: self._rmsg = message_simple(amsg, 0, # writable root, 0, &self.rbuf, &self.rcount, &self.rtype) return 0 cdef int for_reduce(self, object smsg, object rmsg, int root, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, rank=0, null=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: self.for_cro_recv(rmsg, root) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scount = self.rcount self.stype = self.rtype else: self.for_cro_send(smsg, root) else: self.for_cro_recv(rmsg, null) self.for_cro_send(smsg, root) self.rcount = self.scount self.rtype = self.stype else: # inter-communication if (root == MPI_ROOT or root == MPI_PROC_NULL): self.for_cro_recv(rmsg, root) self.scount = self.rcount self.stype = self.rtype else: self.for_cro_send(smsg, root) self.rcount = self.scount self.rtype = self.stype return 0 cdef int for_allreduce(self, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) # get send and recv buffers self.for_cro_recv(rmsg, 0) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scount = self.rcount self.stype = self.rtype else: self.for_cro_send(smsg, 0) # check counts and datatypes if self.sbuf != MPI_IN_PLACE: if self.stype != self.rtype: raise ValueError( "mismatch in send and receive MPI datatypes") if self.scount != self.rcount: raise ValueError( "mismatch in send count %d and receive count %d" % (self.scount, self.rcount)) return 0 cdef int for_reduce_scatter_block(self, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) CHKERR( MPI_Comm_size(comm, &size) ) # get send and recv buffers if not inter and is_IN_PLACE(smsg): self.for_cco_recv(0, rmsg, 0, size) self.sbuf = MPI_IN_PLACE else: self.for_cco_recv(0, rmsg, 0, 0) self.for_cco_send(0, smsg, 0, size) # check counts and datatypes if self.sbuf != MPI_IN_PLACE: if self.stype != self.rtype: raise ValueError( "mismatch in send and receive MPI datatypes") if self.scount != self.rcount: raise ValueError( "mismatch in send count %d receive count %d" % (self.scount, self.rcount*size)) return 0 cdef int for_reduce_scatter(self, object smsg, object rmsg, object rcnt, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0, rank=MPI_PROC_NULL CHKERR( MPI_Comm_test_inter(comm, &inter) ) CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # get send and recv buffers self.for_cro_recv(rmsg, 0) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE else: self.for_cro_send(smsg, 0) # get receive counts if rcnt is None and not inter and self.sbuf != MPI_IN_PLACE: self._rcnt = newarray(size, &self.rcounts) CHKERR( MPI_Allgather(&self.rcount, 1, MPI_INT, 
self.rcounts, 1, MPI_INT, comm) ) else: self._rcnt = chkarray(rcnt, size, &self.rcounts) # total sum or receive counts cdef int i=0, sumrcounts=0 for i from 0 <= i < size: sumrcounts += self.rcounts[i] # check counts and datatypes if self.sbuf != MPI_IN_PLACE: if self.stype != self.rtype: raise ValueError( "mismatch in send and receive MPI datatypes") if self.scount != sumrcounts: raise ValueError( "mismatch in send count %d and sum(counts) %d" % (self.scount, sumrcounts)) if self.rcount != self.rcounts[rank]: raise ValueError( "mismatch in receive count %d and counts[%d] %d" % (self.rcount, rank, self.rcounts[rank])) else: if self.rcount != sumrcounts: raise ValueError( "mismatch in receive count %d and sum(counts) %d" % (self.rcount, sumrcounts)) return 0 cdef int for_scan(self, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 # get send and recv buffers self.for_cro_recv(rmsg, 0) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scount = self.rcount self.stype = self.rtype else: self.for_cro_send(smsg, 0) # check counts and datatypes if self.sbuf != MPI_IN_PLACE: if self.stype != self.rtype: raise ValueError( "mismatch in send and receive MPI datatypes") if self.scount != self.rcount: raise ValueError( "mismatch in send count %d and receive count %d" % (self.scount, self.rcount)) return 0 cdef int for_exscan(self, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 # get send and recv buffers self.for_cro_recv(rmsg, 0) if is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scount = self.rcount self.stype = self.rtype else: self.for_cro_send(smsg, 0) # check counts and datatypes if self.sbuf != MPI_IN_PLACE: if self.stype != self.rtype: raise ValueError( "mismatch in send and receive MPI datatypes") if self.scount != self.rcount: raise ValueError( "mismatch in send count %d and receive count %d" % (self.scount, self.rcount)) return 0 cdef inline _p_msg_cco message_cco(): cdef _p_msg_cco msg = _p_msg_cco.__new__(_p_msg_cco) return msg #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_msg_ccow: # raw C-side arguments cdef void *sbuf, *rbuf cdef int *scounts, *rcounts cdef int *sdispls, *rdispls cdef MPI_Aint *sdisplsA, *rdisplsA cdef MPI_Datatype *stypes, *rtypes # python-side arguments cdef object _smsg, _rmsg def __cinit__(self): self.sbuf = self.rbuf = NULL self.scounts = self.rcounts = NULL self.sdispls = self.rdispls = NULL self.sdisplsA = self.rdisplsA = NULL self.stypes = self.rtypes = NULL # alltoallw cdef int for_alltoallw(self, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if not inter: # intra-communication CHKERR( MPI_Comm_size(comm, &size) ) else: # inter-communication CHKERR( MPI_Comm_remote_size(comm, &size) ) # self._rmsg = message_vector_w( rmsg, 0, size, &self.rbuf, &self.rcounts, &self.rdispls, &self.rtypes) if not inter and is_IN_PLACE(smsg): self.sbuf = MPI_IN_PLACE self.scounts = self.rcounts self.sdispls = self.rdispls self.stypes = self.rtypes return 0 self._smsg = message_vector_w( smsg, 1, size, &self.sbuf, &self.scounts, &self.sdispls, &self.stypes) return 0 # neighbor alltoallw cdef int for_neighbor_alltoallw(self, object smsg, object rmsg, MPI_Comm comm) except -1: if comm == MPI_COMM_NULL: return 0 cdef int sendsize=0, recvsize=0 comm_neighbors_count(comm, &recvsize, &sendsize) self._rmsg = message_vector_w( 
rmsg, 0, recvsize, &self.rbuf, &self.rcounts, &self.rdisplsA, &self.rtypes) self._smsg = message_vector_w( smsg, 1, sendsize, &self.sbuf, &self.scounts, &self.sdisplsA, &self.stypes) return 0 cdef inline _p_msg_ccow message_ccow(): cdef _p_msg_ccow msg = _p_msg_ccow.__new__(_p_msg_ccow) return msg #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_msg_rma: # raw origin arguments cdef void* oaddr cdef int ocount cdef MPI_Datatype otype # raw compare arguments cdef void* caddr cdef int ccount cdef MPI_Datatype ctype # raw result arguments cdef void* raddr cdef int rcount cdef MPI_Datatype rtype # raw target arguments cdef MPI_Aint tdisp cdef int tcount cdef MPI_Datatype ttype # python-side arguments cdef object _origin cdef object _compare cdef object _result cdef object _target def __cinit__(self): self.oaddr = NULL self.ocount = 0 self.otype = MPI_DATATYPE_NULL self.raddr = NULL self.rcount = 0 self.rtype = MPI_DATATYPE_NULL self.tdisp = 0 self.tcount = 0 self.ttype = MPI_DATATYPE_NULL cdef int for_rma(self, int readonly, object origin, int rank, object target) except -1: # ORIGIN self._origin = message_simple( origin, readonly, rank, 0, &self.oaddr, &self.ocount, &self.otype) if ((rank == MPI_PROC_NULL) and (origin is not None) and (is_list(origin) or is_tuple(origin)) and (len(origin) > 0 and isinstance(origin[-1], Datatype))): self.otype = (origin[-1]).ob_mpi self._origin = origin # TARGET cdef Py_ssize_t nargs = 0 if target is None: self.tdisp = 0 self.tcount = self.ocount self.ttype = self.otype elif is_integral(target): self.tdisp = target self.tcount = self.ocount self.ttype = self.otype elif is_list(target) or is_tuple(target): self.tdisp = 0 self.tcount = self.ocount self.ttype = self.otype nargs = len(target) if nargs >= 1: self.tdisp = target[0] if nargs >= 2: self.tcount = target[1] if nargs >= 3: self.ttype = (target[2]).ob_mpi if nargs >= 4: raise ValueError("target: expecting 3 items at most") else: raise ValueError("target: expecting integral or list/tuple") self._target = target return 0 cdef int for_put(self, object origin, int rank, object target) except -1: self.for_rma(1, origin, rank, target) return 0 cdef int for_get(self, object origin, int rank, object target) except -1: self.for_rma(0, origin, rank, target) return 0 cdef int for_acc(self, object origin, int rank, object target) except -1: self.for_rma(1, origin, rank, target) return 0 cdef int set_origin(self, object origin, int rank) except -1: self._origin = message_simple( origin, 1, rank, 0, &self.oaddr, &self.ocount, &self.otype) self.tdisp = 0 self.tcount = self.ocount self.ttype = self.otype cdef int set_compare(self, object compare, int rank) except -1: self._compare = message_simple( compare, 1, rank, 0, &self.caddr, &self.ccount, &self.ctype) cdef int set_result(self, object result, int rank) except -1: self._result = message_simple( result, 0, rank, 0, &self.raddr, &self.rcount, &self.rtype) cdef int for_get_acc(self, object origin, object result, int rank, object target) except -1: self.for_rma(0, origin, rank, target) self.set_result(result, rank) return 0 cdef int for_fetch_op(self, object origin, object result, int rank, MPI_Aint disp) except -1: self.set_origin(origin, rank) self.set_result(result, rank) self.tdisp = disp if rank == MPI_PROC_NULL: return 0 # Check if self.ocount != 1: raise ValueError( "origin: expecting a single element, got %d" % self.ocount) if self.rcount != 1: raise ValueError( "result: expecting a 
single element, got %d" % self.rcount) if self.otype != self.rtype: raise ValueError( "mismatch in origin and result MPI datatypes") return 0 cdef int for_cmp_swap(self, object origin, object compare, object result, int rank, MPI_Aint disp) except -1: self.set_origin(origin, rank) self.set_compare(compare, rank) self.set_result(result, rank) self.tdisp = disp if rank == MPI_PROC_NULL: return 0 # Check if self.ocount != 1: raise ValueError( "origin: expecting a single element, got %d" % self.ocount) if self.ccount != 1: raise ValueError( "compare: expecting a single element, got %d" % self.ccount) if self.rcount != 1: raise ValueError( "result: expecting a single element, got %d" % self.rcount) if self.otype != self.ctype: raise ValueError( "mismatch in origin and compare MPI datatypes") if self.otype != self.rtype: raise ValueError( "mismatch in origin and result MPI datatypes") return 0 cdef inline _p_msg_rma message_rma(): cdef _p_msg_rma msg = _p_msg_rma.__new__(_p_msg_rma) return msg #------------------------------------------------------------------------------ @cython.final @cython.internal cdef class _p_msg_io: # raw C-side data cdef void *buf cdef int count cdef MPI_Datatype dtype # python-side data cdef object _msg def __cinit__(self): self.buf = NULL self.count = 0 self.dtype = MPI_DATATYPE_NULL cdef int for_read(self, object msg) except -1: self._msg = message_simple(msg, 0, # writable 0, 0, &self.buf, &self.count, &self.dtype) return 0 cdef int for_write(self, object msg) except -1: self._msg = message_simple(msg, 1, # readonly 0, 0, &self.buf, &self.count, &self.dtype) return 0 cdef inline _p_msg_io message_io_read(object buf): cdef _p_msg_io msg = _p_msg_io.__new__(_p_msg_io) msg.for_read(buf) return msg cdef inline _p_msg_io message_io_write(object buf): cdef _p_msg_io msg = _p_msg_io.__new__(_p_msg_io) msg.for_write(buf) return msg #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/msgpickle.pxi000066400000000000000000001306121460670727200201170ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef extern from "Python.h": bint PyBytes_CheckExact(object) char* PyBytes_AsString(object) except NULL Py_ssize_t PyBytes_Size(object) except -1 object PyBytes_FromStringAndSize(char*,Py_ssize_t) # ----------------------------------------------------------------------------- cdef object PyPickle_dumps = None cdef object PyPickle_loads = None cdef object PyPickle_PROTOCOL = None if PY3: from pickle import dumps as PyPickle_dumps from pickle import loads as PyPickle_loads from pickle import HIGHEST_PROTOCOL as PyPickle_PROTOCOL else: try: from cPickle import dumps as PyPickle_dumps from cPickle import loads as PyPickle_loads from cPickle import HIGHEST_PROTOCOL as PyPickle_PROTOCOL except ImportError: from pickle import dumps as PyPickle_dumps from pickle import loads as PyPickle_loads from pickle import HIGHEST_PROTOCOL as PyPickle_PROTOCOL if Py_GETENV(b"MPI4PY_PICKLE_PROTOCOL") != NULL: PyPickle_PROTOCOL = int(Py_GETENV(b"MPI4PY_PICKLE_PROTOCOL")) cdef object PyBytesIO_New = None cdef object PyPickle_loadf = None if PY2: try: from cStringIO import StringIO as PyBytesIO_New except ImportError: from io import BytesIO as PyBytesIO_New try: from cPickle import load as PyPickle_loadf except ImportError: from pickle import load as PyPickle_loadf cdef class Pickle: """ Pickle/unpickle Python objects """ cdef object ob_dumps cdef object ob_loads cdef object ob_PROTO def 
__cinit__(self, *args, **kwargs): self.ob_dumps = PyPickle_dumps self.ob_loads = PyPickle_loads self.ob_PROTO = PyPickle_PROTOCOL def __init__( self, dumps: Optional[Callable[[Any, int], bytes]] = None, loads: Optional[Callable[[Buffer], Any]] = None, protocol: Optional[int] = None, ) -> None: if dumps is None: dumps = PyPickle_dumps if loads is None: loads = PyPickle_loads if protocol is None: if dumps is PyPickle_dumps: protocol = PyPickle_PROTOCOL self.ob_dumps = dumps self.ob_loads = loads self.ob_PROTO = protocol def dumps( self, obj: Any, buffer_callback: Optional[Callable[[Buffer], Any]] = None, ) -> bytes: """ Serialize object to pickle data stream. """ if buffer_callback is not None: return cdumps_oob(self, obj, buffer_callback) return cdumps(self, obj) def loads( self, data: Buffer, buffers: Optional[Iterable[Buffer]] = None, ) -> Any: """ Deserialize object from pickle data stream. """ if buffers is not None: return cloads_oob(self, data, buffers) return cloads(self, data) property PROTOCOL: """pickle protocol""" def __get__(self) -> Optional[int]: return self.ob_PROTO def __set__(self, protocol: Optional[int]): if protocol is None: if self.ob_dumps is PyPickle_dumps: protocol = PyPickle_PROTOCOL self.ob_PROTO = protocol cdef Pickle PyMPI_PICKLE = Pickle() pickle = PyMPI_PICKLE # ----------------------------------------------------------------------------- cdef object cdumps_oob(Pickle pkl, object obj, object buffer_callback): cdef int protocol = -1 if pkl.ob_PROTO is not None: protocol = pkl.ob_PROTO if protocol >= 0: protocol = max(protocol, 5) return pkl.ob_dumps(obj, protocol, buffer_callback=buffer_callback) cdef object cloads_oob(Pickle pkl, object data, object buffers): return pkl.ob_loads(data, buffers=buffers) cdef object cdumps(Pickle pkl, object obj): if pkl.ob_PROTO is not None: return pkl.ob_dumps(obj, pkl.ob_PROTO) else: return pkl.ob_dumps(obj) cdef object cloads(Pickle pkl, object buf): if PY2: if not PyBytes_CheckExact(buf): if pkl.ob_loads is PyPickle_loads: buf = PyBytesIO_New(buf) return PyPickle_loadf(buf) return pkl.ob_loads(buf) cdef object pickle_dump(Pickle pkl, object obj, void **p, int *n): cdef object buf = cdumps(pkl, obj) p[0] = PyBytes_AsString(buf) n[0] = downcast(PyBytes_Size(buf)) return buf cdef object pickle_load(Pickle pkl, void *p, int n): if p == NULL or n == 0: return None return cloads(pkl, tomemory(p, n)) cdef object pickle_dumpv(Pickle pkl, object obj, void **p, int n, int cnt[], int dsp[]): cdef Py_ssize_t i=0, m=n cdef object items if obj is None: items = [None] * m else: items = list(obj) m = len(items) if m != n: raise ValueError( "expecting %d items, got %d" % (n, m)) cdef int c=0, d=0 for i from 0 <= i < m: items[i] = pickle_dump(pkl, items[i], p, &c) cnt[i] = c; dsp[i] = d; d = downcast(d + c) cdef object buf = b''.join(items) p[0] = PyBytes_AsString(buf) return buf cdef object pickle_loadv(Pickle pkl, void *p, int n, int cnt[], int dsp[]): cdef Py_ssize_t i=0, m=n cdef object items = [None] * m if p == NULL: return items for i from 0 <= i < m: items[i] = pickle_load(pkl, p+dsp[i], cnt[i]) return items cdef object pickle_alloc(void **p, int n): cdef object buf = PyBytes_FromStringAndSize(NULL, n) p[0] = PyBytes_AsString(buf) return buf cdef object pickle_allocv(void **p, int n, int cnt[], int dsp[]): cdef int i=0, d=0 for i from 0 <= i < n: dsp[i] = d d += cnt[i] return pickle_alloc(p, d) cdef inline object allocate_count_displ(int n, int **p, int **q): cdef object mem = allocate(2*n, sizeof(int), p) q[0] = p[0] + n return mem # 
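# The serializer used by the lowercase, pickle-based communication routines
# defined below is the module-level `pickle` object created above. A usage
# sketch of customizing it (the `dill` module is an assumed stand-in for any
# compatible dumps/loads pair):
#
#     from mpi4py import MPI
#     import dill
#     MPI.pickle.__init__(dill.dumps, dill.loads)   # swap the serializer
#     MPI.pickle.PROTOCOL = 5                       # or pin a pickle protocol
#
# The default protocol can also be overridden at import time through the
# MPI4PY_PICKLE_PROTOCOL environment variable read above.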
----------------------------------------------------------------------------- cdef object PyMPI_send(object obj, int dest, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object tmps = None if dest != MPI_PROC_NULL: tmps = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Send(sbuf, scount, stype, dest, tag, comm) ) return None cdef object PyMPI_bsend(object obj, int dest, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object tmps = None if dest != MPI_PROC_NULL: tmps = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Bsend(sbuf, scount, stype, dest, tag, comm) ) return None cdef object PyMPI_ssend(object obj, int dest, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object tmps = None if dest != MPI_PROC_NULL: tmps = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Ssend(sbuf, scount, stype, dest, tag, comm) ) return None # ----------------------------------------------------------------------------- cdef extern from "Python.h": int PyErr_WarnEx(object, const char*, int) except -1 cdef object PyMPI_recv_obarg(object obj, int source, int tag, MPI_Comm comm, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE # cdef void *rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status rsts cdef object rmsg = None cdef MPI_Aint rlen = 0 # PyErr_WarnEx(UserWarning, b"the 'buf' argument is deprecated", 1) # if source != MPI_PROC_NULL: if is_integral(obj): rcount = obj rmsg = pickle_alloc(&rbuf, rcount) else: rmsg = getbuffer_w(obj, &rbuf, &rlen) rcount = clipcount(rlen) if status == MPI_STATUS_IGNORE: status = &rsts rmsg with nogil: CHKERR( MPI_Recv(rbuf, rcount, rtype, source, tag, comm, status) ) if source != MPI_PROC_NULL: CHKERR( MPI_Get_count(status, rtype, &rcount) ) # if rcount <= 0: return None return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_recv_match(object obj, int source, int tag, MPI_Comm comm, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE # cdef void *rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef MPI_Message match = MPI_MESSAGE_NULL cdef MPI_Status rsts obj # unused # with nogil: CHKERR( MPI_Mprobe(source, tag, comm, &match, &rsts) ) CHKERR( MPI_Get_count(&rsts, rtype, &rcount) ) cdef object tmpr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Mrecv(rbuf, rcount, rtype, &match, status) ) # if rcount <= 0: return None return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_recv_probe(object obj, int source, int tag, MPI_Comm comm, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE # cdef void *rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef MPI_Status rsts cdef object tmpr obj # unused # with PyMPI_Lock(comm, "recv"): with nogil: CHKERR( MPI_Probe(source, tag, comm, &rsts) ) CHKERR( MPI_Get_count(&rsts, rtype, &rcount) ) CHKERR( PyMPI_Status_get_source(&rsts, &source) ) CHKERR( PyMPI_Status_get_tag(&rsts, &tag) ) tmpr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Recv(rbuf, rcount, rtype, source, tag, comm, status) ) # if rcount <= 0: return None return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_recv(object obj, int source, int tag, MPI_Comm comm, MPI_Status *status): if obj is not None: return PyMPI_recv_obarg(obj, 
source, tag, comm, status) elif options.recv_mprobe: return PyMPI_recv_match(obj, source, tag, comm, status) else: return PyMPI_recv_probe(obj, source, tag, comm, status) # ----------------------------------------------------------------------------- cdef object PyMPI_isend(object obj, int dest, int tag, MPI_Comm comm, MPI_Request *request): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object smsg = None if dest != MPI_PROC_NULL: smsg = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Isend(sbuf, scount, stype, dest, tag, comm, request) ) return smsg cdef object PyMPI_ibsend(object obj, int dest, int tag, MPI_Comm comm, MPI_Request *request): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object smsg = None if dest != MPI_PROC_NULL: smsg = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Ibsend(sbuf, scount, stype, dest, tag, comm, request) ) return smsg cdef object PyMPI_issend(object obj, int dest, int tag, MPI_Comm comm, MPI_Request *request): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE # cdef object smsg = None if dest != MPI_PROC_NULL: smsg = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Issend(sbuf, scount, stype, dest, tag, comm, request) ) return smsg cdef object PyMPI_irecv(object obj, int source, int tag, MPI_Comm comm, MPI_Request *request): cdef Pickle pickle = PyMPI_PICKLE # cdef void *rbuf = NULL cdef MPI_Aint rlen = 0 cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef object rmsg = None if source != MPI_PROC_NULL: if obj is None: rcount = (1<<15) obj = pickle_alloc(&rbuf, rcount) rmsg = getbuffer_r(obj, NULL, NULL) elif is_integral(obj): rcount = obj obj = pickle_alloc(&rbuf, rcount) rmsg = getbuffer_r(obj, NULL, NULL) else: rmsg = getbuffer_w(obj, &rbuf, &rlen) rcount = clipcount(rlen) with nogil: CHKERR( MPI_Irecv(rbuf, rcount, rtype, source, tag, comm, request) ) return rmsg # ----------------------------------------------------------------------------- cdef object PyMPI_sendrecv(object sobj, int dest, int sendtag, object robj, int source, int recvtag, MPI_Comm comm, MPI_Status *status): cdef MPI_Request request = MPI_REQUEST_NULL sobj = PyMPI_isend(sobj, dest, sendtag, comm, &request) robj = PyMPI_recv (robj, source, recvtag, comm, status) with nogil: CHKERR( MPI_Wait(&request, MPI_STATUS_IGNORE) ) return robj # ----------------------------------------------------------------------------- cdef object PyMPI_load(MPI_Status *status, object ob): cdef Pickle pickle = PyMPI_PICKLE cdef void *rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE if type(ob) is not memory: return None CHKERR( MPI_Get_count(status, rtype, &rcount) ) if rcount <= 0: return None ob = asmemory(ob, &rbuf, NULL) return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_wait(Request request, Status status): cdef object buf # cdef MPI_Status rsts with nogil: CHKERR( MPI_Wait(&request.ob_mpi, &rsts) ) buf = request.ob_buf if status is not None: status.ob_mpi = rsts if request.ob_mpi == MPI_REQUEST_NULL: request.ob_buf = None # return PyMPI_load(&rsts, buf) cdef object PyMPI_test(Request request, int *flag, Status status): cdef object buf = None # cdef MPI_Status rsts with nogil: CHKERR( MPI_Test(&request.ob_mpi, flag, &rsts) ) if flag[0]: buf = request.ob_buf if status is not None: status.ob_mpi = rsts if 
request.ob_mpi == MPI_REQUEST_NULL: request.ob_buf = None # if not flag[0]: return None return PyMPI_load(&rsts, buf) cdef object PyMPI_waitany(requests, int *index, Status status): cdef object buf = None # cdef int count = 0 cdef MPI_Request *irequests = NULL cdef MPI_Status rsts # cdef tmp = acquire_rs(requests, None, &count, &irequests, NULL) try: with nogil: CHKERR( MPI_Waitany(count, irequests, index, &rsts) ) if index[0] != MPI_UNDEFINED: buf = (requests[index[0]]).ob_buf if status is not None: status.ob_mpi = rsts finally: release_rs(requests, None, count, irequests, 0, NULL) # if index[0] == MPI_UNDEFINED: return None return PyMPI_load(&rsts, buf) cdef object PyMPI_testany(requests, int *index, int *flag, Status status): cdef object buf = None # cdef int count = 0 cdef MPI_Request *irequests = NULL cdef MPI_Status rsts # cdef tmp = acquire_rs(requests, None, &count, &irequests, NULL) try: with nogil: CHKERR( MPI_Testany(count, irequests, index, flag, &rsts) ) if index[0] != MPI_UNDEFINED: buf = (requests[index[0]]).ob_buf if status is not None: status.ob_mpi = rsts finally: release_rs(requests, None, count, irequests, 0, NULL) # if index[0] == MPI_UNDEFINED: return None if not flag[0]: return None return PyMPI_load(&rsts, buf) cdef object PyMPI_waitall(requests, statuses): cdef object bufs = None # cdef Py_ssize_t i = 0 cdef int count = 0 cdef MPI_Request *irequests = NULL cdef MPI_Status *istatuses = MPI_STATUSES_IGNORE # cdef tmp = acquire_rs(requests, True, &count, &irequests, &istatuses) try: with nogil: CHKERR( MPI_Waitall(count, irequests, istatuses) ) bufs = [(requests[i]).ob_buf for i from 0 <= i < count] finally: release_rs(requests, statuses, count, irequests, count, istatuses) # return [PyMPI_load(&istatuses[i], bufs[i]) for i from 0 <= i < count] cdef object PyMPI_testall(requests, int *flag, statuses): cdef object bufs = None # cdef Py_ssize_t i = 0 cdef int count = 0 cdef MPI_Request *irequests = NULL cdef MPI_Status *istatuses = MPI_STATUSES_IGNORE # cdef tmp = acquire_rs(requests, True, &count, &irequests, &istatuses) try: with nogil: CHKERR( MPI_Testall(count, irequests, flag, istatuses) ) if flag[0]: bufs = [(requests[i]).ob_buf for i from 0 <= i < count] finally: release_rs(requests, statuses, count, irequests, count, istatuses) # if not flag[0]: return None return [PyMPI_load(&istatuses[i], bufs[i]) for i from 0 <= i < count] cdef object PyMPI_waitsome(requests, statuses): cdef object bufs = None cdef object indices = None cdef object objects = None # cdef Py_ssize_t i = 0 cdef int incount = 0 cdef MPI_Request *irequests = NULL cdef int outcount = MPI_UNDEFINED, *iindices = NULL cdef MPI_Status *istatuses = MPI_STATUSES_IGNORE # cdef tmp1 = acquire_rs(requests, True, &incount, &irequests, &istatuses) cdef tmp2 = newarray(incount, &iindices) try: with nogil: CHKERR( MPI_Waitsome( incount, irequests, &outcount, iindices, istatuses) ) if outcount != MPI_UNDEFINED: bufs = [(requests[iindices[i]]).ob_buf for i from 0 <= i < outcount] finally: release_rs(requests, statuses, incount, irequests, outcount, istatuses) # if outcount != MPI_UNDEFINED: indices = [iindices[i] for i from 0 <= i < outcount] objects = [PyMPI_load(&istatuses[i], bufs[i]) for i from 0 <= i < outcount] return (indices, objects) cdef object PyMPI_testsome(requests, statuses): cdef object bufs = None cdef object indices = None cdef object objects = None # cdef Py_ssize_t i = 0 cdef int incount = 0 cdef MPI_Request *irequests = NULL cdef int outcount = MPI_UNDEFINED, *iindices = NULL cdef MPI_Status 
*istatuses = MPI_STATUSES_IGNORE # cdef tmp1 = acquire_rs(requests, True, &incount, &irequests, &istatuses) cdef tmp2 = newarray(incount, &iindices) try: with nogil: CHKERR( MPI_Testsome( incount, irequests, &outcount, iindices, istatuses) ) if outcount != MPI_UNDEFINED: bufs = [(requests[iindices[i]]).ob_buf for i from 0 <= i < outcount] finally: release_rs(requests, statuses, incount, irequests, outcount, istatuses) # if outcount != MPI_UNDEFINED: indices = [iindices[i] for i from 0 <= i < outcount] objects = [PyMPI_load(&istatuses[i], bufs[i]) for i from 0 <= i < outcount] return (indices, objects) # ----------------------------------------------------------------------------- cdef object PyMPI_probe(int source, int tag, MPI_Comm comm, MPI_Status *status): with nogil: CHKERR( MPI_Probe(source, tag, comm, status) ) return True cdef object PyMPI_iprobe(int source, int tag, MPI_Comm comm, MPI_Status *status): cdef int flag = 0 with nogil: CHKERR( MPI_Iprobe(source, tag, comm, &flag, status) ) return flag cdef object PyMPI_mprobe(int source, int tag, MPI_Comm comm, MPI_Message *message, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE cdef void* rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status rsts if (status == MPI_STATUS_IGNORE): status = &rsts with nogil: CHKERR( MPI_Mprobe(source, tag, comm, message, status) ) if message[0] == MPI_MESSAGE_NO_PROC: return None CHKERR( MPI_Get_count(status, rtype, &rcount) ) cdef object rmsg = pickle_alloc(&rbuf, rcount) return rmsg cdef object PyMPI_improbe(int source, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE cdef void* rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status rsts if (status == MPI_STATUS_IGNORE): status = &rsts with nogil: CHKERR( MPI_Improbe(source, tag, comm, flag, message, status) ) if flag[0] == 0 or message[0] == MPI_MESSAGE_NO_PROC: return None CHKERR( MPI_Get_count(status, rtype, &rcount) ) cdef object rmsg = pickle_alloc(&rbuf, rcount) return rmsg cdef object PyMPI_mrecv(object rmsg, MPI_Message *message, MPI_Status *status): cdef Pickle pickle = PyMPI_PICKLE cdef void* rbuf = NULL cdef MPI_Aint rlen = 0 cdef MPI_Datatype rtype = MPI_BYTE if message[0] == MPI_MESSAGE_NO_PROC: rmsg = None elif rmsg is None: pass elif PyBytes_CheckExact(rmsg): rmsg = getbuffer_r(rmsg, &rbuf, &rlen) else: rmsg = getbuffer_w(rmsg, &rbuf, &rlen) cdef int rcount = clipcount(rlen) with nogil: CHKERR( MPI_Mrecv(rbuf, rcount, rtype, message, status) ) rmsg = pickle_load(pickle, rbuf, rcount) return rmsg cdef object PyMPI_imrecv(object rmsg, MPI_Message *message, MPI_Request *request): cdef void* rbuf = NULL cdef MPI_Aint rlen = 0 cdef MPI_Datatype rtype = MPI_BYTE if message[0] == MPI_MESSAGE_NO_PROC: rmsg = None elif rmsg is None: pass elif PyBytes_CheckExact(rmsg): rmsg = getbuffer_r(rmsg, &rbuf, &rlen) else: rmsg = getbuffer_w(rmsg, &rbuf, &rlen) cdef int rcount = clipcount(rlen) with nogil: CHKERR( MPI_Imrecv(rbuf, rcount, rtype, message, request) ) return rmsg # ----------------------------------------------------------------------------- cdef object PyMPI_barrier(MPI_Comm comm): with nogil: CHKERR( MPI_Barrier(comm) ) return None cdef object PyMPI_bcast(object obj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *buf = NULL cdef int count = 0 cdef MPI_Datatype dtype = MPI_BYTE # cdef int dosend=0, dorecv=0 cdef int inter=0, rank=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: if 
root == MPI_PROC_NULL: dosend=0; dorecv=0; elif root == MPI_ROOT: dosend=1; dorecv=0; else: dosend=0; dorecv=1; else: CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: dosend=1; dorecv=1; else: dosend=0; dorecv=1; # cdef object smsg = None cdef object rmsg = None # if dosend: smsg = pickle_dump(pickle, obj, &buf, &count) if dosend and dorecv: rmsg = smsg with PyMPI_Lock(comm, "bcast"): with nogil: CHKERR( MPI_Bcast( &count, 1, MPI_INT, root, comm) ) if dorecv and not dosend: rmsg = pickle_alloc(&buf, count) with nogil: CHKERR( MPI_Bcast( buf, count, dtype, root, comm) ) if dorecv: rmsg = pickle_load(pickle, buf, count) # return rmsg cdef object PyMPI_gather(object sendobj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef int *rcounts = NULL cdef int *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int dosend=0, dorecv=0 cdef int inter=0, size=0, rank=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( MPI_Comm_remote_size(comm, &size) ) if root == MPI_PROC_NULL: dosend=0; dorecv=0; elif root == MPI_ROOT: dosend=0; dorecv=1; else: dosend=1; dorecv=0; else: CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: dosend=1; dorecv=1; else: dosend=1; dorecv=0; # cdef object tmps = None cdef object rmsg = None cdef object tmp1 # if dorecv: tmp1 = allocate_count_displ(size, &rcounts, &rdispls) if dosend: tmps = pickle_dump(pickle, sendobj, &sbuf, &scount) with PyMPI_Lock(comm, "gather"): with nogil: CHKERR( MPI_Gather( &scount, 1, MPI_INT, rcounts, 1, MPI_INT, root, comm) ) if dorecv: rmsg = pickle_allocv(&rbuf, size, rcounts, rdispls) with nogil: CHKERR( MPI_Gatherv( sbuf, scount, stype, rbuf, rcounts, rdispls, rtype, root, comm) ) if dorecv: rmsg = pickle_loadv(pickle, rbuf, size, rcounts, rdispls) # return rmsg cdef object PyMPI_scatter(object sendobj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int *scounts = NULL cdef int *sdispls = NULL cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE # cdef int dosend=0, dorecv=0 cdef int inter=0, size=0, rank=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( MPI_Comm_remote_size(comm, &size) ) if root == MPI_PROC_NULL: dosend=0; dorecv=0; elif root == MPI_ROOT: dosend=1; dorecv=0; else: dosend=0; dorecv=1; else: CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: dosend=1; dorecv=1; else: dosend=0; dorecv=1; # cdef object tmps = None cdef object rmsg = None cdef object tmp1 # if dosend: tmp1 = allocate_count_displ(size, &scounts, &sdispls) if dosend: tmps = pickle_dumpv(pickle, sendobj, &sbuf, size, scounts, sdispls) with PyMPI_Lock(comm, "scatter"): with nogil: CHKERR( MPI_Scatter( scounts, 1, MPI_INT, &rcount, 1, MPI_INT, root, comm) ) if dorecv: rmsg = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Scatterv( sbuf, scounts, sdispls, stype, rbuf, rcount, rtype, root, comm) ) if dorecv: rmsg = pickle_load(pickle, rbuf, rcount) # return rmsg cdef object PyMPI_allgather(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef int *rcounts = NULL cdef int *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( 
MPI_Comm_remote_size(comm, &size) ) else: CHKERR( MPI_Comm_size(comm, &size) ) # cdef object tmps = None cdef object rmsg = None cdef object tmp1 # tmp1 = allocate_count_displ(size, &rcounts, &rdispls) tmps = pickle_dump(pickle, sendobj, &sbuf, &scount) with PyMPI_Lock(comm, "allgather"): with nogil: CHKERR( MPI_Allgather( &scount, 1, MPI_INT, rcounts, 1, MPI_INT, comm) ) rmsg = pickle_allocv(&rbuf, size, rcounts, rdispls) with nogil: CHKERR( MPI_Allgatherv( sbuf, scount, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, size, rcounts, rdispls) # return rmsg cdef object PyMPI_alltoall(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int *scounts = NULL cdef int *sdispls = NULL cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef int *rcounts = NULL cdef int *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int inter=0, size=0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: CHKERR( MPI_Comm_remote_size(comm, &size) ) else: CHKERR( MPI_Comm_size(comm, &size) ) # cdef object tmps = None cdef object rmsg = None cdef object tmp1, tmp2 # tmp1 = allocate_count_displ(size, &scounts, &sdispls) tmp2 = allocate_count_displ(size, &rcounts, &rdispls) tmps = pickle_dumpv(pickle, sendobj, &sbuf, size, scounts, sdispls) with PyMPI_Lock(comm, "alltoall"): with nogil: CHKERR( MPI_Alltoall( scounts, 1, MPI_INT, rcounts, 1, MPI_INT, comm) ) rmsg = pickle_allocv(&rbuf, size, rcounts, rdispls) with nogil: CHKERR( MPI_Alltoallv( sbuf, scounts, sdispls, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, size, rcounts, rdispls) # return rmsg cdef object PyMPI_neighbor_allgather(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef int *rcounts = NULL cdef int *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int i=0, rsize=0 comm_neighbors_count(comm, &rsize, NULL) # cdef object tmps = None cdef object rmsg = None cdef object tmp1 # tmp1 = allocate_count_displ(rsize, &rcounts, &rdispls) for i from 0 <= i < rsize: rcounts[i] = 0 tmps = pickle_dump(pickle, sendobj, &sbuf, &scount) with PyMPI_Lock(comm, "neighbor_allgather"): with nogil: CHKERR( MPI_Neighbor_allgather( &scount, 1, MPI_INT, rcounts, 1, MPI_INT, comm) ) rmsg = pickle_allocv(&rbuf, rsize, rcounts, rdispls) with nogil: CHKERR( MPI_Neighbor_allgatherv( sbuf, scount, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, rsize, rcounts, rdispls) # return rmsg cdef object PyMPI_neighbor_alltoall(object sendobj, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE # cdef void *sbuf = NULL cdef int *scounts = NULL cdef int *sdispls = NULL cdef MPI_Datatype stype = MPI_BYTE cdef void *rbuf = NULL cdef int *rcounts = NULL cdef int *rdispls = NULL cdef MPI_Datatype rtype = MPI_BYTE # cdef int i=0, ssize=0, rsize=0 comm_neighbors_count(comm, &rsize, &ssize) # cdef object tmps = None cdef object rmsg = None cdef object tmp1, tmp2 # tmp1 = allocate_count_displ(ssize, &scounts, &sdispls) tmp2 = allocate_count_displ(rsize, &rcounts, &rdispls) for i from 0 <= i < rsize: rcounts[i] = 0 tmps = pickle_dumpv(pickle, sendobj, &sbuf, ssize, scounts, sdispls) with PyMPI_Lock(comm, "neighbor_alltoall"): with nogil: CHKERR( MPI_Neighbor_alltoall( scounts, 1, MPI_INT, rcounts, 1, MPI_INT, comm) ) rmsg = pickle_allocv(&rbuf, rsize, rcounts, rdispls) with nogil: CHKERR( MPI_Neighbor_alltoallv( sbuf, 
scounts, sdispls, stype, rbuf, rcounts, rdispls, rtype, comm) ) rmsg = pickle_loadv(pickle, rbuf, rsize, rcounts, rdispls) # return rmsg # ----------------------------------------------------------------------------- cdef inline object _py_reduce(object seq, object op): if seq is None: return None cdef Py_ssize_t i = 0 cdef Py_ssize_t n = len(seq) cdef object res = seq[0] for i from 1 <= i < n: res = op(res, seq[i]) return res cdef inline object _py_scan(object seq, object op): if seq is None: return None cdef Py_ssize_t i = 0 cdef Py_ssize_t n = len(seq) for i from 1 <= i < n: seq[i] = op(seq[i-1], seq[i]) return seq cdef inline object _py_exscan(object seq, object op): if seq is None: return None seq = _py_scan(seq, op) seq.pop(-1) seq.insert(0, None) return seq cdef object PyMPI_reduce_naive(object sendobj, object op, int root, MPI_Comm comm): cdef object items = PyMPI_gather(sendobj, root, comm) return _py_reduce(items, op) cdef object PyMPI_allreduce_naive(object sendobj, object op, MPI_Comm comm): cdef object items = PyMPI_allgather(sendobj, comm) return _py_reduce(items, op) cdef object PyMPI_scan_naive(object sendobj, object op, MPI_Comm comm): cdef object items = PyMPI_gather(sendobj, 0, comm) items = _py_scan(items, op) return PyMPI_scatter(items, 0, comm) cdef object PyMPI_exscan_naive(object sendobj, object op, MPI_Comm comm): cdef object items = PyMPI_gather(sendobj, 0, comm) items = _py_exscan(items, op) return PyMPI_scatter(items, 0, comm) # ----- cdef inline object PyMPI_copy(object obj): cdef Pickle pickle = PyMPI_PICKLE cdef void *buf = NULL cdef int count = 0 obj = pickle_dump(pickle, obj, &buf, &count) return pickle_load(pickle, buf, count) cdef object PyMPI_send_p2p(object obj, int dst, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *sbuf = NULL cdef int scount = 0 cdef MPI_Datatype stype = MPI_BYTE cdef object tmps = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Send(&scount, 1, MPI_INT, dst, tag, comm) ) with nogil: CHKERR( MPI_Send(sbuf, scount, stype, dst, tag, comm) ) return None cdef object PyMPI_recv_p2p(int src, int tag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *rbuf = NULL cdef int rcount = 0 cdef MPI_Datatype rtype = MPI_BYTE cdef MPI_Status *status = MPI_STATUS_IGNORE with nogil: CHKERR( MPI_Recv(&rcount, 1, MPI_INT, src, tag, comm, status) ) cdef object tmpr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Recv(rbuf, rcount, rtype, src, tag, comm, status) ) return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_sendrecv_p2p(object obj, int dst, int stag, int src, int rtag, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *sbuf = NULL, *rbuf = NULL cdef int scount = 0, rcount = 0 cdef MPI_Datatype dtype = MPI_BYTE cdef object tmps = pickle_dump(pickle, obj, &sbuf, &scount) with nogil: CHKERR( MPI_Sendrecv(&scount, 1, MPI_INT, dst, stag, &rcount, 1, MPI_INT, src, rtag, comm, MPI_STATUS_IGNORE) ) cdef object tmpr = pickle_alloc(&rbuf, rcount) with nogil: CHKERR( MPI_Sendrecv(sbuf, scount, dtype, dst, stag, rbuf, rcount, dtype, src, rtag, comm, MPI_STATUS_IGNORE) ) return pickle_load(pickle, rbuf, rcount) cdef object PyMPI_bcast_p2p(object obj, int root, MPI_Comm comm): cdef Pickle pickle = PyMPI_PICKLE cdef void *buf = NULL cdef int count = 0 cdef MPI_Datatype dtype = MPI_BYTE cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_rank(comm, &rank) ) if root == rank: obj = pickle_dump(pickle, obj, &buf, &count) with PyMPI_Lock(comm, "@bcast_p2p@"): with nogil: CHKERR( MPI_Bcast(&count, 1, 
MPI_INT, root, comm) ) if root != rank: obj = pickle_alloc(&buf, count) with nogil: CHKERR( MPI_Bcast(buf, count, dtype, root, comm) ) return pickle_load(pickle, buf, count) cdef object PyMPI_reduce_p2p(object sendobj, object op, int root, MPI_Comm comm, int tag): # Get communicator size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # Check root argument if root < 0 or root >= size: MPI_Comm_call_errhandler(comm, MPI_ERR_ROOT) raise MPIException(MPI_ERR_ROOT) # cdef object result = PyMPI_copy(sendobj) cdef object tmp # Compute reduction at process 0 cdef unsigned int umask = 1 cdef unsigned int usize = size cdef unsigned int urank = rank cdef int target = 0 while umask < usize: if (umask & urank) != 0: target = ((urank & ~umask) % usize) PyMPI_send_p2p(result, target, tag, comm) else: target = (urank | umask) if target < size: tmp = PyMPI_recv_p2p(target, tag, comm) result = op(result, tmp) umask <<= 1 # Send reduction to root if root != 0: if rank == 0: PyMPI_send_p2p(result, root, tag, comm) elif rank == root: result = PyMPI_recv_p2p(0, tag, comm) if rank != root: result = None # return result cdef object PyMPI_scan_p2p(object sendobj, object op, MPI_Comm comm, int tag): # Get communicator size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # cdef object result = PyMPI_copy(sendobj) cdef object partial = result cdef object tmp # Compute prefix reduction cdef unsigned int umask = 1 cdef unsigned int usize = size cdef unsigned int urank = rank cdef int target = 0 while umask < usize: target = (urank ^ umask) if target < size: tmp = PyMPI_sendrecv_p2p(partial, target, tag, target, tag, comm) if rank > target: partial = op(tmp, partial) result = op(tmp, result) else: tmp = op(partial, tmp) partial = tmp umask <<= 1 # return result cdef object PyMPI_exscan_p2p(object sendobj, object op, MPI_Comm comm, int tag): # Get communicator size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) # cdef object result = PyMPI_copy(sendobj) cdef object partial = result cdef object tmp # Compute prefix reduction cdef unsigned int umask = 1 cdef unsigned int usize = size cdef unsigned int urank = rank cdef unsigned int uflag = 0 cdef int target = 0 while umask < usize: target = (urank ^ umask) if target < size: tmp = PyMPI_sendrecv_p2p(partial, target, tag, target, tag, comm) if rank > target: partial = op(tmp, partial) if uflag == 0: result = tmp; uflag = 1 else: result = op(tmp, result) else: tmp = op(partial, tmp) partial = tmp umask <<= 1 # if rank == 0: result = None return result # ----- cdef extern from *: int PyMPI_Commctx_intra(MPI_Comm,MPI_Comm*,int*) nogil int PyMPI_Commctx_inter(MPI_Comm,MPI_Comm*,int*,MPI_Comm*,int*) nogil cdef int PyMPI_Commctx_INTRA(MPI_Comm comm, MPI_Comm *dupcomm, int *tag) except -1: with PyMPI_Lock(comm, "@commctx_intra"): CHKERR( PyMPI_Commctx_intra(comm, dupcomm, tag) ) return 0 cdef int PyMPI_Commctx_INTER(MPI_Comm comm, MPI_Comm *dupcomm, int *tag, MPI_Comm *localcomm, int *low_group) except -1: with PyMPI_Lock(comm, "@commctx_inter"): CHKERR( PyMPI_Commctx_inter(comm, dupcomm, tag, localcomm, low_group) ) return 0 def _commctx_intra( Intracomm comm: Intracomm, ) -> Tuple[Intracomm, int]: "Create/get intracommunicator duplicate" cdef int tag = MPI_UNDEFINED cdef Intracomm dupcomm = 
Intracomm.__new__(Intracomm) PyMPI_Commctx_INTRA(comm.ob_mpi, &dupcomm.ob_mpi, &tag) return (dupcomm, tag) def _commctx_inter( Intercomm comm: Intercomm, ) -> Tuple[Intercomm, int, Intracomm, bool]: "Create/get intercommunicator duplicate" cdef int tag = MPI_UNDEFINED, low_group = 0 cdef Intercomm dupcomm = Intercomm.__new__(Intercomm) cdef Intracomm localcomm = Intracomm.__new__(Intracomm) PyMPI_Commctx_INTER(comm.ob_mpi, &dupcomm.ob_mpi, &tag, &localcomm.ob_mpi, &low_group) return (dupcomm, tag, localcomm, low_group) # ----- cdef object PyMPI_reduce_intra(object sendobj, object op, int root, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) return PyMPI_reduce_p2p(sendobj, op, root, comm, tag) cdef object PyMPI_reduce_inter(object sendobj, object op, int root, MPI_Comm comm): cdef int tag = MPI_UNDEFINED cdef MPI_Comm localcomm = MPI_COMM_NULL PyMPI_Commctx_INTER(comm, &comm, &tag, &localcomm, NULL) # Get communicator remote size and rank cdef int size = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL CHKERR( MPI_Comm_remote_size(comm, &size) ) CHKERR( MPI_Comm_rank(comm, &rank) ) if root >= 0 and root < size: # Reduce in local group and send to remote root sendobj = PyMPI_reduce_p2p(sendobj, op, 0, localcomm, tag) if rank == 0: PyMPI_send_p2p(sendobj, root, tag, comm) return None elif root == MPI_ROOT: # Receive from remote group return PyMPI_recv_p2p(0, tag, comm) elif root == MPI_PROC_NULL: # This process does nothing return None else: # Wrong root argument MPI_Comm_call_errhandler(comm, MPI_ERR_ROOT) raise MPIException(MPI_ERR_ROOT) cdef object PyMPI_allreduce_intra(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) sendobj = PyMPI_reduce_p2p(sendobj, op, 0, comm, tag) return PyMPI_bcast_p2p(sendobj, 0, comm) cdef object PyMPI_allreduce_inter(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED cdef int rank = MPI_PROC_NULL cdef MPI_Comm localcomm = MPI_COMM_NULL PyMPI_Commctx_INTER(comm, &comm, &tag, &localcomm, NULL) CHKERR( MPI_Comm_rank(comm, &rank) ) # Reduce in local group, exchange, and broadcast in local group sendobj = PyMPI_reduce_p2p(sendobj, op, 0, localcomm, tag) if rank == 0: sendobj = PyMPI_sendrecv_p2p(sendobj, 0, tag, 0, tag, comm) return PyMPI_bcast_p2p(sendobj, 0, localcomm) cdef object PyMPI_scan_intra(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) return PyMPI_scan_p2p(sendobj, op, comm, tag) cdef object PyMPI_exscan_intra(object sendobj, object op, MPI_Comm comm): cdef int tag = MPI_UNDEFINED PyMPI_Commctx_INTRA(comm, &comm, &tag) return PyMPI_exscan_p2p(sendobj, op, comm, tag) # ----- cdef inline bint comm_is_intra(MPI_Comm comm) nogil except -1: cdef int inter = 0 CHKERR( MPI_Comm_test_inter(comm, &inter) ) if inter: return 0 else: return 1 cdef object PyMPI_reduce(object sendobj, object op, int root, MPI_Comm comm): if not options.fast_reduce: return PyMPI_reduce_naive(sendobj, op, root, comm) elif comm_is_intra(comm): return PyMPI_reduce_intra(sendobj, op, root, comm) else: return PyMPI_reduce_inter(sendobj, op, root, comm) cdef object PyMPI_allreduce(object sendobj, object op, MPI_Comm comm): if not options.fast_reduce: return PyMPI_allreduce_naive(sendobj, op, comm) elif comm_is_intra(comm): return PyMPI_allreduce_intra(sendobj, op, comm) else: return PyMPI_allreduce_inter(sendobj, op, comm) cdef object PyMPI_scan(object sendobj, object op, MPI_Comm comm): if not options.fast_reduce: 
return PyMPI_scan_naive(sendobj, op, comm) else: return PyMPI_scan_intra(sendobj, op, comm) cdef object PyMPI_exscan(object sendobj, object op, MPI_Comm comm): if not options.fast_reduce: return PyMPI_exscan_naive(sendobj, op, comm) else: return PyMPI_exscan_intra(sendobj, op, comm) # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/opimpl.pxi000066400000000000000000000230051460670727200174360ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef object _op_MAX(object x, object y): """maximum""" if y > x: return y else: return x cdef object _op_MIN(object x, object y): """minimum""" if y < x: return y else: return x cdef object _op_SUM(object x, object y): """sum""" return x + y cdef object _op_PROD(object x, object y): """product""" return x * y cdef object _op_BAND(object x, object y): """bit-wise and""" return x & y cdef object _op_BOR(object x, object y): """bit-wise or""" return x | y cdef object _op_BXOR(object x, object y): """bit-wise xor""" return x ^ y cdef object _op_LAND(object x, object y): """logical and""" return bool(x) & bool(y) cdef object _op_LOR(object x, object y): """logical or""" return bool(x) | bool(y) cdef object _op_LXOR(object x, object y): """logical xor""" return bool(x) ^ bool(y) cdef object _op_MAXLOC(object x, object y): """maximum and location""" cdef object i, j, u, v u, i = x v, j = y if u > v: return u, i elif v > u: return v, j elif j < i: return v, j else: return u, i cdef object _op_MINLOC(object x, object y): """minimum and location""" cdef object i, j, u, v u, i = x v, j = y if u < v: return u, i elif v < u: return v, j elif j < i: return v, j else: return u, i cdef object _op_REPLACE(object x, object y): """replace, (x, y) -> y""" return y cdef object _op_NO_OP(object x, object y): """no-op, (x, y) -> x""" return x # ----------------------------------------------------------------------------- cdef list op_user_registry = [None]*(1+32) cdef inline object op_user_py(int index, object x, object y, object dt): return op_user_registry[index](x, y, dt) cdef inline void op_user_mpi( int index, void *a, void *b, MPI_Aint n, MPI_Datatype *t) with gil: cdef Datatype datatype # errors in user-defined reduction operations are unrecoverable try: datatype = Datatype.__new__(Datatype) datatype.ob_mpi = t[0] try: op_user_py(index, tomemory(a, n), tomemory(b, n), datatype) finally: datatype.ob_mpi = MPI_DATATYPE_NULL except: # print the full exception traceback and abort. 
PySys_WriteStderr(b"Fatal Python error: %s\n", b"exception in user-defined reduction operation") try: print_traceback() finally: MPI_Abort(MPI_COMM_WORLD, 1) cdef inline void op_user_call( int index, void *a, void *b, int *plen, MPI_Datatype *t) nogil: # make it abort if Python has finalized if not Py_IsInitialized(): MPI_Abort(MPI_COMM_WORLD, 1) # make it abort if module clenaup has been done if (op_user_registry) == NULL: MPI_Abort(MPI_COMM_WORLD, 1) # compute the byte-size of memory buffers cdef MPI_Aint lb=0, extent=0 MPI_Type_get_extent(t[0], &lb, &extent) cdef MPI_Aint n = plen[0] * extent # make the actual GIL-safe Python call op_user_mpi(index, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_01(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 1, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_02(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 2, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_03(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 3, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_04(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 4, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_05(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 5, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_06(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 6, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_07(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 7, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_08(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 8, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_09(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call( 9, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_10(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(10, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_11(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(11, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_12(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(12, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_13(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(13, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_14(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(14, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_15(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(15, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_16(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(16, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_17(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(17, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_18(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(18, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_19(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(19, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_20(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(20, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_21(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(21, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_22(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(22, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_23(void *a, void *b, int *n, MPI_Datatype 
*t) nogil: op_user_call(23, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_24(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(24, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_25(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(25, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_26(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(26, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_27(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(27, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_28(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(28, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_29(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(29, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_30(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(30, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_31(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(31, a, b, n, t) @cython.callspec("MPIAPI") cdef void op_user_32(void *a, void *b, int *n, MPI_Datatype *t) nogil: op_user_call(32, a, b, n, t) cdef MPI_User_function *op_user_map(int index) nogil: if index == 1: return op_user_01 elif index == 2: return op_user_02 elif index == 3: return op_user_03 elif index == 4: return op_user_04 elif index == 5: return op_user_05 elif index == 6: return op_user_06 elif index == 7: return op_user_07 elif index == 8: return op_user_08 elif index == 9: return op_user_09 elif index == 10: return op_user_10 elif index == 11: return op_user_11 elif index == 12: return op_user_12 elif index == 13: return op_user_13 elif index == 14: return op_user_14 elif index == 15: return op_user_15 elif index == 16: return op_user_16 elif index == 17: return op_user_17 elif index == 18: return op_user_18 elif index == 19: return op_user_19 elif index == 20: return op_user_20 elif index == 21: return op_user_21 elif index == 22: return op_user_22 elif index == 23: return op_user_23 elif index == 24: return op_user_24 elif index == 25: return op_user_25 elif index == 26: return op_user_26 elif index == 27: return op_user_27 elif index == 28: return op_user_28 elif index == 29: return op_user_29 elif index == 30: return op_user_30 elif index == 31: return op_user_31 elif index == 32: return op_user_32 else: return NULL cdef int op_user_new(object function, MPI_User_function **cfunction) except -1: # find a free slot in the registry cdef int index = 0 try: index = op_user_registry.index(None, 1) except ValueError: raise RuntimeError("cannot create too many " "user-defined reduction operations") # the line below will fail # if the function is not callable function.__call__ # register the Python function, # map it to the associated C function, # and return the slot index in registry op_user_registry[index] = function cfunction[0] = op_user_map(index) return index cdef int op_user_del(int *indexp) except -1: # free slot in the registry cdef int index = indexp[0] indexp[0] = 0 # clear the value op_user_registry[index] = None return 0 # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/reqimpl.pxi000066400000000000000000000130041460670727200176050ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef MPI_Status empty_status PyMPI_Status_set_source (&empty_status, MPI_ANY_SOURCE ) PyMPI_Status_set_tag (&empty_status, MPI_ANY_TAG ) 
PyMPI_Status_set_error (&empty_status, MPI_SUCCESS ) cdef object acquire_rs(object requests, object statuses, int *count, MPI_Request *rp[], MPI_Status *sp[]): cdef MPI_Request *array_r = NULL cdef MPI_Status *array_s = NULL cdef object ob_r = None, ob_s = None cdef Py_ssize_t i = 0, n = len(requests) count[0] = n ob_r = allocate(n, sizeof(MPI_Request), &array_r) for i from 0 <= i < n: array_r[i] = (requests[i]).ob_mpi rp[0] = array_r if statuses is not None: ob_s = allocate(n, sizeof(MPI_Status), &array_s) for i from 0 <= i < n: array_s[i] = empty_status sp[0] = array_s return (ob_r, ob_s) cdef int release_rs(object requests, object statuses, Py_ssize_t incount, MPI_Request rp[], Py_ssize_t outcount, MPI_Status sp[]) except -1: cdef Py_ssize_t i = 0, nr = incount, ns = 0 cdef Request req = None for i from 0 <= i < nr: req = requests[i] req.ob_mpi = rp[i] if rp[i] == MPI_REQUEST_NULL: req.ob_buf = None if statuses is not None and outcount != MPI_UNDEFINED: ns = len(statuses) if outcount > ns: if isinstance(statuses, list): statuses += [Status.__new__(Status) for i from ns <= i < outcount] ns = outcount for i from 0 <= i < min(nr, ns): (statuses[i]).ob_mpi = sp[i] return 0 # ----------------------------------------------------------------------------- @cython.final @cython.internal cdef class _p_greq: cdef object query_fn cdef object free_fn cdef object cancel_fn cdef tuple args cdef dict kargs def __cinit__(self, query_fn, free_fn, cancel_fn, args, kargs): self.query_fn = query_fn self.free_fn = free_fn self.cancel_fn = cancel_fn self.args = tuple(args) if args is not None else () self.kargs = dict(kargs) if kargs is not None else {} cdef int query(self, MPI_Status *status) except -1: PyMPI_Status_set_source(status, MPI_ANY_SOURCE) PyMPI_Status_set_tag(status, MPI_ANY_TAG) PyMPI_Status_set_error(status, MPI_SUCCESS) MPI_Status_set_elements(status, MPI_BYTE, 0) MPI_Status_set_cancelled(status, 0) cdef Status sts = Status.__new__(Status) if self.query_fn is not None: sts.ob_mpi = status[0] self.query_fn(sts, *self.args, **self.kargs) status[0] = sts.ob_mpi if self.cancel_fn is None: MPI_Status_set_cancelled(status, 0) return MPI_SUCCESS cdef int free(self) except -1: if self.free_fn is not None: self.free_fn(*self.args, **self.kargs) return MPI_SUCCESS cdef int cancel(self, bint completed) except -1: if self.cancel_fn is not None: self.cancel_fn(completed, *self.args, **self.kargs) return MPI_SUCCESS # --- cdef int greq_query(void *extra_state, MPI_Status *status) \ except MPI_ERR_UNKNOWN with gil: cdef _p_greq state = <_p_greq>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.query(status) except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER return ierr cdef int greq_free(void *extra_state) \ except MPI_ERR_UNKNOWN with gil: cdef _p_greq state = <_p_greq>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.free() except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER Py_DECREF(extra_state) return ierr cdef int greq_cancel(void *extra_state, int completed) \ except MPI_ERR_UNKNOWN with gil: cdef _p_greq state = <_p_greq>extra_state cdef int ierr = MPI_SUCCESS cdef object exc try: state.cancel(completed) except MPIException as exc: print_traceback() ierr = exc.Get_error_code() except: print_traceback() ierr = MPI_ERR_OTHER return ierr # --- @cython.callspec("MPIAPI") cdef int greq_query_fn(void *extra_state, MPI_Status *status) 
nogil: if extra_state == NULL: return MPI_ERR_INTERN if status == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN return greq_query(extra_state, status) @cython.callspec("MPIAPI") cdef int greq_free_fn(void *extra_state) nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN return greq_free(extra_state) @cython.callspec("MPIAPI") cdef int greq_cancel_fn(void *extra_state, int completed) nogil: if extra_state == NULL: return MPI_ERR_INTERN if not Py_IsInitialized(): return MPI_ERR_INTERN return greq_cancel(extra_state, completed) # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/stdlib.pxi000066400000000000000000000011511460670727200174150ustar00rootroot00000000000000#------------------------------------------------------------------------------ cdef extern from * nogil: # "stddef.h" ctypedef unsigned int wchar_t cdef extern from * nogil: # "string.h" int memcmp(const void *, const void *, size_t) void *memset(void *, int, size_t) void *memcpy(void *, const void *, size_t) void *memmove(void *, const void *, size_t) cdef extern from * nogil: # "stdio.h" ctypedef struct FILE FILE *stdin, *stdout, *stderr int fprintf(FILE *, char *, ...) int fflush(FILE *) #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/MPI/typemap.pxi000066400000000000000000000254171460670727200176260ustar00rootroot00000000000000# ----------------------------------------------------------------------------- cdef inline int AddTypeMap(dict TD, const char tc[], Datatype dt) except -1: if dt.ob_mpi != MPI_DATATYPE_NULL: TD[pystr(tc)] = dt return 1 return 0 # ----------------------------------------------------------------------------- cdef dict TypeDict = { } _typedict = TypeDict # boolean (C++) AddTypeMap(TypeDict, "?" , __CXX_BOOL__ ) # PEP-3118 & NumPy # boolean (C99) AddTypeMap(TypeDict, "?" 
, __C_BOOL__ ) # PEP-3118 & NumPy # character AddTypeMap(TypeDict, "c" , __CHAR__ ) # PEP-3118 & NumPy AddTypeMap(TypeDict, "S" , __CHAR__ ) # NumPy AddTypeMap(TypeDict, "S1", __CHAR__ ) # NumPy AddTypeMap(TypeDict, "s", __CHAR__ ) # PEP-3118 AddTypeMap(TypeDict, "1s", __CHAR__ ) # PEP-3118 # (signed) integer AddTypeMap(TypeDict, "b" , __SIGNED_CHAR__ ) # MPI-2 AddTypeMap(TypeDict, "h" , __SHORT__ ) AddTypeMap(TypeDict, "i" , __INT__ ) AddTypeMap(TypeDict, "l" , __LONG__ ) AddTypeMap(TypeDict, "q" , __LONG_LONG__ ) # unsigned integer AddTypeMap(TypeDict, "B" , __UNSIGNED_CHAR__ ) AddTypeMap(TypeDict, "H" , __UNSIGNED_SHORT__ ) AddTypeMap(TypeDict, "I" , __UNSIGNED__ ) AddTypeMap(TypeDict, "L" , __UNSIGNED_LONG__ ) AddTypeMap(TypeDict, "Q" , __UNSIGNED_LONG_LONG__ ) # (real) floating AddTypeMap(TypeDict, "f" , __FLOAT__ ) AddTypeMap(TypeDict, "d" , __DOUBLE__ ) AddTypeMap(TypeDict, "g" , __LONG_DOUBLE__ ) # PEP-3118 & NumPy # complex floating (F77) AddTypeMap(TypeDict, "Zf" , __COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "D" , __DOUBLE_COMPLEX__ ) # NumPy # complex floating (F90) AddTypeMap(TypeDict, "Zf" , __COMPLEX8__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __COMPLEX16__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __COMPLEX8__ ) # NumPy AddTypeMap(TypeDict, "D" , __COMPLEX16__ ) # NumPy # complex floating (C++) AddTypeMap(TypeDict, "Zf" , __CXX_FLOAT_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __CXX_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zg" , __CXX_LONG_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __CXX_FLOAT_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "D" , __CXX_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "G" , __CXX_LONG_DOUBLE_COMPLEX__ ) # NumPy # complex floating (C99) AddTypeMap(TypeDict, "Zf" , __C_FLOAT_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zd" , __C_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "Zg" , __C_LONG_DOUBLE_COMPLEX__ ) # PEP-3118 AddTypeMap(TypeDict, "F" , __C_FLOAT_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "D" , __C_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "G" , __C_LONG_DOUBLE_COMPLEX__ ) # NumPy # boolean (C99/C++) AddTypeMap(TypeDict, "b1" , __CXX_BOOL__ ) # NumPy AddTypeMap(TypeDict, "b1" , __C_BOOL__ ) # NumPy # signed and unsigned integer (C) if sizeof(char) == 1: AddTypeMap(TypeDict, "i1" , __SIGNED_CHAR__ ) # NumPy AddTypeMap(TypeDict, "u1" , __UNSIGNED_CHAR__ ) # NumPy if sizeof(short) == 2: AddTypeMap(TypeDict, "i2" , __SHORT__ ) # NumPy AddTypeMap(TypeDict, "u2" , __UNSIGNED_SHORT__ ) # NumPy if sizeof(long) == 4: AddTypeMap(TypeDict, "i4" , __LONG__ ) # NumPy AddTypeMap(TypeDict, "u4" , __UNSIGNED_LONG__ ) # NumPy if sizeof(int) == 4: AddTypeMap(TypeDict, "i4" , __INT__ ) # NumPy AddTypeMap(TypeDict, "u4" , __UNSIGNED__ ) # NumPy if sizeof(long long) == 8: AddTypeMap(TypeDict, "i8" , __LONG_LONG__ ) # NumPy AddTypeMap(TypeDict, "u8" , __UNSIGNED_LONG_LONG__ ) # NumPy if sizeof(long) == 8: AddTypeMap(TypeDict, "i8" , __LONG__ ) # NumPy AddTypeMap(TypeDict, "u8" , __UNSIGNED_LONG__ ) # NumPy # signed integer (C99) AddTypeMap(TypeDict, "i1" , __INT8_T__ ) # NumPy AddTypeMap(TypeDict, "i2" , __INT16_T__ ) # NumPy AddTypeMap(TypeDict, "i4" , __INT32_T__ ) # NumPy AddTypeMap(TypeDict, "i8" , __INT64_T__ ) # NumPy # unsigned integer (C99) AddTypeMap(TypeDict, "u1" , __UINT8_T__ ) # NumPy AddTypeMap(TypeDict, "u2" , __UINT16_T__ ) # NumPy AddTypeMap(TypeDict, "u4" , __UINT32_T__ ) # NumPy AddTypeMap(TypeDict, 
"u8" , __UINT64_T__ ) # NumPy # real (C) and complex (C99/C++) floating if sizeof(float) == 4: AddTypeMap(TypeDict, "f4" , __FLOAT__ ) # NumPy AddTypeMap(TypeDict, "c8" , __CXX_FLOAT_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "c8" , __C_FLOAT_COMPLEX__ ) # NumPy if sizeof(double) == 8: AddTypeMap(TypeDict, "f8" , __DOUBLE__ ) # NumPy AddTypeMap(TypeDict, "c16" , __CXX_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "c16" , __C_DOUBLE_COMPLEX__ ) # NumPy if sizeof(long double) == 12: AddTypeMap(TypeDict, "f12" , __LONG_DOUBLE__ ) # NumPy AddTypeMap(TypeDict, "c24" , __CXX_LONG_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "c24" , __C_LONG_DOUBLE_COMPLEX__ ) # NumPy if sizeof(long double) == 16: AddTypeMap(TypeDict, "f16" , __LONG_DOUBLE__ ) # NumPy AddTypeMap(TypeDict, "c32" , __CXX_LONG_DOUBLE_COMPLEX__ ) # NumPy AddTypeMap(TypeDict, "c32" , __C_LONG_DOUBLE_COMPLEX__ ) # NumPy # ssize_t and size_t (C) if sizeof(size_t) == sizeof(long long): AddTypeMap(TypeDict, "n" , __LONG_LONG__ ) AddTypeMap(TypeDict, "N" , __UNSIGNED_LONG_LONG__ ) if sizeof(size_t) == sizeof(long): AddTypeMap(TypeDict, "n" , __LONG__ ) AddTypeMap(TypeDict, "N" , __UNSIGNED_LONG__ ) if sizeof(size_t) == sizeof(int): AddTypeMap(TypeDict, "n" , __INT__ ) AddTypeMap(TypeDict, "N" , __UNSIGNED__ ) if sizeof(size_t) == sizeof(MPI_Count): AddTypeMap(TypeDict, "n" , __COUNT__ ) # intptr_t and uintptr_t (C99) if sizeof(Py_intptr_t) == sizeof(long long): AddTypeMap(TypeDict, "p" , __LONG_LONG__ ) # NumPy AddTypeMap(TypeDict, "P" , __UNSIGNED_LONG_LONG__ ) # NumPy if sizeof(Py_intptr_t) == sizeof(long): AddTypeMap(TypeDict, "p" , __LONG__ ) # NumPy AddTypeMap(TypeDict, "P" , __UNSIGNED_LONG__ ) # NumPy if sizeof(Py_intptr_t) == sizeof(int): AddTypeMap(TypeDict, "p" , __INT__ ) # NumPy AddTypeMap(TypeDict, "P" , __UNSIGNED__ ) # NumPy if sizeof(Py_intptr_t) == sizeof(MPI_Aint): AddTypeMap(TypeDict, "p" , __AINT__ ) # NumPy # wide character if sizeof(wchar_t) == 4: AddTypeMap(TypeDict, "U" , __WCHAR__ ) # NumPy AddTypeMap(TypeDict, "U1", __WCHAR__ ) # NumPy # UTF-16/UCS-2 if sizeof(short) == 2: AddTypeMap(TypeDict, "u" , __UNSIGNED_SHORT__ ) # PEP-3118 AddTypeMap(TypeDict, "1u", __UNSIGNED_SHORT__ ) # PEP-3118 if 2 == 2: AddTypeMap(TypeDict, "u" , __UINT16_T__ ) # PEP-3118 AddTypeMap(TypeDict, "1u", __UINT16_T__ ) # PEP-3118 if sizeof(wchar_t) == 2: AddTypeMap(TypeDict, "u" , __WCHAR__ ) # PEP-3118 AddTypeMap(TypeDict, "1u", __WCHAR__ ) # PEP-3118 # UTF-32/UCS-4 if sizeof(int) == 4: AddTypeMap(TypeDict, "w" , __UNSIGNED__ ) # PEP-3118 AddTypeMap(TypeDict, "1w", __UNSIGNED__ ) # PEP-3118 if 4 == 4: AddTypeMap(TypeDict, "w" , __UINT32_T__ ) # PEP-3118 AddTypeMap(TypeDict, "1w", __UINT32_T__ ) # PEP-3118 if sizeof(wchar_t) == 4: AddTypeMap(TypeDict, "w" , __WCHAR__ ) # PEP-3118 AddTypeMap(TypeDict, "1w", __WCHAR__ ) # PEP-3118 # ----------------------------------------------------------------------------- cdef dict CTypeDict = { } _typedict_c = CTypeDict AddTypeMap(CTypeDict, "?" 
, __C_BOOL__ ) AddTypeMap(CTypeDict, "b" , __SIGNED_CHAR__ ) AddTypeMap(CTypeDict, "h" , __SHORT__ ) AddTypeMap(CTypeDict, "i" , __INT__ ) AddTypeMap(CTypeDict, "l" , __LONG__ ) AddTypeMap(CTypeDict, "q" , __LONG_LONG__ ) AddTypeMap(CTypeDict, "B" , __UNSIGNED_CHAR__ ) AddTypeMap(CTypeDict, "H" , __UNSIGNED_SHORT__ ) AddTypeMap(CTypeDict, "I" , __UNSIGNED__ ) AddTypeMap(CTypeDict, "L" , __UNSIGNED_LONG__ ) AddTypeMap(CTypeDict, "Q" , __UNSIGNED_LONG_LONG__ ) AddTypeMap(CTypeDict, "f" , __FLOAT__ ) AddTypeMap(CTypeDict, "d" , __DOUBLE__ ) AddTypeMap(CTypeDict, "g" , __LONG_DOUBLE__ ) AddTypeMap(CTypeDict, "F" , __C_FLOAT_COMPLEX__ ) AddTypeMap(CTypeDict, "D" , __C_DOUBLE_COMPLEX__ ) AddTypeMap(CTypeDict, "G" , __C_LONG_DOUBLE_COMPLEX__ ) AddTypeMap(CTypeDict, "b1" , __C_BOOL__ ) AddTypeMap(CTypeDict, "i1" , __INT8_T__ ) AddTypeMap(CTypeDict, "i2" , __INT16_T__ ) AddTypeMap(CTypeDict, "i4" , __INT32_T__ ) AddTypeMap(CTypeDict, "i8" , __INT64_T__ ) AddTypeMap(CTypeDict, "u1" , __UINT8_T__ ) AddTypeMap(CTypeDict, "u2" , __UINT16_T__ ) AddTypeMap(CTypeDict, "u4" , __UINT32_T__ ) AddTypeMap(CTypeDict, "u8" , __UINT64_T__ ) if sizeof(float) == 4: AddTypeMap(CTypeDict, "f4" , __FLOAT__ ) AddTypeMap(CTypeDict, "c8" , __C_FLOAT_COMPLEX__ ) if sizeof(double) == 8: AddTypeMap(CTypeDict, "f8" , __DOUBLE__ ) AddTypeMap(CTypeDict, "c16" , __C_DOUBLE_COMPLEX__ ) if sizeof(long double) == 12: AddTypeMap(CTypeDict, "f12" , __LONG_DOUBLE__ ) AddTypeMap(CTypeDict, "c24" , __C_LONG_DOUBLE_COMPLEX__ ) if sizeof(long double) == 16: AddTypeMap(CTypeDict, "f16" , __LONG_DOUBLE__ ) AddTypeMap(CTypeDict, "c32" , __C_LONG_DOUBLE_COMPLEX__ ) # ----------------------------------------------------------------------------- cdef dict FTypeDict = { } _typedict_f = FTypeDict AddTypeMap(FTypeDict, "?" 
, __LOGICAL__ ) AddTypeMap(FTypeDict, "i" , __INTEGER__ ) AddTypeMap(FTypeDict, "s" , __REAL__ ) AddTypeMap(FTypeDict, "r" , __REAL__ ) AddTypeMap(FTypeDict, "d" , __DOUBLE_PRECISION__ ) AddTypeMap(FTypeDict, "c" , __COMPLEX__ ) AddTypeMap(FTypeDict, "z" , __DOUBLE_COMPLEX__ ) AddTypeMap(FTypeDict, "?1" , __LOGICAL1__ ) AddTypeMap(FTypeDict, "?2" , __LOGICAL2__ ) AddTypeMap(FTypeDict, "?4" , __LOGICAL4__ ) AddTypeMap(FTypeDict, "?8" , __LOGICAL8__ ) AddTypeMap(FTypeDict, "i1" , __INTEGER1__ ) AddTypeMap(FTypeDict, "i2" , __INTEGER2__ ) AddTypeMap(FTypeDict, "i4" , __INTEGER4__ ) AddTypeMap(FTypeDict, "i8" , __INTEGER8__ ) AddTypeMap(FTypeDict, "i16" , __INTEGER16__ ) AddTypeMap(FTypeDict, "r2" , __REAL2__ ) AddTypeMap(FTypeDict, "r4" , __REAL4__ ) AddTypeMap(FTypeDict, "r8" , __REAL8__ ) AddTypeMap(FTypeDict, "r16" , __REAL16__ ) AddTypeMap(FTypeDict, "c4" , __COMPLEX4__ ) AddTypeMap(FTypeDict, "c8" , __COMPLEX8__ ) AddTypeMap(FTypeDict, "c16" , __COMPLEX16__ ) AddTypeMap(FTypeDict, "c32" , __COMPLEX32__ ) # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/typestr.pxi000066400000000000000000000113031460670727200176460ustar00rootroot00000000000000# ----------------------------------------------------------------------------- def _typecode(Datatype datatype: Datatype) -> Optional[str]: """ Map MPI datatype to typecode string """ cdef const char *tc = Datatype2String(datatype.ob_mpi) return pystr(tc) if tc != NULL else None # ----------------------------------------------------------------------------- cdef inline const char* Datatype2String(MPI_Datatype datatype) nogil: if datatype == MPI_DATATYPE_NULL: return NULL # MPI elif datatype == MPI_LB : return NULL elif datatype == MPI_UB : return NULL elif datatype == MPI_PACKED : return "B" elif datatype == MPI_BYTE : return "B" elif datatype == MPI_AINT : return "p" elif datatype == MPI_OFFSET : if sizeof(MPI_Offset) == sizeof(MPI_Aint) : return "p" elif sizeof(MPI_Offset) == sizeof(long long) : return "q" elif sizeof(MPI_Offset) == sizeof(long) : return "l" elif sizeof(MPI_Offset) == sizeof(int) : return "i" else : return NULL elif datatype == MPI_COUNT : if sizeof(MPI_Count) == sizeof(MPI_Aint) : return "p" elif sizeof(MPI_Count) == sizeof(long long) : return "q" elif sizeof(MPI_Count) == sizeof(long) : return "l" elif sizeof(MPI_Count) == sizeof(int) : return "i" else : return NULL # C - character elif datatype == MPI_CHAR : return "c" elif datatype == MPI_WCHAR : if sizeof(wchar_t) == 4: return "U" else : return NULL # C - (signed) integral elif datatype == MPI_SIGNED_CHAR : return "b" elif datatype == MPI_SHORT : return "h" elif datatype == MPI_INT : return "i" elif datatype == MPI_LONG : return "l" elif datatype == MPI_LONG_LONG : return "q" # C - unsigned integral elif datatype == MPI_UNSIGNED_CHAR : return "B" elif datatype == MPI_UNSIGNED_SHORT : return "H" elif datatype == MPI_UNSIGNED : return "I" elif datatype == MPI_UNSIGNED_LONG : return "L" elif datatype == MPI_UNSIGNED_LONG_LONG : return "Q" # C - (real) floating elif datatype == MPI_FLOAT : return "f" elif datatype == MPI_DOUBLE : return "d" elif datatype == MPI_LONG_DOUBLE : return "g" # C99 - boolean elif datatype == MPI_C_BOOL : return "?" 
# C99 - integral elif datatype == MPI_INT8_T : return "i1" elif datatype == MPI_INT16_T : return "i2" elif datatype == MPI_INT32_T : return "i4" elif datatype == MPI_INT64_T : return "i8" elif datatype == MPI_UINT8_T : return "u1" elif datatype == MPI_UINT16_T : return "u2" elif datatype == MPI_UINT32_T : return "u4" elif datatype == MPI_UINT64_T : return "u8" # C99 - complex floating elif datatype == MPI_C_COMPLEX : return "F" elif datatype == MPI_C_FLOAT_COMPLEX : return "F" elif datatype == MPI_C_DOUBLE_COMPLEX : return "D" elif datatype == MPI_C_LONG_DOUBLE_COMPLEX : return "G" # C++ - boolean elif datatype == MPI_CXX_BOOL : return "?" # C++ - complex floating elif datatype == MPI_CXX_FLOAT_COMPLEX : return "F" elif datatype == MPI_CXX_DOUBLE_COMPLEX : return "D" elif datatype == MPI_CXX_LONG_DOUBLE_COMPLEX : return "G" # Fortran elif datatype == MPI_CHARACTER : return "c" elif datatype == MPI_LOGICAL : return NULL elif datatype == MPI_INTEGER : return "i" elif datatype == MPI_REAL : return "f" elif datatype == MPI_DOUBLE_PRECISION : return "d" elif datatype == MPI_COMPLEX : return "F" elif datatype == MPI_DOUBLE_COMPLEX : return "D" # Fortran 90 elif datatype == MPI_LOGICAL1 : return NULL elif datatype == MPI_LOGICAL2 : return NULL elif datatype == MPI_LOGICAL4 : return NULL elif datatype == MPI_LOGICAL8 : return NULL elif datatype == MPI_INTEGER1 : return "i1" elif datatype == MPI_INTEGER2 : return "i2" elif datatype == MPI_INTEGER4 : return "i4" elif datatype == MPI_INTEGER8 : return "i8" elif datatype == MPI_INTEGER16 : return "i16" elif datatype == MPI_REAL2 : return "f2" elif datatype == MPI_REAL4 : return "f4" elif datatype == MPI_REAL8 : return "f8" elif datatype == MPI_REAL16 : return "f16" elif datatype == MPI_COMPLEX4 : return "c4" elif datatype == MPI_COMPLEX8 : return "c8" elif datatype == MPI_COMPLEX16 : return "c16" elif datatype == MPI_COMPLEX32 : return "c32" else : return NULL # ----------------------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/MPI/typing.pxi000066400000000000000000000004061460670727200174500ustar00rootroot00000000000000cdef Any cdef Union cdef Literal cdef Optional cdef NoReturn cdef Callable cdef Hashable cdef Iterable cdef Iterator cdef Sequence cdef Mapping cdef Tuple cdef List cdef Dict cdef Buffer cdef BufSpec cdef BufSpecB cdef BufSpecV cdef BufSpecW cdef TargetSpec mpi4py-3.1.6/src/mpi4py/MPI/winimpl.pxi000066400000000000000000000016271460670727200176230ustar00rootroot00000000000000#------------------------------------------------------------------------------ cdef inline int win_get_base(MPI_Win win,void **base) except -1: cdef int flag = 0 cdef void *attr = NULL CHKERR( MPI_Win_get_attr(win, MPI_WIN_BASE, &attr, &flag) ) base[0] = attr if flag and attr != NULL else NULL return 0 cdef inline int win_get_size(MPI_Win win,MPI_Aint *size) except -1: cdef int flag = 0 cdef MPI_Aint *attr = NULL CHKERR( MPI_Win_get_attr(win, MPI_WIN_SIZE, &attr, &flag) ) size[0] = attr[0] if flag and attr != NULL else 0 return 0 cdef inline int win_get_unit(MPI_Win win,int *disp_unit) except -1: cdef int flag = 0 cdef int *attr = NULL CHKERR( MPI_Win_get_attr(win, MPI_WIN_DISP_UNIT, &attr, &flag) ) disp_unit[0] = attr[0] if flag and attr != NULL else 1 return 0 #------------------------------------------------------------------------------ mpi4py-3.1.6/src/mpi4py/__init__.pxd000066400000000000000000000000701460670727200172400ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com 
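The typecode tables built in typemap.pxi above are exposed at the Python level as MPI._typedict (plus the C and Fortran variants _typedict_c and _typedict_f), and typestr.pxi provides the reverse mapping from an MPI datatype back to a typecode. As a rough, illustrative sketch of how the forward mapping is typically used, assuming NumPy is available to supply the single-character typecodes:

    # illustrative sketch: pick the MPI datatype registered for a typecode
    import numpy as np
    from mpi4py import MPI

    buf = np.zeros(8, dtype='d')             # typecode 'd' (C double)
    mpitype = MPI._typedict[buf.dtype.char]  # -> MPI.DOUBLE on typical builds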
mpi4py-3.1.6/src/mpi4py/__init__.py000066400000000000000000000122371460670727200171050ustar00rootroot00000000000000# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com

"""This is the **MPI for Python** package.

The *Message Passing Interface* (MPI) is a standardized and portable
message-passing system designed to function on a wide variety of
parallel computers. The MPI standard defines the syntax and semantics
of library routines and allows users to write portable programs in the
main scientific programming languages (Fortran, C, or C++). Since its
release, the MPI specification has become the leading standard for
message-passing libraries for parallel computers.

*MPI for Python* provides MPI bindings for the Python programming
language, allowing any Python program to exploit multiple processors.
This package builds on the MPI specification and provides an
object-oriented interface which closely follows the MPI-2 C++ bindings.

"""

__version__ = '3.1.6'
__author__ = 'Lisandro Dalcin'
__credits__ = 'MPI Forum, MPICH Team, Open MPI Team'


__all__ = ['MPI']


class Rc:
    """Runtime configuration options.

    Attributes
    ----------
    initialize : bool
        Automatic MPI initialization at import (default: True).
    threads : bool
        Request initialization with thread support (default: True).
    thread_level : {"multiple", "serialized", "funneled", "single"}
        Level of thread support to request (default: "multiple").
    finalize : None or bool
        Automatic MPI finalization at exit (default: None).
    fast_reduce : bool
        Use tree-based reductions for objects (default: True).
    recv_mprobe : bool
        Use matched probes to receive objects (default: True).
    errors : {"exception", "default", "fatal"}
        Error handling policy (default: "exception").

    """

    initialize = True
    threads = True
    thread_level = 'multiple'
    finalize = None
    fast_reduce = True
    recv_mprobe = True
    errors = 'exception'

    def __init__(self, **kwargs):
        self(**kwargs)

    def __call__(self, **kwargs):
        for key in kwargs:
            if not hasattr(self, key):
                raise TypeError("unexpected argument '{0}'".format(key))
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __repr__(self):
        return '<{0}.rc>'.format(__name__)


rc = Rc()
__import__('sys').modules[__name__ + '.rc'] = rc


def get_include():
    """Return the directory in the package that contains header files.

    Extension modules that need to compile against mpi4py should use
    this function to locate the appropriate include directory. Using
    Python distutils (or perhaps NumPy distutils)::

        import mpi4py
        Extension('extension_name', ...
                  include_dirs=[..., mpi4py.get_include()])

    """
    # pylint: disable=import-outside-toplevel
    from os.path import join, dirname
    return join(dirname(__file__), 'include')


def get_config():
    """Return a dictionary with information about MPI."""
    # pylint: disable=import-outside-toplevel
    from os.path import join, dirname
    from configparser import ConfigParser
    parser = ConfigParser()
    parser.read(join(dirname(__file__), 'mpi.cfg'))
    return dict(parser.items('mpi'))


def profile(name, *, path=None, logfile=None):
    """Support for the MPI profiling interface.

    Parameters
    ----------
    name : str
        Name of the profiler library to load.
    path : `sequence` of str, optional
        Additional paths to search for the profiler.
    logfile : str, optional
        Filename prefix for dumping profiler output.
""" # pylint: disable=import-outside-toplevel import os import sys from .dl import dlopen, dlerror, RTLD_NOW, RTLD_GLOBAL def lookup_dylib(name, path): # pylint: disable=missing-docstring pattern = [] if sys.platform.startswith('win'): # pragma: no cover pattern.append(('', '.dll')) elif sys.platform == 'darwin': # pragma: no cover pattern.append(('lib', '.dylib')) elif os.name == 'posix': # pragma: no cover pattern.append(('lib', '.so')) pattern.append(('', '')) for pth in path: for (lib, dso) in pattern: filename = os.path.join(pth, lib + name + dso) if os.path.isfile(filename): return os.path.abspath(filename) return None if logfile: if name in ('mpe',): if 'MPE_LOGFILE_PREFIX' not in os.environ: os.environ['MPE_LOGFILE_PREFIX'] = logfile if name in ('vt', 'vt-mpi', 'vt-hyb'): if 'VT_FILE_PREFIX' not in os.environ: os.environ['VT_FILE_PREFIX'] = logfile if path is None: path = [] elif isinstance(path, str): path = [path] else: path = list(path) prefix = os.path.dirname(__file__) path.append(os.path.join(prefix, 'lib-pmpi')) filename = lookup_dylib(name, path) if filename is None: raise ValueError("profiler '{0}' not found".format(name)) handle = dlopen(filename, RTLD_NOW | RTLD_GLOBAL) if handle: registry = vars(profile).setdefault('registry', []) registry.append((name, (handle, filename))) else: from warnings import warn warn(dlerror()) mpi4py-3.1.6/src/mpi4py/__init__.pyi000066400000000000000000000014311460670727200172500ustar00rootroot00000000000000from typing import Any, Optional, Union from typing import Sequence from typing import Dict, List __version__: str = ... __author__: str = ... __credits__: str = ... from . import MPI __all__: List[str] = ... class Rc: initialize: bool = True threads: bool = True thread_level: str = 'multiple' finalize: Optional[bool] = None fast_reduce: bool = True recv_mprobe: bool = True errors: str = 'exception' def __init__(self, **kwargs: Any) -> None: ... def __call__(self, **kwargs: Any) -> None: ... def __repr__(self) -> str: ... rc: Rc = ... def get_include() -> str: ... def get_config() -> Dict[str, str]: ... def profile( name: str, *, path: Optional[Union[str, Sequence[str]]] = None, logfile: Optional[str] = None, ) -> None: ... mpi4py-3.1.6/src/mpi4py/__main__.py000066400000000000000000000002451460670727200170620ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Entry-point for ``python -m mpi4py ...``.""" from .run import main if __name__ == '__main__': main() mpi4py-3.1.6/src/mpi4py/__main__.pyi000066400000000000000000000000261460670727200172300ustar00rootroot00000000000000from .run import main mpi4py-3.1.6/src/mpi4py/bench.py000066400000000000000000000142541460670727200164260ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Run MPI benchmarks and tests.""" import sys as _sys def helloworld(comm, args=None, verbose=True): """Hello, World! using MPI.""" # pylint: disable=import-outside-toplevel from argparse import ArgumentParser parser = ArgumentParser(prog=__name__ + " helloworld") parser.add_argument("-q", "--quiet", action="store_false", dest="verbose", default=verbose) options = parser.parse_args(args) from . import MPI size = comm.Get_size() rank = comm.Get_rank() name = MPI.Get_processor_name() message = ("Hello, World! 
I am process %*d of %d on %s.\n" % (len(str(size - 1)), rank, size, name)) comm.Barrier() if rank > 0: comm.Recv([None, 'B'], rank - 1) if options.verbose: _sys.stdout.write(message) _sys.stdout.flush() if rank < size - 1: comm.Send([None, 'B'], rank + 1) comm.Barrier() return message def ringtest(comm, args=None, verbose=True): """Time a message going around the ring of processes.""" # pylint: disable=too-many-locals # pylint: disable=too-many-statements # pylint: disable=import-outside-toplevel from argparse import ArgumentParser parser = ArgumentParser(prog=__name__ + " ringtest") parser.add_argument("-q", "--quiet", action="store_false", dest="verbose", default=verbose) parser.add_argument("-n", "--size", type=int, default=1, dest="size", help="message size") parser.add_argument("-s", "--skip", type=int, default=0, dest="skip", help="number of warm-up iterations") parser.add_argument("-l", "--loop", type=int, default=1, dest="loop", help="number of iterations") options = parser.parse_args(args) def ring(comm, n=1, loop=1, skip=0): # pylint: disable=invalid-name # pylint: disable=missing-docstring from array import array from . import MPI iterations = list(range((loop + skip))) size = comm.Get_size() rank = comm.Get_rank() source = (rank - 1) % size dest = (rank + 1) % size Sendrecv = comm.Sendrecv Send = comm.Send Recv = comm.Recv Wtime = MPI.Wtime sendmsg = array('B', [+42]) * n recvmsg = array('B', [0x0]) * n if size == 1: for i in iterations: if i == skip: tic = Wtime() Sendrecv(sendmsg, dest, 0, recvmsg, source, 0) else: if rank == 0: for i in iterations: if i == skip: tic = Wtime() Send(sendmsg, dest, 0) Recv(recvmsg, source, 0) else: sendmsg = recvmsg for i in iterations: if i == skip: tic = Wtime() Recv(recvmsg, source, 0) Send(sendmsg, dest, 0) toc = Wtime() if comm.rank == 0 and sendmsg != recvmsg: # pragma: no cover import warnings import traceback try: warnings.warn("received message does not match!") except UserWarning: traceback.print_exc() comm.Abort(2) return toc - tic size = getattr(options, 'size', 1) loop = getattr(options, 'loop', 1) skip = getattr(options, 'skip', 0) comm.Barrier() elapsed = ring(comm, size, loop, skip) if options.verbose and comm.rank == 0: message = ("time for %d loops = %g seconds (%d processes, %d bytes)\n" % (loop, elapsed, comm.size, size)) _sys.stdout.write(message) _sys.stdout.flush() return elapsed def main(args=None): """Entry-point for ``python -m mpi4py.bench``.""" # pylint: disable=import-outside-toplevel from argparse import ArgumentParser, REMAINDER parser = ArgumentParser(prog=__name__, usage="%(prog)s [options] [args]") parser.add_argument("--threads", action="store_true", dest="threads", default=None, help="initialize MPI with thread support") parser.add_argument("--no-threads", action="store_false", dest="threads", default=None, help="initialize MPI without thread support") parser.add_argument("--thread-level", dest="thread_level", default=None, action="store", metavar="LEVEL", choices="single funneled serialized multiple".split(), help="initialize MPI with required thread level") parser.add_argument("--mpe", action="store_true", dest="mpe", default=False, help="use MPE for MPI profiling") parser.add_argument("--vt", action="store_true", dest="vt", default=False, help="use VampirTrace for MPI profiling") parser.add_argument("command", action="store", metavar="", help="benchmark command to run") parser.add_argument("args", nargs=REMAINDER, metavar="[args]", help="arguments for benchmark command") options = parser.parse_args(args) 
from . import rc, profile if options.threads is not None: rc.threads = options.threads if options.thread_level is not None: rc.thread_level = options.thread_level if options.mpe: profile('mpe', logfile='mpi4py') if options.vt: profile('vt', logfile='mpi4py') from . import MPI comm = MPI.COMM_WORLD if options.command not in main.commands: if comm.rank == 0: parser.error("unknown command '%s'" % options.command) parser.exit(2) command = main.commands[options.command] command(comm, options.args) parser.exit() main.commands = { # type: ignore[attr-defined] 'helloworld': helloworld, 'ringtest': ringtest, } if __name__ == '__main__': main() mpi4py-3.1.6/src/mpi4py/bench.pyi000066400000000000000000000005371460670727200165760ustar00rootroot00000000000000from .MPI import Intracomm from typing import Optional from typing import Sequence def helloworld(comm: Intracomm, args: Optional[Sequence[str]] = None, verbose: bool = True) -> str: ... def ringtest(comm: Intracomm, args: Optional[Sequence[str]] = None, verbose: bool = True) -> float: ... def main(args: Optional[Sequence[str]] = ...) -> None: ... mpi4py-3.1.6/src/mpi4py/dl.pyi000066400000000000000000000014061460670727200161120ustar00rootroot00000000000000import sys from typing import Final, Optional Path = str Handle = int Address = int RTLD_LAZY: Final[int] = ... RTLD_NOW: Final[int] = ... RTLD_GLOBAL: Final[int] = ... RTLD_LOCAL: Final[int] = ... RTLD_NOLOAD: Final[int] = ... RTLD_NODELETE: Final[int] = ... if sys.platform == 'linux': RTLD_DEEPBIND: Final[int] = ... if sys.platform == 'darwin': RTLD_FIRST: Final[int] = ... RTLD_DEFAULT: Final[Handle] = ... RTLD_NEXT: Final[Handle] = ... if sys.platform == 'darwin': RTLD_SELF: Final[Handle] = ... RTLD_MAIN_ONLY: Final[Handle] = ... def dlopen(filename: Optional[Path], mode: int) -> Handle: ... def dlclose(handle: Optional[Handle]) -> int: ... def dlsym(handle: Optional[Handle], symbol: str) -> Address: ... def dlerror() -> Optional[str]: ... 
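# ---------------------------------------------------------------------------
# [Editor's illustrative sketch -- not part of the mpi4py distribution.]
# The helpers defined in mpi4py/bench.py above (helloworld, ringtest) are
# normally driven through ``python -m mpi4py.bench``, but they can also be
# called programmatically.  This is a minimal, hedged example; the file name
# and the ``mpiexec -n 4`` launch line are assumptions for illustration only:
#
#     $ mpiexec -n 4 python bench_example.py
from mpi4py import MPI
from mpi4py.bench import helloworld, ringtest

comm = MPI.COMM_WORLD
# Ordered "Hello, World!" lines, one per rank (pass an empty args list so
# the helper does not try to parse this script's own command line).
helloworld(comm, args=[])
# Time 100 trips of a 1024-byte message around the ring of processes.
elapsed = ringtest(comm, args=["-n", "1024", "-l", "100"], verbose=False)
if comm.rank == 0:
    print("average time per loop: %g seconds" % (elapsed / 100))
# ---------------------------------------------------------------------------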
mpi4py-3.1.6/src/mpi4py/futures/000077500000000000000000000000001460670727200164645ustar00rootroot00000000000000mpi4py-3.1.6/src/mpi4py/futures/__init__.py000066400000000000000000000010101460670727200205650ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Execute computations asynchronously using MPI processes.""" # pylint: disable=redefined-builtin from ._core import ( Future, Executor, wait, FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED, as_completed, CancelledError, TimeoutError, InvalidStateError, BrokenExecutor, ) from .pool import MPIPoolExecutor from .pool import MPICommExecutor from .pool import ThreadPoolExecutor from .pool import ProcessPoolExecutor mpi4py-3.1.6/src/mpi4py/futures/__init__.pyi000066400000000000000000000011451460670727200207470ustar00rootroot00000000000000from ._core import ( Future as Future, Executor as Executor, wait as wait, FIRST_COMPLETED as FIRST_COMPLETED, FIRST_EXCEPTION as FIRST_EXCEPTION, ALL_COMPLETED as ALL_COMPLETED, as_completed as as_completed, CancelledError as CancelledError, TimeoutError as TimeoutError, InvalidStateError as InvalidStateError, BrokenExecutor as BrokenExecutor, ) from .pool import ( MPIPoolExecutor as MPIPoolExecutor, MPICommExecutor as MPICommExecutor, ) from .pool import ( ThreadPoolExecutor as ThreadPoolExecutor, ProcessPoolExecutor as ProcessPoolExecutor, ) mpi4py-3.1.6/src/mpi4py/futures/__main__.py000066400000000000000000000042711460670727200205620ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Run Python code using ``mpi4py.futures``. Python code (scripts, modules, zip files) is run in the process with rank 0 in ``MPI.COMM_WORLD`` and creates `MPIPoolExecutor` instances to submit tasks. The other processes team-up in a static-size shared pool of workers executing tasks submitted from the master process. """ def main(): """Entry point for ``python -m mpi4py.futures ...``.""" # pylint: disable=missing-docstring # pylint: disable=import-outside-toplevel import os import sys from ..run import run_command_line from ..run import set_abort_status from ._lib import SharedPoolCtx class UsageExit(SystemExit): pass def usage(error=None): from textwrap import dedent usage = dedent(""" usage: {python} -m {prog} [arg] ... or: {python} -m {prog} -m [arg] ... or: {python} -m {prog} -c [arg] ... """).strip().format( python=os.path.basename(sys.executable), prog=__spec__.parent, ) if error: print(error, file=sys.stderr) print(usage, file=sys.stderr) else: print(usage, file=sys.stdout) raise UsageExit(1 if error else 0) def chk_command_line(): args = sys.argv[1:] if not args: usage("No path specified for execution") elif args[0] == '-': pass elif args[0] in ('-h', '--help'): usage() elif args[0] in ('-m', '-c'): if len(args) < 2: usage("Argument expected for option: " + args[0]) elif args[0].startswith('-'): usage("Unknown option: " + args[0]) elif not os.path.exists(args[0]): usage("Path does not exist: " + args[0]) try: with SharedPoolCtx() as context: if context is not None: chk_command_line() run_command_line() except UsageExit: raise except SystemExit as exc: set_abort_status(exc.code) raise except: set_abort_status(1) raise if __name__ == '__main__': main() mpi4py-3.1.6/src/mpi4py/futures/__main__.pyi000066400000000000000000000000301460670727200207200ustar00rootroot00000000000000def main() -> None: ... 
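# ---------------------------------------------------------------------------
# [Editor's illustrative sketch -- not part of the mpi4py distribution.]
# Minimal usage example for the public API re-exported by
# mpi4py/futures/__init__.py above.  MPIPoolExecutor follows the standard
# concurrent.futures protocol; the script name, worker count, and launch
# commands below are assumptions for illustration only.  Typical runs are
#
#     $ mpiexec -n 1 python futures_example.py              (dynamic spawn)
#     $ mpiexec -n 5 python -m mpi4py.futures futures_example.py
#
from mpi4py.futures import MPIPoolExecutor, as_completed

def square(x):
    return x * x

if __name__ == '__main__':
    with MPIPoolExecutor(max_workers=4) as executor:
        # map() fans the call arguments out to MPI worker processes.
        print(list(executor.map(square, range(8))))
        # submit()/as_completed() provide the usual future-based interface.
        futures = [executor.submit(square, n) for n in range(8)]
        print(sorted(f.result() for f in as_completed(futures)))
# ---------------------------------------------------------------------------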
mpi4py-3.1.6/src/mpi4py/futures/_base.py000066400000000000000000000544201460670727200201140ustar00rootroot00000000000000# Backport of concurrent.futures._base from Python 3.8 # pylint: skip-file # flake8: noqa # type: ignore # Copyright 2009 Brian Quinlan. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. __author__ = 'Brian Quinlan (brian@sweetapp.com)' import collections import logging import threading import time try: from itertools import izip as _zip except ImportError: _zip = zip FIRST_COMPLETED = 'FIRST_COMPLETED' FIRST_EXCEPTION = 'FIRST_EXCEPTION' ALL_COMPLETED = 'ALL_COMPLETED' _AS_COMPLETED = '_AS_COMPLETED' # Possible future states (for internal use by the futures package). PENDING = 'PENDING' RUNNING = 'RUNNING' # The future was cancelled by the user... CANCELLED = 'CANCELLED' # ...and _Waiter.add_cancelled() was called by a worker. CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' FINISHED = 'FINISHED' _FUTURE_STATES = [ PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED ] _STATE_TO_DESCRIPTION_MAP = { PENDING: "pending", RUNNING: "running", CANCELLED: "cancelled", CANCELLED_AND_NOTIFIED: "cancelled", FINISHED: "finished" } # Logger for internal use by the futures package. LOGGER = logging.getLogger("concurrent.futures") class Error(Exception): """Base class for all future-related exceptions.""" pass class CancelledError(Error): """The Future was cancelled.""" pass class TimeoutError(Error): """The operation exceeded the given deadline.""" pass class InvalidStateError(Error): """The operation is not allowed in this state.""" pass class _Waiter(object): """Provides the event that wait() and as_completed() block on.""" def __init__(self): self.event = threading.Event() self.finished_futures = [] def add_result(self, future): self.finished_futures.append(future) def add_exception(self, future): self.finished_futures.append(future) def add_cancelled(self, future): self.finished_futures.append(future) class _AsCompletedWaiter(_Waiter): """Used by as_completed().""" def __init__(self): super(_AsCompletedWaiter, self).__init__() self.lock = threading.Lock() def add_result(self, future): with self.lock: super(_AsCompletedWaiter, self).add_result(future) self.event.set() def add_exception(self, future): with self.lock: super(_AsCompletedWaiter, self).add_exception(future) self.event.set() def add_cancelled(self, future): with self.lock: super(_AsCompletedWaiter, self).add_cancelled(future) self.event.set() class _FirstCompletedWaiter(_Waiter): """Used by wait(return_when=FIRST_COMPLETED).""" def add_result(self, future): super(_FirstCompletedWaiter, self).add_result(future) self.event.set() def add_exception(self, future): super(_FirstCompletedWaiter, self).add_exception(future) self.event.set() def add_cancelled(self, future): super(_FirstCompletedWaiter, self).add_cancelled(future) self.event.set() class _AllCompletedWaiter(_Waiter): """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" def __init__(self, num_pending_calls, stop_on_exception): self.num_pending_calls = num_pending_calls self.stop_on_exception = stop_on_exception self.lock = threading.Lock() super(_AllCompletedWaiter, self).__init__() def _decrement_pending_calls(self): with self.lock: self.num_pending_calls -= 1 if not self.num_pending_calls: self.event.set() def add_result(self, future): super(_AllCompletedWaiter, self).add_result(future) self._decrement_pending_calls() def add_exception(self, future): super(_AllCompletedWaiter, self).add_exception(future) if 
self.stop_on_exception: self.event.set() else: self._decrement_pending_calls() def add_cancelled(self, future): super(_AllCompletedWaiter, self).add_cancelled(future) self._decrement_pending_calls() class _AcquireFutures(object): """A context manager that does an ordered acquire of Future conditions.""" def __init__(self, futures): self.futures = sorted(futures, key=id) def __enter__(self): for future in self.futures: future._condition.acquire() def __exit__(self, *args): for future in self.futures: future._condition.release() def _create_and_install_waiters(fs, return_when): if return_when == _AS_COMPLETED: waiter = _AsCompletedWaiter() elif return_when == FIRST_COMPLETED: waiter = _FirstCompletedWaiter() else: pending_count = sum( f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) if return_when == FIRST_EXCEPTION: waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) elif return_when == ALL_COMPLETED: waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) else: raise ValueError("Invalid return condition: %r" % return_when) for f in fs: f._waiters.append(waiter) return waiter def _yield_finished_futures(fs, waiter, ref_collect): """ Iterate on the list *fs*, yielding finished futures one by one in reverse order. Before yielding a future, *waiter* is removed from its waiters and the future is removed from each set in the collection of sets *ref_collect*. The aim of this function is to avoid keeping stale references after the future is yielded and before the iterator resumes. """ while fs: f = fs[-1] for futures_set in ref_collect: futures_set.remove(f) with f._condition: f._waiters.remove(waiter) del f # Careful not to keep a reference to the popped value yield fs.pop() def as_completed(fs, timeout=None): """An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). If any given Futures are duplicated, they will be returned once. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. """ if timeout is not None: end_time = timeout + time.time() fs = set(fs) total_futures = len(fs) with _AcquireFutures(fs): finished = set( f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) pending = fs - finished waiter = _create_and_install_waiters(fs, _AS_COMPLETED) finished = list(finished) try: for f in _yield_finished_futures(finished, waiter, (fs,)): f = [f] yield f.pop() while pending: if timeout is None: wait_timeout = None else: wait_timeout = end_time - time.time() if wait_timeout < 0: raise TimeoutError( '%d (of %d) futures unfinished' % ( len(pending), total_futures)) waiter.event.wait(wait_timeout) with waiter.lock: finished = waiter.finished_futures waiter.finished_futures = [] waiter.event.clear() # reverse to keep finishing order finished.reverse() for f in _yield_finished_futures(finished, waiter, (fs, pending)): f = [f] yield f.pop() finally: # Remove waiter from unfinished futures for f in fs: with f._condition: f._waiters.remove(waiter) DoneAndNotDoneFutures = collections.namedtuple( 'DoneAndNotDoneFutures', 'done not_done') def wait(fs, timeout=None, return_when=ALL_COMPLETED): """Wait for the futures in the given sequence to complete. 
Args: fs: The sequence of Futures (possibly created by different Executors) to wait upon. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. return_when: Indicates when this function should return. The options are: FIRST_COMPLETED - Return when any future finishes or is cancelled. FIRST_EXCEPTION - Return when any future finishes by raising an exception. If no future raises an exception then it is equivalent to ALL_COMPLETED. ALL_COMPLETED - Return when all futures finish or are cancelled. Returns: A named 2-tuple of sets. The first set, named 'done', contains the futures that completed (is finished or cancelled) before the wait completed. The second set, named 'not_done', contains uncompleted futures. """ with _AcquireFutures(fs): done = set(f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) not_done = set(fs) - done if (return_when == FIRST_COMPLETED) and done: return DoneAndNotDoneFutures(done, not_done) elif (return_when == FIRST_EXCEPTION) and done: if any(f for f in done if not f.cancelled() and f.exception() is not None): return DoneAndNotDoneFutures(done, not_done) if len(done) == len(fs): return DoneAndNotDoneFutures(done, not_done) waiter = _create_and_install_waiters(fs, return_when) waiter.event.wait(timeout) for f in fs: with f._condition: f._waiters.remove(waiter) done.update(waiter.finished_futures) return DoneAndNotDoneFutures(done, set(fs) - done) class Future(object): """Represents the result of an asynchronous computation.""" def __init__(self): """Initializes the future. Should not be called by clients.""" self._condition = threading.Condition() self._state = PENDING self._result = None self._exception = None self._waiters = [] self._done_callbacks = [] def _invoke_callbacks(self): for callback in self._done_callbacks: try: callback(self) except Exception: LOGGER.exception('exception calling callback for %r', self) except BaseException: raise except: # old-style exception objects LOGGER.exception('exception calling callback for %r', self) def __repr__(self): with self._condition: if self._state == FINISHED: if self._exception: return '<%s at %#x state=%s raised %s>' % ( self.__class__.__name__, id(self), _STATE_TO_DESCRIPTION_MAP[self._state], self._exception.__class__.__name__) else: return '<%s at %#x state=%s returned %s>' % ( self.__class__.__name__, id(self), _STATE_TO_DESCRIPTION_MAP[self._state], self._result.__class__.__name__) return '<%s at %#x state=%s>' % ( self.__class__.__name__, id(self), _STATE_TO_DESCRIPTION_MAP[self._state]) def cancel(self): """Cancel the future if possible. Returns True if the future was cancelled, False otherwise. A future cannot be cancelled if it is running or has already completed. 
""" with self._condition: if self._state in [RUNNING, FINISHED]: return False if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: return True self._state = CANCELLED self._condition.notify_all() self._invoke_callbacks() return True def cancelled(self): """Return True if the future was cancelled.""" with self._condition: return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] def running(self): """Return True if the future is currently executing.""" with self._condition: return self._state == RUNNING def done(self): """Return True of the future was cancelled or finished executing.""" with self._condition: return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] def __get_result(self): if self._exception: raise self._exception else: return self._result def add_done_callback(self, fn): """Attaches a callable that will be called when the future finishes. Args: fn: A callable that will be called with this future as its only argument when the future completes or is cancelled. The callable will always be called by a thread in the same process in which it was added. If the future has already completed or been cancelled then the callable will be called immediately. These callables are called in the order that they were added. """ with self._condition: if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: self._done_callbacks.append(fn) return try: fn(self) except Exception: LOGGER.exception('exception calling callback for %r', self) except BaseException: raise except: # old-style exception objects LOGGER.exception('exception calling callback for %r', self) def result(self, timeout=None): """Return the result of the call that the future represents. Args: timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time. Returns: The result of the call that the future represents. Raises: CancelledError: If the future was cancelled. TimeoutError: If the future didn't finish executing before the given timeout. Exception: If the call raised then that exception will be raised. """ with self._condition: if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: raise CancelledError() elif self._state == FINISHED: return self.__get_result() self._condition.wait(timeout) if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: raise CancelledError() elif self._state == FINISHED: return self.__get_result() else: raise TimeoutError() def exception(self, timeout=None): """Return the exception raised by the call that the future represents. Args: timeout: The number of seconds to wait for the exception if the future isn't done. If None, then there is no limit on the wait time. Returns: The exception raised by the call that the future represents or None if the call completed without raising. Raises: CancelledError: If the future was cancelled. TimeoutError: If the future didn't finish executing before the given timeout. """ with self._condition: if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: raise CancelledError() elif self._state == FINISHED: return self._exception self._condition.wait(timeout) if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: raise CancelledError() elif self._state == FINISHED: return self._exception else: raise TimeoutError() # The following methods should only be used by Executors and in tests. def set_running_or_notify_cancel(self): """Mark the future as running or process any cancel notifications. Should only be used by Executor implementations and unit tests. 
If the future has been cancelled (cancel() was called and returned True) then any threads waiting on the future completing (though calls to as_completed() or wait()) are notified and False is returned. If the future was not cancelled then it is put in the running state (future calls to running() will return True) and True is returned. This method should be called by Executor implementations before executing the work associated with this future. If this method returns False then the work should not be executed. Returns: False if the Future was cancelled, True otherwise. Raises: RuntimeError: if this method was already called or if set_result() or set_exception() was called. """ with self._condition: if self._state == CANCELLED: self._state = CANCELLED_AND_NOTIFIED for waiter in self._waiters: waiter.add_cancelled(self) # self._condition.notify_all() is not necessary because # self.cancel() triggers a notification. return False elif self._state == PENDING: self._state = RUNNING return True else: LOGGER.critical('Future %s in unexpected state: %s', id(self), self._state) raise RuntimeError('Future in unexpected state') def set_result(self, result): """Sets the return value of work associated with the future. Should only be used by Executor implementations and unit tests. """ with self._condition: if self._state in set([CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]): raise InvalidStateError('{}: {!r}'.format(self._state, self)) self._result = result self._state = FINISHED for waiter in self._waiters: waiter.add_result(self) self._condition.notify_all() self._invoke_callbacks() def set_exception(self, exception): """Sets the result of the future as being the given exception. Should only be used by Executor implementations and unit tests. """ with self._condition: if self._state in set([CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]): raise InvalidStateError('{}: {!r}'.format(self._state, self)) self._exception = exception self._state = FINISHED for waiter in self._waiters: waiter.add_exception(self) self._condition.notify_all() self._invoke_callbacks() class Executor(object): """This is an abstract base class for concrete asynchronous executors.""" def submit(self, fn, *args, **kwargs): """Submits a callable to be executed with the given arguments. Schedules the callable to be executed as fn(*args, **kwargs) and returns a Future instance representing the execution of the callable. Returns: A Future representing the given call. """ raise NotImplementedError() def map(self, fn, *iterables, **kwargs): """Returns an iterator equivalent to map(fn, iter). Args: fn: A callable that will take as many arguments as there are passed iterables. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. chunksize: The size of the chunks the iterable will be broken into before being passed to a child process. This argument is only used by ProcessPoolExecutor; it is ignored by ThreadPoolExecutor. Returns: An iterator equivalent to: map(func, *iterables) but the calls may be evaluated out-of-order. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Exception: If fn(*args) raises for any values. """ timeout = kwargs.get('timeout') if timeout is not None: end_time = timeout + time.time() fs = [self.submit(fn, *args) for args in _zip(*iterables)] # Yield must be hidden in closure so that the futures are submitted # before the first iterator value is required. 
def result_iterator(): try: # reverse to keep finishing order fs.reverse() while fs: # Careful not to keep a reference to the popped future if timeout is None: yield fs.pop().result() else: yield fs.pop().result(end_time - time.time()) finally: for future in fs: future.cancel() return result_iterator() def shutdown(self, wait=True, cancel_futures=False): """Clean-up the resources associated with the Executor. It is safe to call this method several times. Otherwise, no other methods can be called after this one. Args: wait: If True then shutdown will not return until all running futures have finished executing and the resources used by the executor have been reclaimed. cancel_futures: If True then shutdown will cancel all pending futures. Futures that are completed or running will not be cancelled. """ pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown(wait=True) return False class BrokenExecutor(RuntimeError): """ Raised when a executor has become non-functional after a severe failure. """ mpi4py-3.1.6/src/mpi4py/futures/_core.py000066400000000000000000000024131460670727200201250ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com # pylint: disable=unused-import # pylint: disable=redefined-builtin # pylint: disable=missing-module-docstring try: from concurrent.futures import ( FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED, CancelledError, TimeoutError, Future, Executor, wait, as_completed, ) try: # Python 3.7 from concurrent.futures import BrokenExecutor except ImportError: # pragma: no cover class BrokenExecutor(RuntimeError): """The executor has become non-functional.""" try: # Python 3.8 from concurrent.futures import InvalidStateError except ImportError: # pragma: no cover # pylint: disable=too-few-public-methods # pylint: disable=useless-object-inheritance class InvalidStateError(CancelledError.__base__): """The operation is not allowed in this state.""" except ImportError: # pragma: no cover from ._base import ( FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED, CancelledError, TimeoutError, InvalidStateError, BrokenExecutor, Future, Executor, wait, as_completed, ) mpi4py-3.1.6/src/mpi4py/futures/_core.pyi000066400000000000000000000012531460670727200202770ustar00rootroot00000000000000import sys from concurrent.futures import ( FIRST_COMPLETED as FIRST_COMPLETED, FIRST_EXCEPTION as FIRST_EXCEPTION, ALL_COMPLETED as ALL_COMPLETED, CancelledError as CancelledError, TimeoutError as TimeoutError, Future as Future, Executor as Executor, wait as wait, as_completed as as_completed, ) if sys.version_info >= (3, 7): from concurrent.futures import BrokenExecutor as BrokenExecutor else: class BrokenExecutor(RuntimeError): ... if sys.version_info >= (3, 8): from concurrent.futures import InvalidStateError as InvalidStateError else: from concurrent.futures._base import Error class InvalidStateError(Error): ... mpi4py-3.1.6/src/mpi4py/futures/_lib.py000066400000000000000000000706311460670727200177520ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Management of MPI worker processes.""" # pylint: disable=broad-except # pylint: disable=too-many-lines # pylint: disable=protected-access # pylint: disable=missing-docstring # pylint: disable=import-outside-toplevel import os import sys import time import atexit import weakref import warnings import itertools import threading import collections from .. 
import MPI from ._core import BrokenExecutor # --- def serialized(function): if serialized.lock is None: return function def wrapper(*args, **kwargs): with serialized.lock: return function(*args, **kwargs) return wrapper serialized.lock = None # type: ignore[attr-defined] def setup_mpi_threads(): with setup_mpi_threads.lock: thread_level = setup_mpi_threads.thread_level if thread_level is None: thread_level = MPI.Query_thread() setup_mpi_threads.thread_level = thread_level if thread_level < MPI.THREAD_MULTIPLE: serialized.lock = threading.Lock() if thread_level < MPI.THREAD_SERIALIZED: warnings.warn("The level of thread support in MPI " "should be at least MPI_THREAD_SERIALIZED", RuntimeWarning, 2) setup_mpi_threads.lock = threading.Lock() # type: ignore[attr-defined] setup_mpi_threads.thread_level = None # type: ignore[attr-defined] # --- if sys.version_info[0] >= 3: def sys_exception(): exc = sys.exc_info()[1] exc.__traceback__ = None return exc else: # pragma: no cover def sys_exception(): exc = sys.exc_info()[1] return exc def os_environ_get(name, default=None): varname = 'MPI4PY_FUTURES_{}'.format(name) if varname not in os.environ: oldname = 'MPI4PY_{}'.format(name) if oldname in os.environ: # pragma: no cover message = "Environment variable {} is deprecated, use {}" warnings.warn(message.format(oldname, varname), DeprecationWarning) return os.environ[oldname] return os.environ.get(varname, default) # --- BACKOFF = 0.001 class Backoff: def __init__(self, seconds=BACKOFF): self.tval = 0.0 self.tmax = max(float(seconds), 0.0) self.tmin = self.tmax / (1 << 10) def reset(self): self.tval = 0.0 def sleep(self): time.sleep(self.tval) self.tval = min(self.tmax, max(self.tmin, self.tval * 2)) class Queue(collections.deque): put = collections.deque.append pop = collections.deque.popleft add = collections.deque.appendleft class Stack(collections.deque): put = collections.deque.append pop = collections.deque.pop THREADS_QUEUES = weakref.WeakKeyDictionary() # type: weakref.WeakKeyDictionary def join_threads(threads_queues=THREADS_QUEUES): items = list(threads_queues.items()) for _, queue in items: # pragma: no cover queue.put(None) for thread, _ in items: # pragma: no cover thread.join() try: threading._register_atexit(join_threads) # type: ignore[attr-defined] except AttributeError: # pragma: no cover atexit.register(join_threads) class Pool: def __init__(self, executor, manager, *args): self.size = None self.event = threading.Event() self.queue = queue = Queue() self.exref = weakref.ref(executor, lambda _, q=queue: q.put(None)) args = (self, executor._options) + args thread = threading.Thread(target=manager, args=args) self.thread = thread setup_mpi_threads() try: threading._register_atexit except AttributeError: # pragma: no cover thread.daemon = True thread.start() THREADS_QUEUES[thread] = queue def wait(self): self.event.wait() def push(self, item): self.queue.put(item) def done(self): self.queue.put(None) def join(self): self.thread.join() def setup(self, size): self.size = size self.event.set() return self.queue def cancel(self, handler=None): queue = self.queue while True: try: item = queue.pop() except LookupError: break if item is None: queue.put(None) break future, _ = item if handler: handler(future) else: future.cancel() del item, future def broken(self, message): lock = None executor = self.exref() if executor is not None: executor._broken = message if not executor._shutdown: lock = executor._lock def handler(future): if future.set_running_or_notify_cancel(): exception = 
BrokenExecutor(message) future.set_exception(exception) self.event.set() if lock: lock.acquire() try: self.cancel(handler) finally: if lock: lock.release() def initialize(options): initializer = options.pop('initializer', None) initargs = options.pop('initargs', ()) initkwargs = options.pop('initkwargs', {}) if initializer is not None: try: initializer(*initargs, **initkwargs) return True except BaseException: return False return True def _manager_thread(pool, options): size = options.pop('max_workers', 1) queue = pool.setup(size) def init(): if not initialize(options): pool.broken("initializer failed") return False return True def worker(): backoff = Backoff(options.get('backoff', BACKOFF)) if not init(): queue.put(None) return while True: try: item = queue.pop() backoff.reset() except LookupError: backoff.sleep() continue if item is None: queue.put(None) break future, task = item if not future.set_running_or_notify_cancel(): continue func, args, kwargs = task try: result = func(*args, **kwargs) future.set_result(result) except BaseException: exception = sys_exception() future.set_exception(exception) del item, future threads = [threading.Thread(target=worker) for _ in range(size - 1)] for thread in threads: thread.start() worker() for thread in threads: thread.join() queue.pop() def _manager_comm(pool, options, comm, full=True): assert comm != MPI.COMM_NULL serialized(client_sync)(comm, options, full) if not client_init(comm, options): pool.broken("initializer failed") serialized(client_close)(comm) return size = comm.Get_remote_size() queue = pool.setup(size) workers = Stack(reversed(range(size))) client_exec(comm, options, 0, workers, queue) serialized(client_close)(comm) def _manager_split(pool, options, comm, root): comm = serialized(comm_split)(comm, root) _manager_comm(pool, options, comm, full=False) def _manager_spawn(pool, options): pyexe = options.pop('python_exe', None) pyargs = options.pop('python_args', None) nprocs = options.pop('max_workers', None) info = options.pop('mpi_info', None) comm = serialized(client_spawn)(pyexe, pyargs, nprocs, info) _manager_comm(pool, options, comm) def _manager_service(pool, options): service = options.pop('service', None) info = options.pop('mpi_info', None) comm = serialized(client_connect)(service, info) _manager_comm(pool, options, comm) def ThreadPool(executor): # pylint: disable=invalid-name return Pool(executor, _manager_thread) def SplitPool(executor, comm, root): # pylint: disable=invalid-name return Pool(executor, _manager_split, comm, root) def SpawnPool(executor): # pylint: disable=invalid-name return Pool(executor, _manager_spawn) def ServicePool(executor): # pylint: disable=invalid-name return Pool(executor, _manager_service) def WorkerPool(executor): # pylint: disable=invalid-name if SharedPool is not None: return SharedPool(executor) if 'service' in executor._options: return ServicePool(executor) else: return SpawnPool(executor) # --- SharedPool = None # pylint: disable=invalid-name def _set_shared_pool(obj): # pylint: disable=invalid-name # pylint: disable=global-statement global SharedPool SharedPool = obj def _manager_shared(pool, options, comm, tag, workers): # pylint: disable=too-many-arguments if tag == 0: serialized(client_sync)(comm, options) if tag == 0: if not client_init(comm, options): pool.broken("initializer failed") return if tag >= 1: if options.get('initializer') is not None: pool.broken("cannot run initializer") return size = comm.Get_remote_size() queue = pool.setup(size) client_exec(comm, options, tag, 
workers, queue) class SharedPoolCtx: # pylint: disable=too-few-public-methods def __init__(self): self.comm = MPI.COMM_NULL self.on_root = None self.counter = None self.workers = None self.threads = weakref.WeakKeyDictionary() def __call__(self, executor): assert SharedPool is self if self.comm != MPI.COMM_NULL and self.on_root: tag = next(self.counter) args = (self.comm, tag, self.workers) pool = Pool(executor, _manager_shared, *args) else: pool = Pool(executor, _manager_thread) del THREADS_QUEUES[pool.thread] self.threads[pool.thread] = pool.queue return pool def __enter__(self): assert SharedPool is None self.on_root = MPI.COMM_WORLD.Get_rank() == 0 if MPI.COMM_WORLD.Get_size() >= 2: self.comm = comm_split(MPI.COMM_WORLD, root=0) if self.on_root: size = self.comm.Get_remote_size() self.counter = itertools.count(0) self.workers = Stack(reversed(range(size))) _set_shared_pool(self) return self if self.on_root else None def __exit__(self, *args): assert SharedPool is self if self.on_root: join_threads(self.threads) if self.comm != MPI.COMM_NULL: if self.on_root: if next(self.counter) == 0: options = dict(main=False) client_sync(self.comm, options) client_init(self.comm, options) client_close(self.comm) else: options = server_sync(self.comm) server_init(self.comm) server_exec(self.comm, options) server_close(self.comm) if not self.on_root: join_threads(self.threads) _set_shared_pool(None) self.comm = MPI.COMM_NULL self.on_root = None self.counter = None self.workers = None self.threads.clear() return False # --- def barrier(comm): assert comm.Is_inter() try: request = comm.Ibarrier() backoff = Backoff() while not request.Test(): backoff.sleep() except (NotImplementedError, MPI.Exception): # pragma: no cover buf = [None, 0, MPI.BYTE] tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) sendreqs, recvreqs = [], [] for pid in range(comm.Get_remote_size()): recvreqs.append(comm.Irecv(buf, pid, tag)) sendreqs.append(comm.Issend(buf, pid, tag)) backoff = Backoff() while not MPI.Request.Testall(recvreqs): backoff.sleep() MPI.Request.Waitall(sendreqs) def bcast_send(comm, data): assert comm.Is_inter() assert comm.Get_size() == 1 if MPI.VERSION >= 2: comm.bcast(data, MPI.ROOT) else: # pragma: no cover tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) size = comm.Get_remote_size() MPI.Request.Waitall([ comm.issend(data, pid, tag) for pid in range(size)]) def bcast_recv(comm): assert comm.Is_inter() assert comm.Get_remote_size() == 1 if MPI.VERSION >= 2: data = comm.bcast(None, 0) else: # pragma: no cover tag = MPI.COMM_WORLD.Get_attr(MPI.TAG_UB) data = comm.recv(None, 0, tag) return data # --- def client_sync(comm, options, full=True): assert comm.Is_inter() assert comm.Get_size() == 1 barrier(comm) if full: options = _sync_get_data(options) bcast_send(comm, options) def client_init(comm, options): serialized(bcast_send)(comm, _init_get_data(options)) sbuf = bytearray([False]) rbuf = bytearray([False]) serialized(comm.Allreduce)(sbuf, rbuf, op=MPI.LAND) success = bool(rbuf[0]) return success def client_exec(comm, options, tag, worker_pool, task_queue): # pylint: disable=too-many-locals # pylint: disable=too-many-statements assert comm.Is_inter() assert comm.Get_size() == 1 assert tag >= 0 backoff = Backoff(options.get('backoff', BACKOFF)) status = MPI.Status() comm_recv = serialized(comm.recv) comm_isend = serialized(comm.issend) comm_iprobe = serialized(comm.iprobe) request_free = serialized(MPI.Request.Free) pending = {} def iprobe(): pid = MPI.ANY_SOURCE return comm_iprobe(pid, tag, status) def probe(): pid = 
MPI.ANY_SOURCE backoff.reset() while not comm_iprobe(pid, tag, status): backoff.sleep() def recv(): pid = MPI.ANY_SOURCE try: task = comm_recv(None, pid, tag, status) except BaseException: task = (None, sys_exception()) pid = status.source worker_pool.put(pid) future, request = pending.pop(pid) request_free(request) result, exception = task if exception is None: future.set_result(result) else: future.set_exception(exception) def send(): item = task_queue.pop() if item is None: return True try: pid = worker_pool.pop() except LookupError: # pragma: no cover task_queue.add(item) return False future, task = item if not future.set_running_or_notify_cancel(): worker_pool.put(pid) return False try: request = comm_isend(task, pid, tag) pending[pid] = (future, request) except BaseException: worker_pool.put(pid) future.set_exception(sys_exception()) return None while True: if task_queue and worker_pool: backoff.reset() stop = send() if stop: break if pending and iprobe(): backoff.reset() recv() backoff.sleep() while pending: probe() recv() def client_close(comm): assert comm.Is_inter() MPI.Request.Waitall([ comm.issend(None, dest=pid, tag=0) for pid in range(comm.Get_remote_size())]) try: comm.Disconnect() except NotImplementedError: # pragma: no cover comm.Free() def server_sync(comm, full=True): assert comm.Is_inter() assert comm.Get_remote_size() == 1 barrier(comm) options = bcast_recv(comm) if full: options = _sync_set_data(options) return options def server_init(comm): options = bcast_recv(comm) success = initialize(options) sbuf = bytearray([success]) rbuf = bytearray([True]) comm.Allreduce(sbuf, rbuf, op=MPI.LAND) assert bool(rbuf[0]) is False return success def server_exec(comm, options): assert comm.Is_inter() assert comm.Get_remote_size() == 1 backoff = Backoff(options.get('backoff', BACKOFF)) status = MPI.Status() comm_recv = comm.recv comm_isend = comm.issend comm_iprobe = comm.iprobe request_test = MPI.Request.Test def recv(): pid, tag = MPI.ANY_SOURCE, MPI.ANY_TAG backoff.reset() while not comm_iprobe(pid, tag, status): backoff.sleep() pid, tag = status.source, status.tag try: task = comm_recv(None, pid, tag, status) except BaseException: task = sys_exception() return task def call(task): if isinstance(task, BaseException): return (None, task) func, args, kwargs = task try: result = func(*args, **kwargs) return (result, None) except BaseException: return (None, sys_exception()) def send(task): pid, tag = status.source, status.tag try: request = comm_isend(task, pid, tag) except BaseException: task = (None, sys_exception()) request = comm_isend(task, pid, tag) backoff.reset() while not request_test(request): backoff.sleep() while True: task = recv() if task is None: break task = call(task) send(task) def server_close(comm): try: comm.Disconnect() except NotImplementedError: # pragma: no cover comm.Free() # --- def get_comm_world(): return MPI.COMM_WORLD def comm_split(comm, root=0): assert not comm.Is_inter() assert comm.Get_size() > 1 assert 0 <= root < comm.Get_size() rank = comm.Get_rank() if MPI.Get_version() >= (2, 2): allgroup = comm.Get_group() if rank == root: group = allgroup.Incl([root]) else: group = allgroup.Excl([root]) allgroup.Free() intracomm = comm.Create(group) group.Free() else: # pragma: no cover color = 0 if rank == root else 1 intracomm = comm.Split(color, key=0) if rank == root: local_leader = 0 remote_leader = 0 if root else 1 else: local_leader = 0 remote_leader = root intercomm = intracomm.Create_intercomm( local_leader, comm, remote_leader, tag=0) 
intracomm.Free() return intercomm # --- MAIN_RUN_NAME = '__worker__' def import_main(mod_name, mod_path, init_globals, run_name): import types import runpy module = types.ModuleType(run_name) if init_globals is not None: module.__dict__.update(init_globals) module.__name__ = run_name class TempModulePatch(runpy._TempModule): # pylint: disable=too-few-public-methods def __init__(self, mod_name): # pylint: disable=no-member super().__init__(mod_name) assert self.module.__name__ == run_name self.module = module TempModule = runpy._TempModule # pylint: disable=invalid-name runpy._TempModule = TempModulePatch import_main.sentinel = (mod_name, mod_path) main_module = sys.modules['__main__'] try: sys.modules['__main__'] = sys.modules[run_name] = module if mod_name: # pragma: no cover runpy.run_module(mod_name, run_name=run_name, alter_sys=True) elif mod_path: # pragma: no branch if not getattr(sys.flags, 'isolated', 0): # pragma: no branch sys.path[0] = os.path.realpath(os.path.dirname(mod_path)) runpy.run_path(mod_path, run_name=run_name) sys.modules['__main__'] = sys.modules[run_name] = module except: # pragma: no cover sys.modules['__main__'] = main_module raise finally: del import_main.sentinel runpy._TempModule = TempModule def _sync_get_data(options): main = sys.modules['__main__'] sys.modules.setdefault(MAIN_RUN_NAME, main) import_main_module = options.pop('main', True) data = options.copy() data.pop('initializer', None) data.pop('initargs', None) data.pop('initkwargs', None) if import_main_module: if sys.version_info[0] >= 3: spec = getattr(main, '__spec__', None) name = getattr(spec, 'name', None) else: # pragma: no cover loader = getattr(main, '__loader__', None) name = getattr(loader, 'fullname', None) path = getattr(main, '__file__', None) if name is not None: # pragma: no cover data['@main:mod_name'] = name if path is not None: # pragma: no branch data['@main:mod_path'] = path return data def _sync_set_data(data): if 'path' in data: sys.path.extend(data.pop('path')) if 'wdir' in data: os.chdir(data.pop('wdir')) if 'env' in data: os.environ.update(data.pop('env')) mod_name = data.pop('@main:mod_name', None) mod_path = data.pop('@main:mod_path', None) mod_glbs = data.pop('globals', None) import_main(mod_name, mod_path, mod_glbs, MAIN_RUN_NAME) return data def _init_get_data(options): keys = ('initializer', 'initargs', 'initkwargs') vals = (None, (), {}) data = dict((k, options.pop(k, v)) for k, v in zip(keys, vals)) return data # --- def _check_recursive_spawn(): # pragma: no cover if not hasattr(import_main, 'sentinel'): return main_name, main_path = import_main.sentinel main_info = "\n" if main_name is not None: main_info += " main name: '{}'\n".format(main_name) if main_path is not None: main_info += " main path: '{}'\n".format(main_path) main_info += "\n" sys.stderr.write(""" The main script or module attempted to spawn new MPI worker processes. This probably means that you have forgotten to use the proper idiom in your main script or module: if __name__ == '__main__': ... This error is unrecoverable. The MPI execution environment had to be aborted. 
The name/path of the offending main script/module follows: """ + main_info) sys.stderr.flush() time.sleep(1) MPI.COMM_WORLD.Abort(1) FLAG_OPT_MAP = { # Python 3 'inspect': 'i', 'interactive': 'i', 'debug': 'd', 'optimize': 'O', 'no_user_site': 's', 'no_site': 'S', 'isolated': 'I', 'ignore_environment': 'E', 'dont_write_bytecode': 'B', 'hash_randomization': 'R', 'verbose': 'v', 'quiet': 'q', 'bytes_warning': 'b', # 'dev_mode': 'Xdev', # 'utf8_mode': 'Xutf8', # 'warn_default_encoding': 'Xwarn_default_encoding', # Python 2 'division_warning': 'Qwarn', 'division_new': 'Qnew', 'py3k_warning': '3', 'tabcheck': 't', 'unicode': 'U', } def get_python_flags(): args = [] for flag, opt in FLAG_OPT_MAP.items(): val = getattr(sys.flags, flag, 0) val = val if opt[0] != 'i' else 0 val = val if opt[0] != 'Q' else min(val, 1) if val > 0: args.append('-' + opt * val) for opt in sys.warnoptions: # pragma: no cover args.append('-W' + opt) sys_xoptions = getattr(sys, '_xoptions', {}) for opt, val in sys_xoptions.items(): # pragma: no cover args.append('-X' + opt if val is True else '-X' + opt + '=' + val) return args def get_max_workers(): max_workers = os_environ_get('MAX_WORKERS') if max_workers is not None: return int(max_workers) if MPI.UNIVERSE_SIZE != MPI.KEYVAL_INVALID: # pragma: no branch universe_size = MPI.COMM_WORLD.Get_attr(MPI.UNIVERSE_SIZE) if universe_size is not None: # pragma: no cover world_size = MPI.COMM_WORLD.Get_size() return max(universe_size - world_size, 1) return 1 def get_spawn_module(): return __spec__.parent + '.server' def client_spawn(python_exe=None, python_args=None, max_workers=None, mpi_info=None): _check_recursive_spawn() if python_exe is None: python_exe = sys.executable if python_args is None: python_args = [] if max_workers is None: max_workers = get_max_workers() if mpi_info is None: mpi_info = dict(soft='1:{}'.format(max_workers)) args = get_python_flags() + list(python_args) args.extend(['-m', get_spawn_module()]) info = MPI.Info.Create() info.update(mpi_info) comm = MPI.COMM_SELF.Spawn(python_exe, args, max_workers, info) info.Free() return comm # --- SERVICE = __spec__.parent SERVER_HOST = 'localhost' SERVER_BIND = '' SERVER_PORT = 31415 def get_service(): return os_environ_get('SERVICE', SERVICE) def get_server_host(): return os_environ_get('SERVER_HOST', SERVER_HOST) def get_server_bind(): return os_environ_get('SERVER_BIND', SERVER_BIND) def get_server_port(): return int(os_environ_get('SERVER_PORT', SERVER_PORT)) def client_lookup(address): from socket import socket host, port = address host = host or get_server_host() port = port or get_server_port() address = (host, int(port)) sock = socket() sock.connect(address) try: fdes = sock.fileno() # pylint: disable=no-member peer = MPI.Comm.Join(fdes) finally: sock.close() mpi_port = peer.recv(None, 0) peer.Disconnect() return mpi_port def server_publish(address, mpi_port): from socket import socket from socket import SOL_SOCKET, SO_REUSEADDR host, port = address host = host or get_server_bind() port = port or get_server_port() address = (host, int(port)) serversock = socket() serversock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) serversock.bind(address) serversock.listen(0) try: sock = serversock.accept()[0] finally: serversock.close() try: fdes = sock.fileno() # pylint: disable=no-member peer = MPI.Comm.Join(fdes) finally: sock.close() peer.send(mpi_port, 0) peer.Disconnect() def client_connect(service, mpi_info=None): info = MPI.INFO_NULL if mpi_info: info = MPI.Info.Create() info.update(mpi_info) if not 
isinstance(service, (list, tuple)): service = service or get_service() port = MPI.Lookup_name(service, info) else: port = client_lookup(service) comm = MPI.COMM_SELF.Connect(port, info, root=0) if info != MPI.INFO_NULL: info.Free() return comm def server_accept(service, mpi_info=None, root=0, comm=MPI.COMM_WORLD): assert not comm.Is_inter() assert 0 <= root < comm.Get_size() info = MPI.INFO_NULL if comm.Get_rank() == root: if mpi_info: info = MPI.Info.Create() info.update(mpi_info) port = None if comm.Get_rank() == root: port = MPI.Open_port(info) if comm.Get_rank() == root: if not isinstance(service, (list, tuple)): service = service or get_service() MPI.Publish_name(service, port, info) else: server_publish(service, port) service = None comm = comm.Accept(port, info, root) if port is not None: if service is not None: MPI.Unpublish_name(service, port, info) MPI.Close_port(port) if info != MPI.INFO_NULL: info.Free() return comm # --- def server_main_comm(comm, full=True): assert comm != MPI.COMM_NULL options = server_sync(comm, full) server_init(comm) server_exec(comm, options) server_close(comm) def server_main_split(comm, root): comm = comm_split(comm, root) server_main_comm(comm, full=False) def server_main_spawn(): comm = MPI.Comm.Get_parent() server_main_comm(comm) def server_main_service(): from getopt import getopt longopts = ['bind=', 'port=', 'service=', 'info='] optlist, _ = getopt(sys.argv[1:], '', longopts) optdict = {opt[2:]: val for opt, val in optlist} if 'bind' in optdict or 'port' in optdict: bind = optdict.get('bind') or get_server_bind() port = optdict.get('port') or get_server_port() service = (bind, int(port)) else: service = optdict.get('service') or get_service() info = optdict.get('info', '').split(',') info = dict(k_v.split('=', 1) for k_v in info if k_v) comm = server_accept(service, info) server_main_comm(comm) def server_main(): from ..run import set_abort_status try: comm = MPI.Comm.Get_parent() if comm != MPI.COMM_NULL: server_main_spawn() else: server_main_service() except: set_abort_status(1) raise # --- mpi4py-3.1.6/src/mpi4py/futures/_lib.pyi000066400000000000000000000117761460670727200201300ustar00rootroot00000000000000import weakref import threading from ..MPI import Info, Intracomm, Intercomm from ._core import Executor, Future from typing import Any, Optional, Union, Generic, TypeVar from typing import Callable, Iterable, Iterator, Sequence, Mapping from typing import List, Tuple, Dict _T = TypeVar("_T") _Task = Tuple[Callable[..., _T], Tuple, Dict[str, Any]] _Item = Tuple[Future[_T], _Task[_T]] _Info = Union[Info, Mapping[str, str], Iterable[Tuple[str, str]]] def serialized(function: Callable[..., Any]) -> Callable[..., Any]: ... def setup_mpi_threads() -> None: ... def sys_exception() -> BaseException: ... def os_environ_get(name: str, default: Optional[_T] = None) -> Union[str, Optional[_T]]: ... BACKOFF: float = ... class Backoff: tval: float tmax: float tmin: float def __init__(self, seconds: float = BACKOFF) -> None: ... def reset(self) -> None: ... def sleep(self) -> None: ... class Queue(Generic[_T]): def put(self, x: _T) -> None: ... def pop(self) -> _T: ... def add(self, x: _T) -> None: ... class Stack(Generic[_T]): def put(self, x: _T) -> None: ... def pop(self) -> _T: ... _WeakKeyDict = weakref.WeakKeyDictionary _ThreadQueueMap = _WeakKeyDict[threading.Thread, Queue[Optional[_Item[Any]]]] THREADS_QUEUES: _ThreadQueueMap = ... def join_threads(threads_queues: _ThreadQueueMap = ...) -> None: ... 
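# ---------------------------------------------------------------------------
# [Editor's note -- illustrative sketch, not part of the original stubs.]
# The Backoff helper declared above captures the exponential back-off
# polling pattern used throughout _lib.py: call reset() whenever progress is
# made and sleep() otherwise, so an idle polling loop starts by busy-spinning
# and gradually backs off up to the configured maximum delay.  A hedged
# sketch (``request`` stands in for a hypothetical nonblocking MPI request):
#
#     backoff = Backoff(0.001)        # max sleep of 1 ms, min of 1/1024 of it
#     while not request.Test():       # poll for completion
#         backoff.sleep()             # sleep 0, then doubling up to the max
#     backoff.reset()                 # progress made: start spinning again
# ---------------------------------------------------------------------------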
class Pool: size: int queue: Queue[Optional[_Item[Any]]] exref: weakref.ReferenceType[Executor] event: threading.Event thread: threading.Thread def __init__(self, executor: Executor, manager: Callable[..., None], *args: Any, ) -> None: ... def wait(self) -> None: ... def push(self, item: _Item[Any]) -> None: ... def done(self) -> None: ... def join(self) -> None: ... def setup(self, size: int) -> Queue: ... def cancel(self, handler: Optional[Callable[[Future], None]] = None) -> None: ... def broken(self, message: str) -> None: ... def initialize(options: Mapping[str, Any]) -> bool: ... def ThreadPool(executor: Executor) -> Pool: ... def SplitPool(executor: Executor, comm: Intracomm, root: int) -> Pool: ... def SpawnPool(executor: Executor) -> Pool: ... def ServicePool(executor: Executor) -> Pool: ... def WorkerPool(executor: Executor) -> Pool: ... SharedPool: Optional[Callable[[Executor], Pool]] = None class SharedPoolCtx: comm: Intercomm on_root: Optional[bool] counter: Iterator[int] workers: Stack[int] threads: _ThreadQueueMap def __init__(self) -> None: ... def __call__(self, executor: Executor) -> Pool: ... def __enter__(self) -> Optional[SharedPoolCtx]: ... def __exit__(self, *args: Any) -> bool: ... def barrier(comm: Intercomm) -> None: ... def bcast_send(comm: Intercomm, data: Any) -> None: ... def bcast_recv(comm: Intercomm) -> Any: ... def client_sync(comm: Intercomm, options: Any, full: bool = True) -> None: ... def client_init(comm: Intercomm, options: Any) -> bool: ... def client_exec(comm: Intercomm, options: Any, tag: int, worker_pool: Stack[int], task_queue: Queue[Optional[_Item[Any]]], ) -> None: ... def client_close(comm: Intercomm) -> None: ... def server_sync(comm: Intercomm, full: bool = True) -> Any: ... def server_init(comm: Intercomm) -> bool: ... def server_exec(comm: Intercomm, options: Any) -> None: ... def server_close(comm: Intercomm) -> None: ... def get_comm_world() -> Intracomm: ... def comm_split(comm: Intracomm, root: int = 0) -> Intercomm: ... MAIN_RUN_NAME: str = ... def import_main(mod_name: str, mod_path: str, init_globals: Optional[Dict[str, Any]], run_name: str, ) -> None: ... FLAG_OPT_MAP: Dict[str, str] def get_python_flags() -> List[str]: ... def get_max_workers() -> int: ... def get_spawn_module() -> str: ... def client_spawn(python_exe: Optional[str] = ..., python_args: Optional[Sequence[str]] = ..., max_workers: Optional[int] = ..., mpi_info: Optional[_Info] = ..., ) -> Intercomm: ... SERVICE: str = ... SERVER_HOST: str = ... SERVER_BIND: str = ... SERVER_PORT: int = ... def get_service() -> str: ... def get_server_host() -> str: ... def get_server_bind() -> str: ... def get_server_port() -> int: ... _Address = Tuple[Optional[str], Optional[int]] def client_lookup(address: _Address): ... def server_publish(address: _Address, mpi_port: str) -> None: ... def client_connect(service: Union[str, _Address], mpi_info: Optional[_Info] = ..., ) -> Intercomm: ... def server_accept(service: Union[str, _Address], mpi_info: Optional[_Info] = ..., root: int = ..., comm: Intracomm = ..., ) -> Intercomm: ... def server_main_comm(comm: Intercomm, full: bool = True) -> None: ... def server_main_split(comm: Intracomm, root: int) -> None: ... def server_main_spawn() -> None: ... def server_main_service() -> None: ... def server_main() -> None: ... 
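# ---------------------------------------------------------------------------
# [Editor's note -- illustrative sketch, not part of the mpi4py sources.]
# How the private helpers in _lib.py above resolve the default number of
# workers to spawn: an explicit MPI4PY_FUTURES_MAX_WORKERS environment
# variable wins, otherwise the MPI.UNIVERSE_SIZE attribute (minus the current
# world size) is used, and finally a single worker is assumed.  The launch
# line below is only an assumed way to exercise this:
#
#     $ MPI4PY_FUTURES_MAX_WORKERS=3 mpiexec -n 1 python workers_example.py
from mpi4py.futures import _lib

# Raw environment lookup: os_environ_get() prefixes names with
# 'MPI4PY_FUTURES_', so this reads MPI4PY_FUTURES_MAX_WORKERS (or None).
print(_lib.os_environ_get('MAX_WORKERS'))
# Full fallback chain, as used by client_spawn() when max_workers is None.
print(_lib.get_max_workers())
# ---------------------------------------------------------------------------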
mpi4py-3.1.6/src/mpi4py/futures/aplus.py000066400000000000000000000114451460670727200201670ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Support for Future chaining.""" # pylint: disable=broad-except # This implementation is heavily inspired in code written by # Daniel Dotsenko [@dvdotsenko] [dotsa at hotmail.com] # https://github.com/dvdotsenko/python-future-then import sys import weakref import threading import functools from ._core import Future class ThenableFuture(Future): """*Thenable* `Future` subclass.""" def then(self, on_success=None, on_failure=None): """Return ``then(self, on_success, on_failure)``.""" return then(self, on_success, on_failure) def catch(self, on_failure=None): """Return ``catch(self, on_failure)``.""" return catch(self, on_failure) def then(future, on_success=None, on_failure=None): """JavaScript-like (`Promises/A+`_) support for Future chaining. Args: future: Input future instance. on_success (optional): Function to be called when the input future is successfully resolved. Once the input future is resolved, its result value is the input for `on_success`. If `on_success` is ``None``, the output future (i.e., the one returned by this function) will be resolved directly with the result of the input future. If `on_success` returns a future instance, the result future is chained to the output future. Otherwise, the result of `on_success` is used to resolve the output future. on_failure (optional): Function to be called when the input future is rejected. The Exception instance picked up from the rejected future is the input value for `on_failure`. If `on_failure` is ``None``, the output future (i.e., the one returned by this function) will be rejected directly with the exception of the input future. Returns: Output future to be resolved once in input future is resolved and either `on_success` or `on_failure` completes. .. _Promises/A+: https://promisesaplus.com/ """ new_future = future.__class__() done_cb = functools.partial( _done_cb, new_future, on_success=on_success, on_failure=on_failure) future.add_done_callback(done_cb) return new_future def catch(future, on_failure=None): """Close equivalent to ``then(future, None, on_failure)``. Args: future: Input future instance. on_failure (optional): Function to be called when the input future is rejected. If `on_failure` is ``None``, the output future will be resolved with ``None`` thus ignoring the exception. 
""" if on_failure is None: return then(future, None, lambda exc: None) return then(future, None, on_failure) def _chain_log(new_future, future): with _chain_log.lock: registry = _chain_log.registry try: log = registry[new_future] except KeyError: log = weakref.WeakSet() registry[new_future] = log if future in log: raise RuntimeError( "Circular future chain detected: " "Future {0} is already in the resolved chain {1}" .format(future, set(log))) log.add(future) _chain_log.lock = threading.Lock() # type: ignore[attr-defined] _chain_log.registry = weakref.WeakKeyDictionary() # type: ignore[attr-defined] def _chain_future(new_future, future): _chain_log(new_future, future) done_cb = functools.partial(_done_cb, new_future) future.add_done_callback(done_cb) if sys.version_info[0] == 2: # pragma: no cover def _sys_exception(): exc = sys.exc_info()[1] return exc else: # pragma: no cover def _sys_exception(): exc = sys.exc_info()[1] exc.__traceback__ = None return exc def _done_cb(new_future, future, on_success=None, on_failure=None): if not future.done(): new_future.cancel() return if future.cancelled(): new_future.cancel() return try: result = future.result() if on_success: result = on_success(result) if isinstance(result, Future): _chain_future(new_future, result) else: new_future.set_result(result) except BaseException: exception = _sys_exception() if not on_failure: new_future.set_exception(exception) else: try: result = on_failure(exception) if isinstance(result, BaseException): new_future.set_exception(result) else: new_future.set_result(result) except BaseException: exception = _sys_exception() new_future.set_exception(exception) mpi4py-3.1.6/src/mpi4py/futures/aplus.pyi000066400000000000000000000015411460670727200203340ustar00rootroot00000000000000from ._core import Future from typing import Callable, Generic, Optional, TypeVar, Union _T = TypeVar("_T") class ThenableFuture(Future[_T], Generic[_T]): def then(self, on_success: Optional[Callable[[_T], Union[_T, Future[_T]]]] = None, on_failure: Optional[Callable[[BaseException], Union[_T, BaseException]]] = None, ) -> ThenableFuture[_T]: ... def catch(self, on_failure: Optional[Callable[[BaseException], Union[_T, BaseException]]] = None, ) -> ThenableFuture[_T]: ... def then(future: Future[_T], on_success: Optional[Callable[[_T], Union[_T, Future[_T]]]] = None, on_failure: Optional[Callable[[BaseException], Union[_T, BaseException]]] = None, ) -> Future[_T]: ... def catch(future: Future[_T], on_failure: Optional[Callable[[BaseException], Union[_T, BaseException]]] = None, ) -> Future[_T]: ... mpi4py-3.1.6/src/mpi4py/futures/pool.py000066400000000000000000000307741460670727200200220ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Implements MPIPoolExecutor.""" import time import functools import itertools import threading from ._core import Future from ._core import Executor from ._core import as_completed from . import _lib class MPIPoolExecutor(Executor): """MPI-based asynchronous executor.""" Future = Future def __init__(self, max_workers=None, initializer=None, initargs=(), **kwargs): """Initialize a new MPIPoolExecutor instance. Args: max_workers: The maximum number of MPI processes that can be used to execute the given calls. If ``None`` or not given then the number of worker processes will be determined from the MPI universe size attribute if defined, otherwise a single worker process will be spawned. initializer: An callable used to initialize workers processes. 
initargs: A tuple of arguments to pass to the initializer. Keyword Args: python_exe: Path to Python executable used to spawn workers. python_args: Command line arguments to pass to Python executable. mpi_info: Dict or iterable with ``(key, value)`` pairs. globals: Dict or iterable with global variables to set in workers. main: If ``False``, do not import ``__main__`` in workers. path: List of paths to append to ``sys.path`` in workers. wdir: Path to set current working directory in workers. env: Environment variables to update ``os.environ`` in workers. """ if max_workers is not None: max_workers = int(max_workers) if max_workers <= 0: raise ValueError("max_workers must be greater than 0") kwargs['max_workers'] = max_workers if initializer is not None: if not callable(initializer): raise TypeError("initializer must be a callable") kwargs['initializer'] = initializer kwargs['initargs'] = initargs self._options = kwargs self._shutdown = False self._broken = None self._lock = threading.Lock() self._pool = None _make_pool = staticmethod(_lib.WorkerPool) def _bootstrap(self): if self._pool is None: self._pool = self._make_pool(self) @property def _max_workers(self): with self._lock: if self._broken: return None if self._shutdown: return None self._bootstrap() self._pool.wait() return self._pool.size def bootup(self, wait=True): """Allocate executor resources eagerly. Args: wait: If ``True`` then bootup will not return until the executor resources are ready to process submissions. """ with self._lock: if self._shutdown: raise RuntimeError("cannot bootup after shutdown") self._bootstrap() if wait: self._pool.wait() return self def submit(self, fn, *args, **kwargs): """Submit a callable to be executed with the given arguments. Schedule the callable to be executed as ``fn(*args, **kwargs)`` and return a `Future` instance representing the execution of the callable. Returns: A `Future` representing the given call. """ # pylint: disable=arguments-differ with self._lock: if self._broken: raise _lib.BrokenExecutor(self._broken) if self._shutdown: raise RuntimeError("cannot submit after shutdown") self._bootstrap() future = self.Future() task = (fn, args, kwargs) self._pool.push((future, task)) return future def map(self, fn, *iterables, timeout=None, chunksize=1, unordered=False): """Return an iterator equivalent to ``map(fn, *iterables)``. Args: fn: A callable that will take as many arguments as there are passed iterables. iterables: Iterables yielding positional arguments to be passed to the callable. timeout: The maximum number of seconds to wait. If ``None``, then there is no limit on the wait time. chunksize: The size of the chunks the iterable will be broken into before being passed to a worker process. unordered: If ``True``, yield results out-of-order, as completed. Returns: An iterator equivalent to built-in ``map(func, *iterables)`` but the calls may be evaluated out-of-order. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Exception: If ``fn(*args)`` raises for any values. """ # pylint: disable=arguments-differ return self.starmap(fn, zip(*iterables), timeout, chunksize, unordered) def starmap(self, fn, iterable, timeout=None, chunksize=1, unordered=False): """Return an iterator equivalent to ``itertools.starmap(...)``. Args: fn: A callable that will take positional argument from iterable. iterable: An iterable yielding ``args`` tuples to be used as positional arguments to call ``fn(*args)``. timeout: The maximum number of seconds to wait. 
If ``None``, then there is no limit on the wait time. chunksize: The size of the chunks the iterable will be broken into before being passed to a worker process. unordered: If ``True``, yield results out-of-order, as completed. Returns: An iterator equivalent to ``itertools.starmap(fn, iterable)`` but the calls may be evaluated out-of-order. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Exception: If ``fn(*args)`` raises for any values. """ # pylint: disable=too-many-arguments if chunksize < 1: raise ValueError("chunksize must be >= 1.") if chunksize == 1: return _starmap_helper(self.submit, fn, iterable, timeout, unordered) else: return _starmap_chunks(self.submit, fn, iterable, timeout, unordered, chunksize) def shutdown(self, wait=True, *, cancel_futures=False): """Clean-up the resources associated with the executor. It is safe to call this method several times. Otherwise, no other methods can be called after this one. Args: wait: If ``True`` then shutdown will not return until all running futures have finished executing and the resources used by the executor have been reclaimed. cancel_futures: If ``True`` then shutdown will cancel all pending futures. Futures that are completed or running will not be cancelled. """ with self._lock: if not self._shutdown: self._shutdown = True if self._pool is not None: self._pool.done() if cancel_futures: if self._pool is not None: self._pool.cancel() pool = None if wait: pool = self._pool self._pool = None if pool is not None: pool.join() def _starmap_helper(submit, function, iterable, timeout, unordered): if timeout is not None: timer = getattr(time, 'monotonic', time.time) end_time = timeout + timer() futures = [submit(function, *args) for args in iterable] if unordered: futures = set(futures) def result_iterator(): # pylint: disable=missing-docstring try: if unordered: if timeout is None: iterator = as_completed(futures) else: iterator = as_completed(futures, end_time - timer()) for future in iterator: futures.remove(future) future = [future] yield future.pop().result() else: futures.reverse() if timeout is None: while futures: yield futures.pop().result() else: while futures: yield futures.pop().result(end_time - timer()) except: while futures: futures.pop().cancel() raise return result_iterator() def _apply_chunks(function, chunk): return [function(*args) for args in chunk] def _build_chunks(chunksize, iterable): iterable = iter(iterable) while True: chunk = tuple(itertools.islice(iterable, chunksize)) if not chunk: return yield (chunk,) def _chain_from_iterable_of_lists(iterable): for item in iterable: item.reverse() while item: yield item.pop() def _starmap_chunks(submit, function, iterable, timeout, unordered, chunksize): # pylint: disable=too-many-arguments function = functools.partial(_apply_chunks, function) iterable = _build_chunks(chunksize, iterable) result = _starmap_helper(submit, function, iterable, timeout, unordered) return _chain_from_iterable_of_lists(result) class MPICommExecutor: """Context manager for `MPIPoolExecutor`. This context manager splits a MPI (intra)communicator in two disjoint sets: a single master process and the remaining worker processes. These sets are then connected through an intercommunicator. The target of the ``with`` statement is assigned either an `MPIPoolExecutor` instance (at the master) or ``None`` (at the workers). Example:: with MPICommExecutor(MPI.COMM_WORLD, root=0) as executor: if executor is not None: # master process executor.submit(...) 
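                # illustrative, e.g. executor.submit(pow, 2, 10).result() -> 1024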
executor.map(...) """ # pylint: disable=too-few-public-methods def __init__(self, comm=None, root=0, **kwargs): """Initialize a new MPICommExecutor instance. Args: comm: MPI (intra)communicator. root: Designated master process. Raises: ValueError: If the communicator has wrong kind or the root value is not in the expected range. """ if comm is None: comm = _lib.get_comm_world() if comm.Is_inter(): raise ValueError("Expecting an intracommunicator") if root < 0 or root >= comm.Get_size(): raise ValueError("Expecting root in range(comm.size)") self._comm = comm self._root = root self._options = kwargs self._executor = None def __enter__(self): """Return `MPIPoolExecutor` instance at the root.""" # pylint: disable=protected-access if self._executor is not None: raise RuntimeError("__enter__") comm = self._comm root = self._root options = self._options executor = None if _lib.SharedPool: assert root == 0 executor = MPIPoolExecutor(**options) executor._pool = _lib.SharedPool(executor) elif comm.Get_size() == 1: executor = MPIPoolExecutor(**options) executor._pool = _lib.ThreadPool(executor) elif comm.Get_rank() == root: executor = MPIPoolExecutor(**options) executor._pool = _lib.SplitPool(executor, comm, root) else: _lib.server_main_split(comm, root) self._executor = executor return executor def __exit__(self, *args): """Shutdown `MPIPoolExecutor` instance at the root.""" executor = self._executor self._executor = None if executor is not None: executor.shutdown(wait=True) return False else: return True class ThreadPoolExecutor(MPIPoolExecutor): # noqa: D204 """`MPIPoolExecutor` subclass using a pool of threads.""" _make_pool = staticmethod(_lib.ThreadPool) class ProcessPoolExecutor(MPIPoolExecutor): # noqa: D204 """`MPIPoolExecutor` subclass using a pool of processes.""" _make_pool = staticmethod(_lib.SpawnPool) mpi4py-3.1.6/src/mpi4py/futures/pool.pyi000066400000000000000000000041061460670727200201610ustar00rootroot00000000000000import sys from ..MPI import Intracomm, COMM_WORLD from ._core import Executor, Future from typing import Any, Optional, Tuple, Type, TypeVar, Union from typing import Callable, Iterable, Iterator, Mapping, Sequence _T = TypeVar("_T") class MPIPoolExecutor(Executor): Future: Any = ... # _type: Union[Type[Future], Callable[[], Future]] def __init__( self, max_workers: Optional[int] = None, initializer: Optional[Callable[..., None]] = None, initargs: Tuple = (), *, python_exe: str = ..., python_args: Sequence[str] = ..., mpi_info: Union[Mapping[str, str], Iterable[Tuple[str, str]]] = ..., globals: Union[Mapping[str, str], Iterable[Tuple[str, str]]] = ..., main: bool = True, path: Sequence[str] = ..., wdir: str = ..., env: Union[Mapping[str, str], Iterable[Tuple[str, str]]] = ..., **kwargs: Any, ) -> None: ... def bootup( self: _T, wait: bool = True, ) -> _T: ... def submit( self, fn: Callable[..., _T], /, *args: Any, **kwargs: Any, ) -> Future[_T]: ... def map( self, fn: Callable[..., _T], *iterables: Iterable[Any], timeout: Optional[float] = None, chunksize: int = 1, unordered: bool = False, ) -> Iterator[_T]: ... def starmap( self, fn: Callable[..., _T], iterable: Iterable[Any], timeout: Optional[float] = None, chunksize: int = 1, unordered: bool = False, ) -> Iterator[_T]: ... def shutdown( self, wait: bool = True, *, cancel_futures: bool = False, ) -> None: ... class MPICommExecutor: def __init__( self, comm: Optional[Intracomm] = COMM_WORLD, root: int = 0, **kwargs: Any, ) -> None: ... def __enter__(self) -> Optional[MPIPoolExecutor]: ... 
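    # Note (comment only, not in the original stub): ``__enter__`` returns
    # an MPIPoolExecutor at the designated root and ``None`` at all other
    # ranks, so callers typically guard with ``if executor is not None:``
    # before submitting work (see the Example in pool.py).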
def __exit__(self, *args: Any) -> Optional[bool]: ... class ThreadPoolExecutor(MPIPoolExecutor): ... class ProcessPoolExecutor(MPIPoolExecutor): ... mpi4py-3.1.6/src/mpi4py/futures/server.py000066400000000000000000000004311460670727200203420ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Entry point for MPI workers.""" def main(): """Entry point for worker processes.""" # pylint: disable=import-outside-toplevel from . import _lib _lib.server_main() if __name__ == '__main__': main() mpi4py-3.1.6/src/mpi4py/futures/server.pyi000066400000000000000000000000301460670727200205060ustar00rootroot00000000000000def main() -> None: ... mpi4py-3.1.6/src/mpi4py/include/000077500000000000000000000000001460670727200164125ustar00rootroot00000000000000mpi4py-3.1.6/src/mpi4py/include/mpi4py/000077500000000000000000000000001460670727200176345ustar00rootroot00000000000000mpi4py-3.1.6/src/mpi4py/include/mpi4py/mpi.pxi000066400000000000000000000001241460670727200211400ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com include "../../libmpi.pxd" mpi4py-3.1.6/src/mpi4py/include/mpi4py/mpi4py.h000066400000000000000000000010621460670727200212260ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #ifndef MPI4PY_H #define MPI4PY_H #include "mpi.h" #if defined(MSMPI_VER) && !defined(PyMPI_HAVE_MPI_Message) # if defined(MPI_MESSAGE_NULL) # define PyMPI_HAVE_MPI_Message 1 # endif #endif #if (MPI_VERSION < 3) && !defined(PyMPI_HAVE_MPI_Message) typedef void *PyMPI_MPI_Message; #define MPI_Message PyMPI_MPI_Message #endif #include "mpi4py.MPI_api.h" static int import_mpi4py(void) { if (import_mpi4py__MPI() < 0) goto bad; return 0; bad: return -1; } #endif /* MPI4PY_H */ mpi4py-3.1.6/src/mpi4py/include/mpi4py/mpi4py.i000066400000000000000000000043521460670727200212340ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ /* ---------------------------------------------------------------- */ #if SWIG_VERSION < 0x010328 %warn "SWIG version < 1.3.28 is not supported" #endif /* ---------------------------------------------------------------- */ %header %{ #include "mpi4py/mpi4py.h" %} %init %{ if (import_mpi4py() < 0) #if PY_MAJOR_VERSION >= 3 return NULL; #else return; #endif %} /* ---------------------------------------------------------------- */ %define %mpi4py_fragments(PyType, Type) /* --- AsPtr --- */ %fragment(SWIG_AsPtr_frag(Type),"header") { SWIGINTERN int SWIG_AsPtr_dec(Type)(SWIG_Object input, Type **p) { if (input == Py_None) { if (p) *p = NULL; return SWIG_OK; } else if (PyObject_TypeCheck(input,&PyMPI##PyType##_Type)) { if (p) *p = PyMPI##PyType##_Get(input); return SWIG_OK; } else { void *argp = NULL; int res = SWIG_ConvertPtr(input,&argp,%descriptor(p_##Type), 0); if (!SWIG_IsOK(res)) return res; if (!argp) return SWIG_ValueError; if (p) *p = %static_cast(argp,Type*); return SWIG_OK; } } } /* --- From --- */ %fragment(SWIG_From_frag(Type),"header") { SWIGINTERN SWIG_Object SWIG_From_dec(Type)(Type v) { return PyMPI##PyType##_New(v); } } %enddef /*mpi4py_fragments*/ /* ---------------------------------------------------------------- */ %define SWIG_TYPECHECK_MPI_Comm 400 %enddef %define SWIG_TYPECHECK_MPI_Datatype 401 %enddef %define SWIG_TYPECHECK_MPI_Request 402 %enddef %define SWIG_TYPECHECK_MPI_Message 403 %enddef %define SWIG_TYPECHECK_MPI_Status 404 %enddef %define SWIG_TYPECHECK_MPI_Op 405 %enddef %define SWIG_TYPECHECK_MPI_Group 406 %enddef %define 
SWIG_TYPECHECK_MPI_Info 407 %enddef %define SWIG_TYPECHECK_MPI_File 408 %enddef %define SWIG_TYPECHECK_MPI_Win 409 %enddef %define SWIG_TYPECHECK_MPI_Errhandler 410 %enddef /* ---------------------------------------------------------------- */ %define %mpi4py_typemap(PyType, Type) %types(Type*); %mpi4py_fragments(PyType, Type); %typemaps_asptrfromn(%checkcode(Type), Type); %enddef /*mpi4py_typemap*/ /* ---------------------------------------------------------------- */ /* * Local Variables: * mode: C * End: */ mpi4py-3.1.6/src/mpi4py/libmpi.pxd000066400000000000000000001276461460670727200170000ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com cdef import from "mpi.h" nogil: #----------------------------------------------------------------- ctypedef long MPI_Aint ctypedef long long MPI_Offset #:= long ctypedef long long MPI_Count #:= MPI_Offset ctypedef struct MPI_Status: int MPI_SOURCE int MPI_TAG int MPI_ERROR ctypedef struct _mpi_datatype_t ctypedef _mpi_datatype_t* MPI_Datatype ctypedef struct _mpi_request_t ctypedef _mpi_request_t* MPI_Request ctypedef struct _mpi_message_t ctypedef _mpi_message_t* MPI_Message ctypedef struct _mpi_op_t ctypedef _mpi_op_t* MPI_Op ctypedef struct _mpi_group_t ctypedef _mpi_group_t* MPI_Group ctypedef struct _mpi_info_t ctypedef _mpi_info_t* MPI_Info ctypedef struct _mpi_comm_t ctypedef _mpi_comm_t* MPI_Comm ctypedef struct _mpi_win_t ctypedef _mpi_win_t* MPI_Win ctypedef struct _mpi_file_t ctypedef _mpi_file_t* MPI_File ctypedef struct _mpi_errhandler_t ctypedef _mpi_errhandler_t* MPI_Errhandler #----------------------------------------------------------------- enum: MPI_UNDEFINED #:= -32766 enum: MPI_ANY_SOURCE #:= MPI_UNDEFINED enum: MPI_ANY_TAG #:= MPI_UNDEFINED enum: MPI_PROC_NULL #:= MPI_UNDEFINED enum: MPI_ROOT #:= MPI_PROC_NULL enum: MPI_IDENT #:= 1 enum: MPI_CONGRUENT #:= 2 enum: MPI_SIMILAR #:= 3 enum: MPI_UNEQUAL #:= 4 void* MPI_BOTTOM #:= 0 void* MPI_IN_PLACE #:= 0 enum: MPI_KEYVAL_INVALID #:= 0 enum: MPI_MAX_OBJECT_NAME #:= 1 #----------------------------------------------------------------- # Null datatype MPI_Datatype MPI_DATATYPE_NULL #:= 0 # MPI datatypes MPI_Datatype MPI_PACKED #:= MPI_DATATYPE_NULL MPI_Datatype MPI_BYTE #:= MPI_DATATYPE_NULL MPI_Datatype MPI_AINT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_OFFSET #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COUNT #:= MPI_DATATYPE_NULL # Elementary C datatypes MPI_Datatype MPI_CHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_WCHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_SIGNED_CHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_SHORT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_LONG_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_CHAR #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_SHORT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UNSIGNED_LONG_LONG #:= MPI_DATATYPE_NULL MPI_Datatype MPI_FLOAT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_DOUBLE #:= MPI_DATATYPE_NULL # C99 datatypes MPI_Datatype MPI_C_BOOL #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT8_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT16_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT32_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INT64_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UINT8_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UINT16_T #:= MPI_DATATYPE_NULL 
MPI_Datatype MPI_UINT32_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_UINT64_T #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_FLOAT_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_C_LONG_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL # C++ datatypes MPI_Datatype MPI_CXX_BOOL #:= MPI_DATATYPE_NULL MPI_Datatype MPI_CXX_FLOAT_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_CXX_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_CXX_LONG_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL # C datatypes for reduction operations MPI_Datatype MPI_SHORT_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_2INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_FLOAT_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE_INT #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LONG_DOUBLE_INT #:= MPI_DATATYPE_NULL # Elementary Fortran datatypes MPI_Datatype MPI_CHARACTER #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL #:= MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE_PRECISION #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX #:= MPI_DATATYPE_NULL MPI_Datatype MPI_DOUBLE_COMPLEX #:= MPI_DATATYPE_NULL # Size-specific Fortran datatypes MPI_Datatype MPI_LOGICAL1 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL2 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LOGICAL8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER1 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER2 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_INTEGER16 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL2 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_REAL16 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX4 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX8 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX16 #:= MPI_DATATYPE_NULL MPI_Datatype MPI_COMPLEX32 #:= MPI_DATATYPE_NULL # Deprecated since MPI-2, removed in MPI-3 MPI_Datatype MPI_UB #:= MPI_DATATYPE_NULL MPI_Datatype MPI_LB #:= MPI_DATATYPE_NULL int MPI_Type_lb(MPI_Datatype, MPI_Aint*) int MPI_Type_ub(MPI_Datatype, MPI_Aint*) int MPI_Type_extent(MPI_Datatype, MPI_Aint*) int MPI_Address(void*, MPI_Aint*) int MPI_Type_hvector(int, int, MPI_Aint, MPI_Datatype, MPI_Datatype*) int MPI_Type_hindexed(int, int[], MPI_Aint[], MPI_Datatype, MPI_Datatype*) int MPI_Type_struct(int, int[], MPI_Aint[], MPI_Datatype[], MPI_Datatype*) enum: MPI_COMBINER_HVECTOR_INTEGER #:= MPI_UNDEFINED enum: MPI_COMBINER_HINDEXED_INTEGER #:= MPI_UNDEFINED enum: MPI_COMBINER_STRUCT_INTEGER #:= MPI_UNDEFINED int MPI_Type_dup(MPI_Datatype, MPI_Datatype*) int MPI_Type_contiguous(int, MPI_Datatype, MPI_Datatype*) int MPI_Type_vector(int, int, int, MPI_Datatype, MPI_Datatype*) int MPI_Type_indexed(int, int[], int[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_indexed_block(int, int, int[], MPI_Datatype, MPI_Datatype*) enum: MPI_ORDER_C #:= 0 enum: MPI_ORDER_FORTRAN #:= 1 int MPI_Type_create_subarray(int, int[], int[], int[], int, MPI_Datatype, MPI_Datatype*) enum: MPI_DISTRIBUTE_NONE #:= 0 enum: MPI_DISTRIBUTE_BLOCK #:= 1 enum: MPI_DISTRIBUTE_CYCLIC #:= 2 enum: MPI_DISTRIBUTE_DFLT_DARG #:= 4 int MPI_Type_create_darray(int, int, int, int[], int[], int[], int[], int, MPI_Datatype, MPI_Datatype*) int MPI_Get_address(void*, MPI_Aint*) #:= MPI_Address MPI_Aint 
MPI_Aint_add(MPI_Aint, MPI_Aint) MPI_Aint MPI_Aint_diff(MPI_Aint, MPI_Aint) int MPI_Type_create_hvector(int, int, MPI_Aint, MPI_Datatype, MPI_Datatype*) #:= MPI_Type_hvector int MPI_Type_create_hindexed(int, int[], MPI_Aint[], MPI_Datatype, MPI_Datatype*) #:= MPI_Type_hindexed int MPI_Type_create_hindexed_block(int, int, MPI_Aint[], MPI_Datatype, MPI_Datatype*) int MPI_Type_create_struct(int, int[], MPI_Aint[], MPI_Datatype[], MPI_Datatype*) #:= MPI_Type_struct int MPI_Type_create_resized(MPI_Datatype, MPI_Aint, MPI_Aint, MPI_Datatype*) int MPI_Type_size(MPI_Datatype, int*) int MPI_Type_size_x(MPI_Datatype, MPI_Count*) int MPI_Type_get_extent(MPI_Datatype, MPI_Aint*, MPI_Aint*) int MPI_Type_get_extent_x(MPI_Datatype, MPI_Count*, MPI_Count*) int MPI_Type_get_true_extent(MPI_Datatype, MPI_Aint*, MPI_Aint*) int MPI_Type_get_true_extent_x(MPI_Datatype, MPI_Count*, MPI_Count*) int MPI_Type_create_f90_integer(int, MPI_Datatype*) int MPI_Type_create_f90_real(int, int, MPI_Datatype*) int MPI_Type_create_f90_complex(int, int, MPI_Datatype*) enum: MPI_TYPECLASS_INTEGER #:= MPI_UNDEFINED enum: MPI_TYPECLASS_REAL #:= MPI_UNDEFINED enum: MPI_TYPECLASS_COMPLEX #:= MPI_UNDEFINED int MPI_Type_match_size(int, int, MPI_Datatype*) int MPI_Type_commit(MPI_Datatype*) int MPI_Type_free(MPI_Datatype*) int MPI_Pack(void*, int, MPI_Datatype, void*, int, int*, MPI_Comm) int MPI_Unpack(void*, int, int*, void*, int, MPI_Datatype, MPI_Comm) int MPI_Pack_size(int, MPI_Datatype, MPI_Comm, int*) int MPI_Pack_external(char[], void*, int, MPI_Datatype, void*, MPI_Aint, MPI_Aint*) int MPI_Unpack_external(char[], void*, MPI_Aint, MPI_Aint*, void*, int, MPI_Datatype) int MPI_Pack_external_size(char[], int, MPI_Datatype, MPI_Aint*) enum: MPI_COMBINER_NAMED #:= MPI_UNDEFINED enum: MPI_COMBINER_DUP #:= MPI_UNDEFINED enum: MPI_COMBINER_CONTIGUOUS #:= MPI_UNDEFINED enum: MPI_COMBINER_VECTOR #:= MPI_UNDEFINED enum: MPI_COMBINER_HVECTOR #:= MPI_UNDEFINED enum: MPI_COMBINER_INDEXED #:= MPI_UNDEFINED enum: MPI_COMBINER_HINDEXED #:= MPI_UNDEFINED enum: MPI_COMBINER_INDEXED_BLOCK #:= MPI_UNDEFINED enum: MPI_COMBINER_HINDEXED_BLOCK #:= MPI_UNDEFINED enum: MPI_COMBINER_STRUCT #:= MPI_UNDEFINED enum: MPI_COMBINER_SUBARRAY #:= MPI_UNDEFINED enum: MPI_COMBINER_DARRAY #:= MPI_UNDEFINED enum: MPI_COMBINER_F90_REAL #:= MPI_UNDEFINED enum: MPI_COMBINER_F90_COMPLEX #:= MPI_UNDEFINED enum: MPI_COMBINER_F90_INTEGER #:= MPI_UNDEFINED enum: MPI_COMBINER_RESIZED #:= MPI_UNDEFINED int MPI_Type_get_envelope(MPI_Datatype, int*, int*, int*, int*) int MPI_Type_get_contents(MPI_Datatype, int, int, int, int[], MPI_Aint[], MPI_Datatype[]) int MPI_Type_get_name(MPI_Datatype, char[], int*) int MPI_Type_set_name(MPI_Datatype, char[]) int MPI_Type_get_attr(MPI_Datatype, int, void*, int*) int MPI_Type_set_attr(MPI_Datatype, int, void*) int MPI_Type_delete_attr(MPI_Datatype, int) ctypedef int MPI_Type_copy_attr_function(MPI_Datatype,int,void*,void*,void*,int*) ctypedef int MPI_Type_delete_attr_function(MPI_Datatype,int,void*,void*) MPI_Type_copy_attr_function* MPI_TYPE_NULL_COPY_FN #:= 0 MPI_Type_copy_attr_function* MPI_TYPE_DUP_FN #:= 0 MPI_Type_delete_attr_function* MPI_TYPE_NULL_DELETE_FN #:= 0 int MPI_Type_create_keyval(MPI_Type_copy_attr_function*, MPI_Type_delete_attr_function*, int*, void*) int MPI_Type_free_keyval(int*) #----------------------------------------------------------------- MPI_Status* MPI_STATUS_IGNORE #:= 0 MPI_Status* MPI_STATUSES_IGNORE #:= 0 int MPI_Get_count(MPI_Status*, MPI_Datatype, int*) int MPI_Get_elements(MPI_Status*, MPI_Datatype, 
int*) int MPI_Get_elements_x(MPI_Status*, MPI_Datatype, MPI_Count*) int MPI_Status_set_elements(MPI_Status*, MPI_Datatype, int) int MPI_Status_set_elements_x(MPI_Status*, MPI_Datatype, MPI_Count) int MPI_Test_cancelled(MPI_Status*, int*) int MPI_Status_set_cancelled(MPI_Status*, int) #----------------------------------------------------------------- MPI_Request MPI_REQUEST_NULL #:= 0 int MPI_Request_free(MPI_Request*) int MPI_Wait(MPI_Request*, MPI_Status*) int MPI_Test(MPI_Request*, int*, MPI_Status*) int MPI_Request_get_status(MPI_Request, int*, MPI_Status*) int MPI_Cancel(MPI_Request*) int MPI_Waitany(int, MPI_Request[], int*, MPI_Status*) int MPI_Testany(int, MPI_Request[], int*, int*, MPI_Status*) int MPI_Waitall(int, MPI_Request[], MPI_Status[]) int MPI_Testall(int, MPI_Request[], int*, MPI_Status[]) int MPI_Waitsome(int, MPI_Request[], int*, int[], MPI_Status[]) int MPI_Testsome(int, MPI_Request[], int*, int[], MPI_Status[]) int MPI_Start(MPI_Request*) int MPI_Startall(int, MPI_Request*) ctypedef int MPI_Grequest_cancel_function(void*,int) ctypedef int MPI_Grequest_free_function(void*) ctypedef int MPI_Grequest_query_function(void*,MPI_Status*) int MPI_Grequest_start(MPI_Grequest_query_function*, MPI_Grequest_free_function*, MPI_Grequest_cancel_function*, void*, MPI_Request*) int MPI_Grequest_complete(MPI_Request) #----------------------------------------------------------------- MPI_Op MPI_OP_NULL #:= 0 MPI_Op MPI_MAX #:= MPI_OP_NULL MPI_Op MPI_MIN #:= MPI_OP_NULL MPI_Op MPI_SUM #:= MPI_OP_NULL MPI_Op MPI_PROD #:= MPI_OP_NULL MPI_Op MPI_LAND #:= MPI_OP_NULL MPI_Op MPI_BAND #:= MPI_OP_NULL MPI_Op MPI_LOR #:= MPI_OP_NULL MPI_Op MPI_BOR #:= MPI_OP_NULL MPI_Op MPI_LXOR #:= MPI_OP_NULL MPI_Op MPI_BXOR #:= MPI_OP_NULL MPI_Op MPI_MAXLOC #:= MPI_OP_NULL MPI_Op MPI_MINLOC #:= MPI_OP_NULL MPI_Op MPI_REPLACE #:= MPI_OP_NULL MPI_Op MPI_NO_OP #:= MPI_OP_NULL int MPI_Op_free(MPI_Op*) ctypedef void MPI_User_function(void*,void*,int*,MPI_Datatype*) int MPI_Op_create(MPI_User_function*, int, MPI_Op*) int MPI_Op_commutative(MPI_Op, int*) #----------------------------------------------------------------- MPI_Info MPI_INFO_NULL #:= 0 MPI_Info MPI_INFO_ENV #:= MPI_INFO_NULL int MPI_Info_free(MPI_Info*) int MPI_Info_create(MPI_Info*) int MPI_Info_dup(MPI_Info, MPI_Info*) enum: MPI_MAX_INFO_KEY #:= 1 enum: MPI_MAX_INFO_VAL #:= 1 int MPI_Info_get(MPI_Info, char[], int, char[], int*) int MPI_Info_set(MPI_Info, char[], char[]) int MPI_Info_delete(MPI_Info, char[]) int MPI_Info_get_nkeys(MPI_Info, int*) int MPI_Info_get_nthkey(MPI_Info, int, char[]) int MPI_Info_get_valuelen(MPI_Info, char[], int*, int*) #----------------------------------------------------------------- MPI_Group MPI_GROUP_NULL #:= 0 MPI_Group MPI_GROUP_EMPTY #:= 1 int MPI_Group_free(MPI_Group*) int MPI_Group_size(MPI_Group, int*) int MPI_Group_rank(MPI_Group, int*) int MPI_Group_translate_ranks(MPI_Group, int, int[], MPI_Group, int[]) int MPI_Group_compare(MPI_Group, MPI_Group, int*) int MPI_Group_union(MPI_Group, MPI_Group, MPI_Group*) int MPI_Group_intersection(MPI_Group, MPI_Group, MPI_Group*) int MPI_Group_difference(MPI_Group, MPI_Group, MPI_Group*) int MPI_Group_incl(MPI_Group, int, int[], MPI_Group*) int MPI_Group_excl(MPI_Group, int, int[], MPI_Group*) int MPI_Group_range_incl(MPI_Group, int, int[][3], MPI_Group*) int MPI_Group_range_excl(MPI_Group, int, int[][3], MPI_Group*) #----------------------------------------------------------------- MPI_Comm MPI_COMM_NULL #:= 0 MPI_Comm MPI_COMM_SELF #:= MPI_COMM_NULL MPI_Comm 
MPI_COMM_WORLD #:= MPI_COMM_NULL int MPI_Comm_free(MPI_Comm*) int MPI_Comm_group(MPI_Comm, MPI_Group*) int MPI_Comm_size(MPI_Comm, int*) int MPI_Comm_rank(MPI_Comm, int*) int MPI_Comm_compare(MPI_Comm, MPI_Comm, int*) int MPI_Topo_test(MPI_Comm, int*) int MPI_Comm_test_inter(MPI_Comm, int*) int MPI_Abort(MPI_Comm, int) int MPI_Send(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Recv(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status*) int MPI_Sendrecv(void*, int, MPI_Datatype,int, int, void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status*) int MPI_Sendrecv_replace(void*, int, MPI_Datatype, int, int, int, int, MPI_Comm, MPI_Status*) enum: MPI_BSEND_OVERHEAD #:= 0 int MPI_Buffer_attach(void*, int) int MPI_Buffer_detach(void*, int*) int MPI_Bsend(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Ssend(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Rsend(void*, int, MPI_Datatype, int, int, MPI_Comm) int MPI_Isend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Ibsend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Issend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Irsend(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Irecv(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Send_init(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request*) int MPI_Bsend_init(void*, int, MPI_Datatype, int,int, MPI_Comm, MPI_Request*) int MPI_Ssend_init(void*, int, MPI_Datatype, int,int, MPI_Comm, MPI_Request*) int MPI_Rsend_init(void*, int, MPI_Datatype, int,int, MPI_Comm, MPI_Request*) int MPI_Recv_init(void*, int, MPI_Datatype, int,int, MPI_Comm, MPI_Request*) int MPI_Probe(int, int, MPI_Comm, MPI_Status*) int MPI_Iprobe(int, int, MPI_Comm, int*, MPI_Status*) MPI_Message MPI_MESSAGE_NULL #:= 0 MPI_Message MPI_MESSAGE_NO_PROC #:= MPI_MESSAGE_NULL int MPI_Mprobe(int, int, MPI_Comm, MPI_Message*, MPI_Status*) int MPI_Improbe(int, int, MPI_Comm, int*, MPI_Message*, MPI_Status*) int MPI_Mrecv(void*, int, MPI_Datatype, MPI_Message*, MPI_Status*) int MPI_Imrecv(void*, int, MPI_Datatype, MPI_Message*, MPI_Request*) int MPI_Barrier(MPI_Comm) int MPI_Bcast(void*, int, MPI_Datatype, int, MPI_Comm) int MPI_Gather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm) int MPI_Gatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, int, MPI_Comm) int MPI_Scatter(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm) int MPI_Scatterv(void*, int[], int[], MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm) int MPI_Allgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Allgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm) int MPI_Alltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Alltoallv(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm) int MPI_Alltoallw(void*, int[], int[], MPI_Datatype[], void*, int[], int[], MPI_Datatype[], MPI_Comm) int MPI_Reduce(void*, void*, int, MPI_Datatype, MPI_Op, int, MPI_Comm) int MPI_Allreduce(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Reduce_local(void*, void*, int, MPI_Datatype, MPI_Op) int MPI_Reduce_scatter_block(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Reduce_scatter(void*, void*, int[], MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Scan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int MPI_Exscan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm) int 
MPI_Neighbor_allgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Neighbor_allgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoallv(void*, int[], int[],MPI_Datatype, void*, int[],int[], MPI_Datatype, MPI_Comm) int MPI_Neighbor_alltoallw(void*, int[], MPI_Aint[],MPI_Datatype[], void*, int[],MPI_Aint[], MPI_Datatype[], MPI_Comm) int MPI_Ibarrier(MPI_Comm, MPI_Request*) int MPI_Ibcast(void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Igather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Igatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iscatter(void*, int, MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iscatterv(void*, int[], int[], MPI_Datatype, void*, int, MPI_Datatype, int, MPI_Comm, MPI_Request*) int MPI_Iallgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Iallgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoallv(void*, int[], int[], MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ialltoallw(void*, int[], int[], MPI_Datatype[], void*, int[], int[], MPI_Datatype[], MPI_Comm, MPI_Request*) int MPI_Ireduce(void*, void*, int, MPI_Datatype, MPI_Op, int, MPI_Comm, MPI_Request*) int MPI_Iallreduce(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ireduce_scatter_block(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ireduce_scatter(void*, void*, int[], MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Iscan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Iexscan(void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request*) int MPI_Ineighbor_allgather(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_allgatherv(void*, int, MPI_Datatype, void*, int[], int[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_alltoall(void*, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_alltoallv(void*, int[], int[],MPI_Datatype, void*, int[],int[], MPI_Datatype, MPI_Comm, MPI_Request*) int MPI_Ineighbor_alltoallw(void*, int[], MPI_Aint[],MPI_Datatype[], void*, int[],MPI_Aint[], MPI_Datatype[], MPI_Comm, MPI_Request*) int MPI_Comm_dup(MPI_Comm, MPI_Comm*) int MPI_Comm_dup_with_info(MPI_Comm, MPI_Info, MPI_Comm*) int MPI_Comm_idup(MPI_Comm, MPI_Comm*, MPI_Request*) int MPI_Comm_create(MPI_Comm, MPI_Group, MPI_Comm*) int MPI_Comm_create_group(MPI_Comm, MPI_Group, int, MPI_Comm*) int MPI_Comm_split(MPI_Comm, int, int, MPI_Comm*) enum: MPI_COMM_TYPE_SHARED #:= MPI_UNDEFINED int MPI_Comm_split_type(MPI_Comm, int, int, MPI_Info, MPI_Comm*) int MPI_Comm_set_info(MPI_Comm, MPI_Info) int MPI_Comm_get_info(MPI_Comm, MPI_Info*) enum: MPI_CART #:= MPI_UNDEFINED int MPI_Cart_create(MPI_Comm, int, int[], int[], int, MPI_Comm*) int MPI_Cartdim_get(MPI_Comm, int*) int MPI_Cart_get(MPI_Comm, int, int[], int[], int[]) int MPI_Cart_rank(MPI_Comm, int[], int*) int MPI_Cart_coords(MPI_Comm, int, int, int[]) int MPI_Cart_shift(MPI_Comm, int, int, int[], int[]) int MPI_Cart_sub(MPI_Comm, int[], MPI_Comm*) int MPI_Cart_map(MPI_Comm, int, int[], 
int[], int*) int MPI_Dims_create(int, int, int[]) enum: MPI_GRAPH #:= MPI_UNDEFINED int MPI_Graph_create(MPI_Comm, int, int[], int[], int, MPI_Comm*) int MPI_Graphdims_get(MPI_Comm, int*, int*) int MPI_Graph_get(MPI_Comm, int, int, int[], int[]) int MPI_Graph_map(MPI_Comm, int, int[], int[], int*) int MPI_Graph_neighbors_count(MPI_Comm, int, int*) int MPI_Graph_neighbors(MPI_Comm, int, int, int[]) enum: MPI_DIST_GRAPH #:= MPI_UNDEFINED int* MPI_UNWEIGHTED #:= 0 int* MPI_WEIGHTS_EMPTY #:= MPI_UNWEIGHTED int MPI_Dist_graph_create_adjacent(MPI_Comm, int, int[], int[], int, int[], int[], MPI_Info, int, MPI_Comm*) int MPI_Dist_graph_create(MPI_Comm, int, int[], int[], int[], int[], MPI_Info, int, MPI_Comm*) int MPI_Dist_graph_neighbors_count(MPI_Comm, int*, int*, int*) int MPI_Dist_graph_neighbors(MPI_Comm, int, int[], int[], int, int[], int[]) int MPI_Intercomm_create(MPI_Comm, int, MPI_Comm, int, int, MPI_Comm*) int MPI_Comm_remote_group(MPI_Comm, MPI_Group*) int MPI_Comm_remote_size(MPI_Comm, int*) int MPI_Intercomm_merge(MPI_Comm, int, MPI_Comm*) enum: MPI_MAX_PORT_NAME #:= 1 int MPI_Open_port(MPI_Info, char[]) int MPI_Close_port(char[]) int MPI_Publish_name(char[], MPI_Info, char[]) int MPI_Unpublish_name(char[], MPI_Info, char[]) int MPI_Lookup_name(char[], MPI_Info, char[]) int MPI_Comm_accept(char[], MPI_Info, int, MPI_Comm, MPI_Comm*) int MPI_Comm_connect(char[], MPI_Info, int, MPI_Comm, MPI_Comm*) int MPI_Comm_join(int, MPI_Comm*) int MPI_Comm_disconnect(MPI_Comm*) char** MPI_ARGV_NULL #:= 0 char*** MPI_ARGVS_NULL #:= 0 int* MPI_ERRCODES_IGNORE #:= 0 int MPI_Comm_spawn(char[], char*[], int, MPI_Info, int, MPI_Comm, MPI_Comm*, int[]) int MPI_Comm_spawn_multiple(int, char*[], char**[], int[], MPI_Info[], int, MPI_Comm, MPI_Comm*, int[]) int MPI_Comm_get_parent(MPI_Comm*) # Deprecated since MPI-2, removed in MPI-3 int MPI_Errhandler_get(MPI_Comm, MPI_Errhandler*) int MPI_Errhandler_set(MPI_Comm, MPI_Errhandler) ctypedef void MPI_Handler_function(MPI_Comm*,int*,...) int MPI_Errhandler_create(MPI_Handler_function*, MPI_Errhandler*) # Deprecated since MPI-2 int MPI_Attr_get(MPI_Comm, int, void*, int*) int MPI_Attr_put(MPI_Comm, int, void*) int MPI_Attr_delete(MPI_Comm, int) ctypedef int MPI_Copy_function(MPI_Comm,int,void*,void*,void*,int*) ctypedef int MPI_Delete_function(MPI_Comm,int,void*,void*) MPI_Copy_function* MPI_DUP_FN #:= 0 MPI_Copy_function* MPI_NULL_COPY_FN #:= 0 MPI_Delete_function* MPI_NULL_DELETE_FN #:= 0 int MPI_Keyval_create(MPI_Copy_function*, MPI_Delete_function*, int*, void*) int MPI_Keyval_free(int*) int MPI_Comm_get_errhandler(MPI_Comm, MPI_Errhandler*) #:= MPI_Errhandler_get int MPI_Comm_set_errhandler(MPI_Comm, MPI_Errhandler) #:= MPI_Errhandler_set ctypedef void MPI_Comm_errhandler_fn(MPI_Comm*,int*,...) #:= MPI_Handler_function ctypedef void MPI_Comm_errhandler_function(MPI_Comm*,int*,...) 
#:= MPI_Comm_errhandler_fn int MPI_Comm_create_errhandler(MPI_Comm_errhandler_function*, MPI_Errhandler*) #:= MPI_Errhandler_create int MPI_Comm_call_errhandler(MPI_Comm, int) int MPI_Comm_get_name(MPI_Comm, char[], int*) int MPI_Comm_set_name(MPI_Comm, char[]) enum: MPI_TAG_UB #:= MPI_KEYVAL_INVALID enum: MPI_HOST #:= MPI_KEYVAL_INVALID enum: MPI_IO #:= MPI_KEYVAL_INVALID enum: MPI_WTIME_IS_GLOBAL #:= MPI_KEYVAL_INVALID enum: MPI_UNIVERSE_SIZE #:= MPI_KEYVAL_INVALID enum: MPI_APPNUM #:= MPI_KEYVAL_INVALID enum: MPI_LASTUSEDCODE #:= MPI_KEYVAL_INVALID int MPI_Comm_get_attr(MPI_Comm, int, void*, int*) #:= MPI_Attr_get int MPI_Comm_set_attr(MPI_Comm, int, void*) #:= MPI_Attr_put int MPI_Comm_delete_attr(MPI_Comm, int) #:= MPI_Attr_delete ctypedef int MPI_Comm_copy_attr_function(MPI_Comm,int,void*,void*,void*,int*) #:= MPI_Copy_function ctypedef int MPI_Comm_delete_attr_function(MPI_Comm,int,void*,void*) #:= MPI_Delete_function MPI_Comm_copy_attr_function* MPI_COMM_DUP_FN #:= MPI_DUP_FN MPI_Comm_copy_attr_function* MPI_COMM_NULL_COPY_FN #:= MPI_NULL_COPY_FN MPI_Comm_delete_attr_function* MPI_COMM_NULL_DELETE_FN #:= MPI_NULL_DELETE_FN int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function*, MPI_Comm_delete_attr_function*, int*, void*) #:= MPI_Keyval_create int MPI_Comm_free_keyval(int*) #:= MPI_Keyval_free #----------------------------------------------------------------- MPI_Win MPI_WIN_NULL #:= 0 int MPI_Win_free(MPI_Win*) int MPI_Win_create(void*, MPI_Aint, int, MPI_Info, MPI_Comm, MPI_Win*) int MPI_Win_allocate(MPI_Aint, int, MPI_Info, MPI_Comm, void*, MPI_Win*) int MPI_Win_allocate_shared(MPI_Aint, int, MPI_Info, MPI_Comm, void*, MPI_Win*) int MPI_Win_shared_query(MPI_Win, int, MPI_Aint*, int*, void*) int MPI_Win_create_dynamic(MPI_Info, MPI_Comm, MPI_Win*) int MPI_Win_attach(MPI_Win, void*, MPI_Aint) int MPI_Win_detach(MPI_Win, void*) int MPI_Win_set_info(MPI_Win, MPI_Info) int MPI_Win_get_info(MPI_Win, MPI_Info*) int MPI_Win_get_group(MPI_Win, MPI_Group*) int MPI_Get(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Win) int MPI_Put(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Win) int MPI_Accumulate(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win) int MPI_Get_accumulate(void*, int, MPI_Datatype, void*, int,MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win) int MPI_Fetch_and_op(void*, void*, MPI_Datatype, int, MPI_Aint, MPI_Op, MPI_Win) int MPI_Compare_and_swap(void*, void*, void*, MPI_Datatype, int, MPI_Aint, MPI_Win) int MPI_Rget(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Win, MPI_Request*) int MPI_Rput(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Win, MPI_Request*) int MPI_Raccumulate(void*, int, MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win, MPI_Request*) int MPI_Rget_accumulate(void*, int, MPI_Datatype, void*, int,MPI_Datatype, int, MPI_Aint, int, MPI_Datatype, MPI_Op, MPI_Win, MPI_Request*) enum: MPI_MODE_NOCHECK #:= MPI_UNDEFINED enum: MPI_MODE_NOSTORE #:= MPI_UNDEFINED enum: MPI_MODE_NOPUT #:= MPI_UNDEFINED enum: MPI_MODE_NOPRECEDE #:= MPI_UNDEFINED enum: MPI_MODE_NOSUCCEED #:= MPI_UNDEFINED int MPI_Win_fence(int, MPI_Win) int MPI_Win_post(MPI_Group, int, MPI_Win) int MPI_Win_start(MPI_Group, int, MPI_Win) int MPI_Win_complete(MPI_Win) int MPI_Win_wait(MPI_Win) int MPI_Win_test(MPI_Win, int*) enum: MPI_LOCK_EXCLUSIVE #:= MPI_UNDEFINED enum: MPI_LOCK_SHARED #:= MPI_UNDEFINED int MPI_Win_lock(int, int, int, MPI_Win) int MPI_Win_unlock(int, MPI_Win) 
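    # Illustrative passive-target RMA sequence (comment only; ``buf``,
    # ``rank`` and ``win`` are hypothetical variables), using only calls
    # declared in this file:
    #
    #   MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win)
    #   MPI_Put(buf, 1, MPI_INT, rank, 0, 1, MPI_INT, win)
    #   MPI_Win_unlock(rank, win)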
int MPI_Win_lock_all(int, MPI_Win) int MPI_Win_unlock_all(MPI_Win) int MPI_Win_flush(int, MPI_Win) int MPI_Win_flush_all(MPI_Win) int MPI_Win_flush_local(int, MPI_Win) int MPI_Win_flush_local_all(MPI_Win) int MPI_Win_sync(MPI_Win) int MPI_Win_get_errhandler(MPI_Win, MPI_Errhandler*) int MPI_Win_set_errhandler(MPI_Win, MPI_Errhandler) ctypedef void MPI_Win_errhandler_fn(MPI_Win*,int*,...) ctypedef void MPI_Win_errhandler_function(MPI_Win*,int*,...) #:= MPI_Win_errhandler_fn int MPI_Win_create_errhandler(MPI_Win_errhandler_function*, MPI_Errhandler*) int MPI_Win_call_errhandler(MPI_Win, int) int MPI_Win_get_name(MPI_Win, char[], int*) int MPI_Win_set_name(MPI_Win, char[]) enum: MPI_WIN_BASE #:= MPI_KEYVAL_INVALID enum: MPI_WIN_SIZE #:= MPI_KEYVAL_INVALID enum: MPI_WIN_DISP_UNIT #:= MPI_KEYVAL_INVALID enum: MPI_WIN_CREATE_FLAVOR #:= MPI_KEYVAL_INVALID enum: MPI_WIN_MODEL #:= MPI_KEYVAL_INVALID enum: MPI_WIN_FLAVOR_CREATE #:= MPI_UNDEFINED enum: MPI_WIN_FLAVOR_ALLOCATE #:= MPI_UNDEFINED enum: MPI_WIN_FLAVOR_DYNAMIC #:= MPI_UNDEFINED enum: MPI_WIN_FLAVOR_SHARED #:= MPI_UNDEFINED enum: MPI_WIN_SEPARATE #:= MPI_UNDEFINED enum: MPI_WIN_UNIFIED #:= MPI_UNDEFINED int MPI_Win_get_attr(MPI_Win, int, void*, int*) int MPI_Win_set_attr(MPI_Win, int, void*) int MPI_Win_delete_attr(MPI_Win, int) ctypedef int MPI_Win_copy_attr_function(MPI_Win,int,void*,void*,void*,int*) ctypedef int MPI_Win_delete_attr_function(MPI_Win,int,void*,void*) MPI_Win_copy_attr_function* MPI_WIN_DUP_FN #:= 0 MPI_Win_copy_attr_function* MPI_WIN_NULL_COPY_FN #:= 0 MPI_Win_delete_attr_function* MPI_WIN_NULL_DELETE_FN #:= 0 int MPI_Win_create_keyval(MPI_Win_copy_attr_function*, MPI_Win_delete_attr_function*, int*, void*) int MPI_Win_free_keyval(int*) #----------------------------------------------------------------- MPI_File MPI_FILE_NULL #:= 0 enum: MPI_MODE_RDONLY #:= 1 enum: MPI_MODE_RDWR #:= 2 enum: MPI_MODE_WRONLY #:= 4 enum: MPI_MODE_CREATE #:= 8 enum: MPI_MODE_EXCL #:= 16 enum: MPI_MODE_DELETE_ON_CLOSE #:= 32 enum: MPI_MODE_UNIQUE_OPEN #:= 64 enum: MPI_MODE_APPEND #:= 128 enum: MPI_MODE_SEQUENTIAL #:= 256 int MPI_File_open(MPI_Comm, char[], int, MPI_Info, MPI_File*) int MPI_File_close(MPI_File*) int MPI_File_delete(char[], MPI_Info) int MPI_File_set_size(MPI_File, MPI_Offset) int MPI_File_preallocate(MPI_File, MPI_Offset) int MPI_File_get_size(MPI_File, MPI_Offset*) int MPI_File_get_group(MPI_File, MPI_Group*) int MPI_File_get_amode(MPI_File, int*) int MPI_File_set_info(MPI_File, MPI_Info) int MPI_File_get_info(MPI_File, MPI_Info*) int MPI_File_get_view(MPI_File, MPI_Offset*, MPI_Datatype*, MPI_Datatype*, char[]) int MPI_File_set_view(MPI_File, MPI_Offset, MPI_Datatype, MPI_Datatype, char[], MPI_Info) int MPI_File_read_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_read_at_all (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_at_all (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_iread_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iread_at_all (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_at (MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_at_all(MPI_File, MPI_Offset, void*, int, MPI_Datatype, MPI_Request*) enum: MPI_SEEK_SET #:= 0 enum: MPI_SEEK_CUR #:= 1 enum: MPI_SEEK_END #:= 2 enum: MPI_DISPLACEMENT_CURRENT #:= 3 int MPI_File_seek(MPI_File, MPI_Offset, 
int) int MPI_File_get_position(MPI_File, MPI_Offset*) int MPI_File_get_byte_offset(MPI_File, MPI_Offset, MPI_Offset*) int MPI_File_read (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_read_all (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_all (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_iread (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iread_all (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_all(MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_read_shared (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_shared (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_iread_shared (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_iwrite_shared (MPI_File, void*, int, MPI_Datatype, MPI_Request*) int MPI_File_read_ordered (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_write_ordered (MPI_File, void*, int, MPI_Datatype, MPI_Status*) int MPI_File_seek_shared(MPI_File, MPI_Offset, int) int MPI_File_get_position_shared(MPI_File, MPI_Offset*) int MPI_File_read_at_all_begin (MPI_File, MPI_Offset, void*, int, MPI_Datatype) int MPI_File_read_at_all_end (MPI_File, void*, MPI_Status*) int MPI_File_write_at_all_begin (MPI_File, MPI_Offset, void*, int, MPI_Datatype) int MPI_File_write_at_all_end (MPI_File, void*, MPI_Status*) int MPI_File_read_all_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_read_all_end (MPI_File, void*, MPI_Status*) int MPI_File_write_all_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_write_all_end (MPI_File, void*, MPI_Status*) int MPI_File_read_ordered_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_read_ordered_end (MPI_File, void*, MPI_Status*) int MPI_File_write_ordered_begin (MPI_File, void*, int, MPI_Datatype) int MPI_File_write_ordered_end (MPI_File, void*, MPI_Status*) int MPI_File_get_type_extent(MPI_File, MPI_Datatype, MPI_Aint*) int MPI_File_set_atomicity(MPI_File, int) int MPI_File_get_atomicity(MPI_File, int*) int MPI_File_sync(MPI_File) int MPI_File_get_errhandler(MPI_File, MPI_Errhandler*) int MPI_File_set_errhandler(MPI_File, MPI_Errhandler) ctypedef void MPI_File_errhandler_fn(MPI_File*,int*,...) ctypedef void MPI_File_errhandler_function(MPI_File*,int*,...) 
#:= MPI_File_errhandler_fn int MPI_File_create_errhandler(MPI_File_errhandler_function*, MPI_Errhandler*) int MPI_File_call_errhandler(MPI_File, int) ctypedef int MPI_Datarep_conversion_function(void*,MPI_Datatype,int,void*,MPI_Offset,void*) ctypedef int MPI_Datarep_extent_function(MPI_Datatype,MPI_Aint*,void*) MPI_Datarep_conversion_function* MPI_CONVERSION_FN_NULL #:= 0 enum: MPI_MAX_DATAREP_STRING #:= 1 int MPI_Register_datarep(char[], MPI_Datarep_conversion_function*, MPI_Datarep_conversion_function*, MPI_Datarep_extent_function*, void*) #----------------------------------------------------------------- MPI_Errhandler MPI_ERRHANDLER_NULL #:= 0 MPI_Errhandler MPI_ERRORS_RETURN #:= MPI_ERRHANDLER_NULL MPI_Errhandler MPI_ERRORS_ARE_FATAL #:= MPI_ERRHANDLER_NULL int MPI_Errhandler_free(MPI_Errhandler*) #----------------------------------------------------------------- enum: MPI_MAX_ERROR_STRING #:= 1 int MPI_Error_class(int, int*) int MPI_Error_string(int, char[], int*) int MPI_Add_error_class(int*) int MPI_Add_error_code(int,int*) int MPI_Add_error_string(int,char[]) # MPI-1 Error classes # ------------------- # Actually no errors enum: MPI_SUCCESS #:= 0 enum: MPI_ERR_LASTCODE #:= 1 # MPI-1 Objects enum: MPI_ERR_COMM #:= MPI_ERR_LASTCODE enum: MPI_ERR_GROUP #:= MPI_ERR_LASTCODE enum: MPI_ERR_TYPE #:= MPI_ERR_LASTCODE enum: MPI_ERR_REQUEST #:= MPI_ERR_LASTCODE enum: MPI_ERR_OP #:= MPI_ERR_LASTCODE # Communication argument parameters enum: MPI_ERR_BUFFER #:= MPI_ERR_LASTCODE enum: MPI_ERR_COUNT #:= MPI_ERR_LASTCODE enum: MPI_ERR_TAG #:= MPI_ERR_LASTCODE enum: MPI_ERR_RANK #:= MPI_ERR_LASTCODE enum: MPI_ERR_ROOT #:= MPI_ERR_LASTCODE enum: MPI_ERR_TRUNCATE #:= MPI_ERR_LASTCODE # Multiple completion enum: MPI_ERR_IN_STATUS #:= MPI_ERR_LASTCODE enum: MPI_ERR_PENDING #:= MPI_ERR_LASTCODE # Topology argument parameters enum: MPI_ERR_TOPOLOGY #:= MPI_ERR_LASTCODE enum: MPI_ERR_DIMS #:= MPI_ERR_LASTCODE # All other arguments, this is a class with many kinds enum: MPI_ERR_ARG #:= MPI_ERR_LASTCODE # Other errors that are not simply an invalid argument enum: MPI_ERR_OTHER #:= MPI_ERR_LASTCODE enum: MPI_ERR_UNKNOWN #:= MPI_ERR_LASTCODE enum: MPI_ERR_INTERN #:= MPI_ERR_LASTCODE # MPI-2 Error classes # ------------------- # Attributes enum: MPI_ERR_KEYVAL #:= MPI_ERR_ARG # Memory Allocation enum: MPI_ERR_NO_MEM #:= MPI_ERR_UNKNOWN # Info Object enum: MPI_ERR_INFO #:= MPI_ERR_ARG enum: MPI_ERR_INFO_KEY #:= MPI_ERR_UNKNOWN enum: MPI_ERR_INFO_VALUE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_INFO_NOKEY #:= MPI_ERR_UNKNOWN # Dynamic Process Management enum: MPI_ERR_SPAWN #:= MPI_ERR_UNKNOWN enum: MPI_ERR_PORT #:= MPI_ERR_UNKNOWN enum: MPI_ERR_SERVICE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_NAME #:= MPI_ERR_UNKNOWN # Input/Ouput enum: MPI_ERR_FILE #:= MPI_ERR_ARG enum: MPI_ERR_NOT_SAME #:= MPI_ERR_UNKNOWN enum: MPI_ERR_BAD_FILE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_NO_SUCH_FILE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_FILE_EXISTS #:= MPI_ERR_UNKNOWN enum: MPI_ERR_FILE_IN_USE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_AMODE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_ACCESS #:= MPI_ERR_UNKNOWN enum: MPI_ERR_READ_ONLY #:= MPI_ERR_UNKNOWN enum: MPI_ERR_NO_SPACE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_QUOTA #:= MPI_ERR_UNKNOWN enum: MPI_ERR_UNSUPPORTED_DATAREP #:= MPI_ERR_UNKNOWN enum: MPI_ERR_UNSUPPORTED_OPERATION #:= MPI_ERR_UNKNOWN enum: MPI_ERR_CONVERSION #:= MPI_ERR_UNKNOWN enum: MPI_ERR_DUP_DATAREP #:= MPI_ERR_UNKNOWN enum: MPI_ERR_IO #:= MPI_ERR_UNKNOWN # One-Sided Communications enum: MPI_ERR_WIN #:= MPI_ERR_ARG enum: MPI_ERR_BASE #:= MPI_ERR_UNKNOWN enum: 
MPI_ERR_SIZE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_DISP #:= MPI_ERR_UNKNOWN enum: MPI_ERR_ASSERT #:= MPI_ERR_UNKNOWN enum: MPI_ERR_LOCKTYPE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_RMA_CONFLICT #:= MPI_ERR_UNKNOWN enum: MPI_ERR_RMA_SYNC #:= MPI_ERR_UNKNOWN enum: MPI_ERR_RMA_RANGE #:= MPI_ERR_UNKNOWN enum: MPI_ERR_RMA_ATTACH #:= MPI_ERR_UNKNOWN enum: MPI_ERR_RMA_SHARED #:= MPI_ERR_UNKNOWN enum: MPI_ERR_RMA_FLAVOR #:= MPI_ERR_UNKNOWN #----------------------------------------------------------------- int MPI_Alloc_mem(MPI_Aint, MPI_Info, void*) int MPI_Free_mem(void*) #----------------------------------------------------------------- int MPI_Init(int*, char**[]) int MPI_Finalize() int MPI_Initialized(int*) int MPI_Finalized(int*) enum: MPI_THREAD_SINGLE #:= 0 enum: MPI_THREAD_FUNNELED #:= 1 enum: MPI_THREAD_SERIALIZED #:= 2 enum: MPI_THREAD_MULTIPLE #:= 3 int MPI_Init_thread(int*, char**[], int, int*) int MPI_Query_thread(int*) int MPI_Is_thread_main(int*) #----------------------------------------------------------------- enum: MPI_VERSION #:= 1 enum: MPI_SUBVERSION #:= 0 int MPI_Get_version(int*, int*) enum: MPI_MAX_LIBRARY_VERSION_STRING #:= 1 int MPI_Get_library_version(char[], int*) enum: MPI_MAX_PROCESSOR_NAME #:= 1 int MPI_Get_processor_name(char[], int*) #----------------------------------------------------------------- double MPI_Wtime() double MPI_Wtick() int MPI_Pcontrol(int, ...) #----------------------------------------------------------------- # Fortran INTEGER ctypedef int MPI_Fint MPI_Fint* MPI_F_STATUS_IGNORE #:= 0 MPI_Fint* MPI_F_STATUSES_IGNORE #:= 0 int MPI_Status_c2f (MPI_Status*, MPI_Fint*) int MPI_Status_f2c (MPI_Fint*, MPI_Status*) # C -> Fortran MPI_Fint MPI_Type_c2f (MPI_Datatype) MPI_Fint MPI_Request_c2f (MPI_Request) MPI_Fint MPI_Message_c2f (MPI_Message) MPI_Fint MPI_Op_c2f (MPI_Op) MPI_Fint MPI_Info_c2f (MPI_Info) MPI_Fint MPI_Group_c2f (MPI_Group) MPI_Fint MPI_Comm_c2f (MPI_Comm) MPI_Fint MPI_Win_c2f (MPI_Win) MPI_Fint MPI_File_c2f (MPI_File) MPI_Fint MPI_Errhandler_c2f (MPI_Errhandler) # Fortran -> C MPI_Datatype MPI_Type_f2c (MPI_Fint) MPI_Request MPI_Request_f2c (MPI_Fint) MPI_Message MPI_Message_f2c (MPI_Fint) MPI_Op MPI_Op_f2c (MPI_Fint) MPI_Info MPI_Info_f2c (MPI_Fint) MPI_Group MPI_Group_f2c (MPI_Fint) MPI_Comm MPI_Comm_f2c (MPI_Fint) MPI_Win MPI_Win_f2c (MPI_Fint) MPI_File MPI_File_f2c (MPI_Fint) MPI_Errhandler MPI_Errhandler_f2c (MPI_Fint) ## ctypedef struct MPI_F08_status #:= MPI_Status ## MPI_F08_status* MPI_F08_STATUS_IGNORE #:= 0 ## MPI_F08_status* MPI_F08_STATUSES_IGNORE #:= 0 ## int MPI_Status_c2f08(MPI_Status*, MPI_F08_status*) ## int MPI_Status_f082c(MPI_F08_status*, MPI_Status*) ## int MPI_Status_f2f08(MPI_Fint*, MPI_F08_status*) ## int MPI_Status_f082f(MPI_F08_status*, MPI_Fint*) #----------------------------------------------------------------- mpi4py-3.1.6/src/mpi4py/py.typed000066400000000000000000000000331460670727200164620ustar00rootroot00000000000000# Marker file for PEP 561. mpi4py-3.1.6/src/mpi4py/run.py000066400000000000000000000215321460670727200161500ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Run Python code using ``mpi4py``. Run Python code (scripts, modules, zip files) using the ``runpy`` module. In case of an unhandled exception, abort execution of the MPI program by calling ``MPI.COMM_WORLD.Abort()``. """ def run_command_line(args=None): """Run command line ``[pyfile | -m mod | -c cmd | -] [arg] ...``. 
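    For example, a hypothetical script could be launched under MPI as
    ``mpiexec -n 4 python -m mpi4py myscript.py arg1 arg2``; an unhandled
    exception in any rank triggers ``MPI.COMM_WORLD.Abort()`` so the whole
    MPI job is terminated.  The accepted forms are: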
* ``pyfile`` : program read from script file * ``-m mod`` : run library module as a script * ``-c cmd`` : program passed in as a command string * ``-`` : program read from standard input (``sys.stdin``) * ``arg ...``: arguments passed to program in ``sys.argv[1:]`` """ # pylint: disable=import-outside-toplevel import sys from runpy import run_module, run_path def run_string(string, init_globals=None, run_name=None, filename='', argv0='-c'): # pylint: disable=missing-docstring from runpy import _run_module_code karg = 'script_name' if sys.version_info >= (3, 4) else 'mod_fname' code = compile(string, filename, 'exec', 0, 1) return _run_module_code(code, init_globals, run_name, **{karg: argv0}) sys.argv[:] = args if args is not None else sys.argv[1:] if sys.argv[0] == '-': cmd = sys.stdin.read() run_string(cmd, run_name='__main__', filename='', argv0='-') elif sys.argv[0] == '-c': cmd = sys.argv.pop(1) # Remove "cmd" from argument list run_string(cmd, run_name='__main__', filename='', argv0='-c') elif sys.argv[0] == '-m': del sys.argv[0] # Remove "-m" from argument list run_module(sys.argv[0], run_name='__main__', alter_sys=True) else: from os.path import realpath, dirname if not getattr(sys.flags, 'isolated', 0): # pragma: no branch sys.path[0] = realpath(dirname(sys.argv[0])) # Fix sys.path run_path(sys.argv[0], run_name='__main__') def set_abort_status(status): """Terminate MPI execution environment at Python exit. Terminate MPI execution environment at Python exit by calling ``MPI.COMM_WORLD.Abort(status)``. This function should be called within an ``except`` block. Afterwards, exceptions should be re-raised. """ # pylint: disable=import-outside-toplevel import sys status = (status if isinstance(status, int) else 0 if status is None else 1) pkg = __spec__.parent or __name__.rpartition('.')[0] mpi = sys.modules.get(pkg + '.MPI') if mpi is not None and status: # pylint: disable=protected-access mpi._set_abort_status(status) return sys.exc_info() def main(): """Entry-point for ``python -m mpi4py.run ...``.""" # pylint: disable=missing-docstring # pylint: disable=too-many-statements # pylint: disable=import-outside-toplevel import os import sys package = __spec__.parent def prefix(): prefix = os.path.dirname(__file__) print(prefix, file=sys.stdout) sys.exit(0) def version(): from . import __version__ print(package, __version__, file=sys.stdout) sys.exit(0) def mpi_std_version(): from . import rc rc.initialize = rc.finalize = False from . import MPI version = ".".join(map(str, (MPI.VERSION, MPI.SUBVERSION))) rtver = ".".join(map(str, MPI.Get_version())) note = " (runtime: MPI {0})".format(rtver) if rtver != version else "" print("MPI {0}{1}".format(version, note), file=sys.stdout) sys.exit(0) def mpi_lib_version(): from . import rc rc.initialize = rc.finalize = False from . import MPI library_version = MPI.Get_library_version() print(library_version, file=sys.stdout) sys.exit(0) def usage(errmess=None): from textwrap import dedent if __name__ == '__main__': prog_name = package + '.run' else: prog_name = package python_exe = os.path.basename(sys.executable) subs = dict(prog=prog_name, python=python_exe) cmdline = dedent(""" usage: {python} -m {prog} [options] [arg] ... or: {python} -m {prog} [options] -m [arg] ... or: {python} -m {prog} [options] -c [arg] ... or: {python} -m {prog} [options] - [arg] ... """).strip().format(**subs) helptip = dedent(""" Try `{python} -m {prog} -h` for more information. 
""").strip().format(**subs) options = dedent(""" options: --prefix show install path and exit --version show version number and exit --mpi-std-version show MPI standard version and exit --mpi-lib-version show MPI library version and exit -h|--help show this help message and exit -rc set 'mpi4py.rc.key=value' -p|--profile use for profiling --mpe profile with MPE --vt profile with VampirTrace """).strip() if errmess: print(errmess, file=sys.stderr) print(cmdline, file=sys.stderr) print(helptip, file=sys.stderr) sys.exit(1) else: print(cmdline, file=sys.stdout) print(options, file=sys.stdout) sys.exit(0) def parse_command_line(args=None): # pylint: disable=too-many-branches class Options: # pylint: disable=too-few-public-methods rc_args = {} profile = None def poparg(args): if len(args) < 2 or args[1].startswith('-'): usage('Argument expected for option: ' + args[0]) return args.pop(1) options = Options() args = sys.argv[1:] if args is None else args[:] while args and args[0].startswith('-'): if args[0] in ('-m', '-c', '-'): break # Stop processing options if args[0] in ('-h', '-help', '--help'): usage() # Print help and exit if args[0] in ('-prefix', '--prefix'): prefix() # Print install path and exit if args[0] in ('-version', '--version'): version() # Print version number and exit if args[0] in ('-mpi-std-version', '--mpi-std-version'): mpi_std_version() # Print MPI standard version and exit if args[0] in ('-mpi-lib-version', '--mpi-lib-version'): mpi_lib_version() # Print MPI library version and exit try: arg0 = args[0] if arg0.startswith('--'): if '=' in arg0: opt, _, arg = arg0[1:].partition('=') if opt in ('-rc', '-profile'): arg0, args[1:1] = opt, [arg] else: arg0 = arg0[1:] if arg0 == '-rc': for entry in poparg(args).split(','): key, _, val = entry.partition('=') if not key or not val: raise ValueError(entry) try: # pylint: disable=eval-used options.rc_args[key] = eval(val, {}) except NameError: options.rc_args[key] = val elif arg0 in ('-p', '-profile'): options.profile = poparg(args) or None elif arg0 in ('-mpe', '-vt'): options.profile = arg0[1:] else: usage('Unknown option: ' + args[0]) del args[0] except Exception: # pylint: disable=broad-except # Bad option, print usage and exit with error usage('Cannot parse option: ' + args[0]) # Check remaining args and return to caller if not args: usage("No path specified for execution") elif args[0] in ('-m', '-c') and len(args) < 2: usage("Argument expected for option: " + args[0]) return options, args def bootstrap(options): if options.rc_args: # Set mpi4py.rc parameters from . import rc rc(**options.rc_args) if options.profile: # Load profiling library from . import profile profile(options.profile) # Parse and process command line options options, args = parse_command_line() bootstrap(options) # Run user code. In case of an unhandled exception, abort # execution of the MPI program by calling 'MPI_Abort()'. try: run_command_line(args) except SystemExit as exc: set_abort_status(exc.code) raise except: set_abort_status(1) raise if __name__ == '__main__': main() mpi4py-3.1.6/src/mpi4py/run.pyi000066400000000000000000000003151460670727200163150ustar00rootroot00000000000000from typing import Any, Optional from typing import Sequence def run_command_line(args: Optional[Sequence[str]] = None) -> None: ... def set_abort_status(status: Any) -> None: ... def main() -> None: ... 
mpi4py-3.1.6/src/mpi4py/util/000077500000000000000000000000001460670727200157445ustar00rootroot00000000000000mpi4py-3.1.6/src/mpi4py/util/__init__.py000066400000000000000000000001271460670727200200550ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Miscellaneous utilities.""" mpi4py-3.1.6/src/mpi4py/util/__init__.pyi000066400000000000000000000000101460670727200202150ustar00rootroot00000000000000# empty mpi4py-3.1.6/src/mpi4py/util/dtlib.py000066400000000000000000000256231460670727200174240ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Convert NumPy and MPI datatypes.""" # pylint: disable=too-many-locals # pylint: disable=too-many-branches # pylint: disable=too-many-statements # pylint: disable=too-many-return-statements from .. import MPI try: from numpy import dtype as _np_dtype except ImportError: # pragma: no cover pass def _get_datatype(dtype): # pylint: disable=protected-access return MPI._typedict.get(dtype.char) def _get_typecode(datatype): # pylint: disable=protected-access return MPI._typecode(datatype) def _get_alignment_ctypes(typecode): # pylint: disable=protected-access # pylint: disable=import-outside-toplevel import ctypes as ct if typecode in ('p', 'n', 'P', 'N'): kind = 'i' if typecode in ('p', 'n') else 'u' size = ct.sizeof(ct.c_void_p) typecode = '{}{:d}'.format(kind, size) if typecode in ('F', 'D', 'G'): typecode = typecode.lower() if len(typecode) > 1: mapping = { 'b': (ct.c_bool,), 'i': (ct.c_int8, ct.c_int16, ct.c_int32, ct.c_int64), 'u': (ct.c_uint8, ct.c_uint16, ct.c_uint32, ct.c_uint64), 'f': (ct.c_float, ct.c_double, ct.c_longdouble), } kind, size = typecode[0], int(typecode[1:]) if kind == 'c': kind, size = 'f', size // 2 for c_type in mapping[kind]: if ct.sizeof(c_type) == size: typecode = c_type._type_ c_type_base = ct._SimpleCData c_type = type('c_type', (c_type_base,), dict(_type_=typecode)) fields = [('base', ct.c_char), ('c_type', c_type)] struct = type('S', (ct.Structure,), dict(_fields_=fields)) return struct.c_type.offset # pylint: disable=no-member def _get_alignment(datatype): typecode = _get_typecode(datatype) if typecode is None: combiner = datatype.combiner combiner_f90 = ( MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX, ) if combiner in combiner_f90: typesize = datatype.Get_size() typekind = 'ifc'[combiner_f90.index(combiner)] typecode = '{0}{1:d}'.format(typekind, typesize) if typecode is None: # pylint: disable=import-outside-toplevel from struct import calcsize alignment = datatype.Get_size() return min(max(1, alignment), calcsize('P')) try: return _np_dtype(typecode).alignment except NameError: # pragma: no cover return _get_alignment_ctypes(typecode) def _is_aligned(datatype, offset=0): """Dermine whether an MPI datatype is aligned.""" if datatype.is_predefined: if offset == 0: return True alignment = _get_alignment(datatype) return offset % alignment == 0 combiner = datatype.combiner basetype, _, info = datatype.decode() types, disps = [basetype], [0] try: if combiner == MPI.COMBINER_RESIZED: disps = [info['extent']] if combiner == MPI.COMBINER_STRUCT: types = info['datatypes'] disps = info['displacements'] if combiner == MPI.COMBINER_HVECTOR: disps = [info['stride'] if info['count'] > 1 else 0] if combiner == MPI.COMBINER_HINDEXED: disps = info['displacements'] if combiner == MPI.COMBINER_HINDEXED_BLOCK: disps = info['displacements'] return all( _is_aligned(t, offset + d) for t, d in zip(types, disps) ) finally: for _tp in types: if not 
_tp.is_predefined: _tp.Free() def from_numpy_dtype(dtype): """Convert NumPy datatype to MPI datatype.""" try: dtype = _np_dtype(dtype) except NameError: # pragma: no cover # pylint: disable=raise-missing-from raise RuntimeError("NumPy is not available") if dtype.hasobject: raise ValueError("NumPy datatype with object entries") if not dtype.isnative: raise ValueError("NumPy datatype with non-native byteorder") # struct data type fields = dtype.fields if fields: blocklengths = [] displacements = [] datatypes = [] try: for name in dtype.names: ftype, fdisp = fields[name] blocklengths.append(1) displacements.append(fdisp) datatypes.append(from_numpy_dtype(ftype)) datatype = MPI.Datatype.Create_struct( blocklengths, displacements, datatypes, ) finally: for mtp in datatypes: mtp.Free() try: return datatype.Create_resized(0, dtype.itemsize) finally: datatype.Free() # subarray data type subdtype = dtype.subdtype if subdtype: base, shape = subdtype datatype = from_numpy_dtype(base) try: if len(shape) == 1: return datatype.Create_contiguous(shape[0]) starts = (0,) * len(shape) return datatype.Create_subarray(shape, shape, starts) finally: datatype.Free() # elementary data type datatype = _get_datatype(dtype) if datatype is None: raise ValueError("cannot convert NumPy datatype to MPI") return datatype.Dup() def to_numpy_dtype(datatype): """Convert MPI datatype to NumPy datatype.""" def mpi2npy(datatype, count): dtype = to_numpy_dtype(datatype) return dtype if count == 1 else (dtype, count) def np_dtype(spec): try: return _np_dtype(spec) except NameError: # pragma: no cover return spec if datatype == MPI.DATATYPE_NULL: raise ValueError("cannot convert null MPI datatype to NumPy") combiner = datatype.combiner # predefined datatype if combiner == MPI.COMBINER_NAMED: typecode = _get_typecode(datatype) if typecode is not None: return np_dtype(typecode) raise ValueError("cannot convert MPI datatype to NumPy") # user-defined datatype basetype, _, info = datatype.decode() datatypes = [basetype] try: # duplicated datatype if combiner == MPI.COMBINER_DUP: return to_numpy_dtype(basetype) # contiguous datatype if combiner == MPI.COMBINER_CONTIGUOUS: dtype = to_numpy_dtype(basetype) count = info['count'] return np_dtype((dtype, (count,))) # subarray datatype if combiner == MPI.COMBINER_SUBARRAY: dtype = to_numpy_dtype(basetype) sizes = info['sizes'] subsizes = info['subsizes'] starts = info['starts'] order = info['order'] assert subsizes == sizes assert min(starts) == max(starts) == 0 if order == MPI.ORDER_FORTRAN: sizes = sizes[::-1] return np_dtype((dtype, tuple(sizes))) # struct datatype aligned = True if combiner == MPI.COMBINER_RESIZED: if basetype.combiner == MPI.COMBINER_STRUCT: aligned = _is_aligned(basetype, info['extent']) combiner = MPI.COMBINER_STRUCT _, _, info = basetype.decode() datatypes.pop().Free() if combiner == MPI.COMBINER_STRUCT: datatypes = info['datatypes'] blocklengths = info['blocklengths'] displacements = info['displacements'] names = list(map('f{}'.format, range(len(datatypes)))) formats = list(map(mpi2npy, datatypes, blocklengths)) offsets = displacements itemsize = datatype.extent aligned &= all(map(_is_aligned, datatypes, offsets)) return np_dtype( { 'names': names, 'formats': formats, 'offsets': offsets, 'itemsize': itemsize, 'aligned': aligned, } ) # vector datatype combiner_vector = ( MPI.COMBINER_VECTOR, MPI.COMBINER_HVECTOR, ) if combiner in combiner_vector: dtype = to_numpy_dtype(basetype) count = info['count'] blocklength = info['blocklength'] stride = info['stride'] if 
combiner == MPI.COMBINER_VECTOR: stride *= basetype.extent aligned = _is_aligned(basetype) if combiner == MPI.COMBINER_HVECTOR: stride = stride if count > 1 else 0 aligned = _is_aligned(basetype, stride) names = list(map('f{0}'.format, range(count))) formats = [(dtype, (blocklength,))] * count offsets = [stride * i for i in range(count)] itemsize = datatype.extent return np_dtype( { 'names': names, 'formats': formats, 'offsets': offsets, 'itemsize': itemsize, 'aligned': aligned, } ) # indexed datatype combiner_indexed = ( MPI.COMBINER_INDEXED, MPI.COMBINER_HINDEXED, MPI.COMBINER_INDEXED_BLOCK, MPI.COMBINER_HINDEXED_BLOCK, ) if combiner in combiner_indexed: dtype = to_numpy_dtype(basetype) stride = 1 aligned = _is_aligned(basetype) displacements = info['displacements'] if combiner in combiner_indexed[:2]: blocklengths = info['blocklengths'] if combiner in combiner_indexed[2:]: blocklengths = [info['blocklength']] * len(displacements) if combiner in combiner_indexed[0::2]: stride = basetype.extent if combiner in combiner_indexed[1::2]: aligned &= all(_is_aligned(basetype, d) for d in displacements) names = list(map('f{}'.format, range(len(displacements)))) formats = [(dtype, (blen,)) for blen in blocklengths] offsets = [disp * stride for disp in displacements] return np_dtype( { 'names': names, 'formats': formats, 'offsets': offsets, 'aligned': aligned, } ) # Fortran 90 datatype combiner_f90 = ( MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX, ) if combiner in combiner_f90: datatypes.pop() typesize = datatype.Get_size() typecode = 'ifc'[combiner_f90.index(combiner)] return np_dtype('{0}{1:d}'.format(typecode, typesize)) raise ValueError("cannot convert MPI datatype to NumPy") finally: for _tp in datatypes: if not _tp.is_predefined: _tp.Free() mpi4py-3.1.6/src/mpi4py/util/dtlib.pyi000066400000000000000000000003471460670727200175710ustar00rootroot00000000000000from __future__ import annotations from ..MPI import Datatype from numpy import dtype from numpy.typing import DTypeLike def from_numpy_dtype(dtype: DTypeLike) -> Datatype: ... def to_numpy_dtype(datatype: Datatype) -> dtype: ... mpi4py-3.1.6/src/mpi4py/util/pkl5.py000066400000000000000000000436451460670727200172050ustar00rootroot00000000000000# Author: Lisandro Dalcin # Contact: dalcinl@gmail.com """Pickle-based communication using protocol 5.""" import os as _os import sys as _sys import struct as _struct from .. 
import MPI from ..MPI import ( PROC_NULL, ANY_SOURCE, ANY_TAG, Status, ) from ..MPI import ( _typedict, _comm_lock, _commctx_inter, memory as _memory, Pickle as _Pickle, ) if _sys.version_info >= (3, 8): from pickle import ( dumps as _dumps, loads as _loads, HIGHEST_PROTOCOL as _PROTOCOL, ) else: # pragma: no cover try: from pickle5 import ( dumps as _dumps, loads as _loads, HIGHEST_PROTOCOL as _PROTOCOL, ) except ImportError: _PROTOCOL = MPI.Pickle().PROTOCOL def _dumps(obj, *_p, **_kw): return MPI.pickle.dumps(obj) def _loads(buf, *_p, **_kw): return MPI.pickle.loads(buf) def _buffer_handler(protocol, threshold): bufs = [] if protocol is None or protocol < 0: protocol = _PROTOCOL if protocol < 5: return bufs, None buffer_len = len buffer_raw = _memory buffer_add = bufs.append def buf_cb(buf): buf = buffer_raw(buf) if buffer_len(buf) >= threshold: buffer_add(buf) return False return True return bufs, buf_cb def _get_threshold(default): varname = 'MPI4PY_PICKLE_THRESHOLD' return int(_os.environ.get(varname, default)) class Pickle(_Pickle): """Pickle/unpickle Python objects using out-of-band buffers.""" THRESHOLD = _get_threshold(1024**2 // 4) # 0.25 MiB def __init__(self, dumps=_dumps, loads=_loads, protocol=_PROTOCOL): """Initialize pickle context.""" # pylint: disable=useless-super-delegation super().__init__(dumps, loads, protocol) def dumps(self, obj): """Serialize object to data and out-of-band buffers.""" bufs, buf_cb = _buffer_handler(self.PROTOCOL, self.THRESHOLD) data = super().dumps(obj, buf_cb) return data, bufs def loads(self, data, bufs): """Deserialize object from data and out-of-band buffers.""" # pylint: disable=useless-super-delegation return super().loads(data, bufs) pickle = Pickle() def _bigmpi_create_type(basetype, count, blocksize): qsize, rsize = divmod(count, blocksize) qtype = basetype.Create_vector( qsize, blocksize, blocksize) rtype = basetype.Create_contiguous(rsize) rdisp = qtype.Get_extent()[1] bigtype = MPI.Datatype.Create_struct( (1, 1), (0, rdisp), (qtype, rtype)) qtype.Free() rtype.Free() return bigtype class _BigMPI: """Support for large message counts.""" blocksize = 1024**3 # 1 GiB def __init__(self): self.cache = {} def __enter__(self): return self def __exit__(self, *exc): cache = self.cache for dtype in cache.values(): dtype.Free() cache.clear() def __call__(self, buf): buf = _memory(buf) count = len(buf) blocksize = self.blocksize if count < blocksize: return (buf, count, MPI.BYTE) cache = self.cache dtype = cache.get(count) if dtype is not None: return (buf, 1, dtype) dtype = _bigmpi_create_type(MPI.BYTE, count, blocksize) cache[count] = dtype.Commit() return (buf, 1, dtype) _bigmpi = _BigMPI() def _info_typecode(): return 'q' def _info_datatype(): code = _info_typecode() return _typedict[code] def _info_pack(info): code = _info_typecode() size = len(info) sfmt = "{0}{1}".format(size, code) return _struct.pack(sfmt, *info) def _info_alloc(size): code = _info_typecode() itemsize = _struct.calcsize(code) return bytearray(size * itemsize) def _info_unpack(info): code = _info_typecode() itemsize = _struct.calcsize(code) size = len(info) // itemsize sfmt = "{0}{1}".format(size, code) return _struct.unpack(sfmt, info) def _new_buffer(size): return MPI.memory.allocate(size) def _send_raw(comm, send, data, bufs, dest, tag): # pylint: disable=too-many-arguments info = [len(data)] info.extend(len(_memory(sbuf)) for sbuf in bufs) infotype = _info_datatype() info = _info_pack(info) send(comm, (info, infotype), dest, tag) with _bigmpi as bigmpi: send(comm, 
bigmpi(data), dest, tag) for sbuf in bufs: send(comm, bigmpi(sbuf), dest, tag) def _send(comm, send, obj, dest, tag): if dest == PROC_NULL: send(comm, (None, 0, MPI.BYTE), dest, tag) return data, bufs = pickle.dumps(obj) with _comm_lock(comm, 'send'): _send_raw(comm, send, data, bufs, dest, tag) def _isend(comm, isend, obj, dest, tag): sreqs = [] def send(comm, buf, dest, tag): sreqs.append(isend(comm, buf, dest, tag)) _send(comm, send, obj, dest, tag) request = Request(sreqs) return request def _recv_raw(comm, recv, buf, source, tag, status=None): # pylint: disable=too-many-arguments if status is None: status = Status() MPI.Comm.Probe(comm, source, tag, status) source = status.Get_source() tag = status.Get_tag() infotype = _info_datatype() infosize = status.Get_elements(infotype) info = _info_alloc(infosize) MPI.Comm.Recv(comm, (info, infotype), source, tag, status) info = _info_unpack(info) if buf is not None: buf = _memory.frombuffer(buf) if len(buf) > info[0]: buf = buf[:info[0]] if len(buf) < info[0]: buf = None data = _new_buffer(info[0]) if buf is None else buf bufs = list(map(_new_buffer, info[1:])) with _bigmpi as bigmpi: recv(comm, bigmpi(data), source, tag) for rbuf in bufs: recv(comm, bigmpi(rbuf), source, tag) status.Set_elements(MPI.BYTE, sum(info)) return data, bufs def _recv(comm, recv, buf, source, tag, status): # pylint: disable=too-many-arguments if source == PROC_NULL: recv(comm, (None, 0, MPI.BYTE), source, tag, status) return None with _comm_lock(comm, 'recv'): data, bufs = _recv_raw(comm, recv, buf, source, tag, status) return pickle.loads(data, bufs) def _mprobe(comm, mprobe, source, tag, status): if source == PROC_NULL: rmsg = MPI.Comm.Mprobe(comm, source, tag, status) return Message([rmsg]) if status is None: status = Status() with _comm_lock(comm, 'recv'): message = [] numbytes = 0 rmsg = mprobe(comm, source, tag, status) if rmsg is None: return None message.append(rmsg) source = status.Get_source() tag = status.Get_tag() infotype = _info_datatype() infosize = status.Get_elements(infotype) for _ in range(infosize): rmsg = MPI.Comm.Mprobe(comm, source, tag, status) message.append(rmsg) numbytes += status.Get_elements(MPI.BYTE) status.Set_elements(MPI.BYTE, numbytes) return Message(message) def _mrecv_info(rmsg, size, status=None): mrecv = MPI.Message.Recv infotype = _info_datatype() info = _info_alloc(size) mrecv(rmsg, (info, infotype), status) info = _info_unpack(info) return info def _mrecv_none(rmsg, mrecv, status): _mrecv_info(rmsg, 0, status) noproc = MPI.MESSAGE_NO_PROC mrecv(noproc, (None, 0, MPI.BYTE)) data, bufs = pickle.dumps(None) return (bytearray(data), bufs) def _mrecv_data(message, mrecv, status=None): if message[0] == MPI.MESSAGE_NO_PROC: rmsg = message[0] return _mrecv_none(rmsg, mrecv, status) rmsg = iter(message) icnt = len(message) - 1 info = _mrecv_info(next(rmsg), icnt, status) data = _new_buffer(info[0]) bufs = list(map(_new_buffer, info[1:])) with _bigmpi as bigmpi: mrecv(next(rmsg), bigmpi(data)) for rbuf in bufs: mrecv(next(rmsg), bigmpi(rbuf)) if status is not None: status.Set_elements(MPI.BYTE, sum(info)) return (data, bufs) def _mrecv(message, status): def mrecv(rmsg, buf): MPI.Message.Recv(rmsg, buf) data, bufs = _mrecv_data(message, mrecv, status) return pickle.loads(data, bufs) def _imrecv(message): rreqs = [] def mrecv(rmsg, buf): rreqs.append(MPI.Message.Irecv(rmsg, buf)) data, bufs = _mrecv_data(message, mrecv) request = Request(rreqs) setattr(request, '_data_bufs', (data, bufs)) return request def _req_load(request): data_bufs = 
getattr(request, '_data_bufs', None) if request == MPI.REQUEST_NULL and data_bufs is not None: delattr(request, '_data_bufs') if data_bufs is not None: data, bufs = data_bufs obj = pickle.loads(data, bufs) return obj return None def _test(request, test, status): statuses = None if status is None else [status] flag = test(request, statuses) if flag: obj = _req_load(request) return (flag, obj) return (flag, None) def _testall(requests, testall, statuses): if isinstance(statuses, list): for _ in range(len(requests) - len(statuses)): statuses.append(Status()) reqarray = [] stsarray = None for req in requests: reqarray.extend(req) if statuses is not None: stsarray = [] for req, sts in zip(requests, statuses): stsarray.extend([sts] * len(req)) flag = testall(reqarray, stsarray) if flag: objs = [_req_load(req) for req in requests] return (flag, objs) return (flag, None) def _bcast_intra_raw(comm, bcast, data, bufs, root): rank = comm.Get_rank() if rank == root: info = [len(data)] info.extend(len(_memory(sbuf)) for sbuf in bufs) infotype = _info_datatype() infosize = _info_pack([len(info)]) bcast(comm, (infosize, infotype), root) info = _info_pack(info) bcast(comm, (info, infotype), root) else: infotype = _info_datatype() infosize = _info_alloc(1) bcast(comm, (infosize, infotype), root) infosize = _info_unpack(infosize)[0] info = _info_alloc(infosize) bcast(comm, (info, infotype), root) info = _info_unpack(info) data = _new_buffer(info[0]) bufs = list(map(_new_buffer, info[1:])) with _bigmpi as bigmpi: bcast(comm, bigmpi(data), root) for rbuf in bufs: bcast(comm, bigmpi(rbuf), root) return data, bufs def _bcast_intra(comm, bcast, obj, root): rank = comm.Get_rank() if rank == root: data, bufs = pickle.dumps(obj) else: data, bufs = pickle.dumps(None) with _comm_lock(comm, 'bcast'): data, bufs = _bcast_intra_raw(comm, bcast, data, bufs, root) return pickle.loads(data, bufs) def _bcast_inter(comm, bcast, obj, root): rank = comm.Get_rank() size = comm.Get_remote_size() comm, tag, localcomm, _ = _commctx_inter(comm) if root == MPI.PROC_NULL: return None elif root == MPI.ROOT: send = MPI.Comm.Send data, bufs = pickle.dumps(obj) _send_raw(comm, send, data, bufs, 0, tag) return None elif 0 <= root < size: if rank == 0: recv = MPI.Comm.Recv data, bufs = _recv_raw(comm, recv, None, root, tag) else: data, bufs = pickle.dumps(None) with _comm_lock(localcomm, 'bcast'): data, bufs = _bcast_intra_raw(localcomm, bcast, data, bufs, 0) return pickle.loads(data, bufs) comm.Call_errhandler(MPI.ERR_ROOT) raise MPI.Exception(MPI.ERR_ROOT) def _bcast(comm, bcast, obj, root): if comm.Is_inter(): return _bcast_inter(comm, bcast, obj, root) else: return _bcast_intra(comm, bcast, obj, root) class Request(tuple): """Request.""" def __new__(cls, request=None): """Create and return a new object.""" if request is None: request = (MPI.REQUEST_NULL,) if isinstance(request, MPI.Request): request = (request,) return super().__new__(cls, request) def __eq__(self, other): """Return ``self==other``.""" if isinstance(other, Request): return tuple(self) == tuple(other) if isinstance(other, MPI.Request): return all(req == other for req in self) return NotImplemented def __ne__(self, other): """Return ``self!=other``.""" if isinstance(other, Request): return tuple(self) != tuple(other) if isinstance(other, MPI.Request): return any(req != other for req in self) return NotImplemented def __bool__(self): """Return ``bool(self)``.""" return any(req for req in self) def Free(self) -> None: """Free a communication request.""" # pylint: 
disable=invalid-name for req in self: req.Free() def cancel(self): """Cancel a communication request.""" # pylint: disable=invalid-name for req in self: req.Cancel() def get_status(self, status=None): """Non-destructive test for the completion of a request.""" # pylint: disable=invalid-name statuses = [status] + [None] * max(len(self) - 1, 0) return all(map(MPI.Request.Get_status, self, statuses)) def test(self, status=None): """Test for the completion of a request.""" return _test(self, MPI.Request.Testall, status) def wait(self, status=None): """Wait for a request to complete.""" return _test(self, MPI.Request.Waitall, status)[1] @classmethod def testall(cls, requests, statuses=None): """Test for the completion of all requests.""" return _testall(requests, MPI.Request.Testall, statuses) @classmethod def waitall(cls, requests, statuses=None): """Wait for all requests to complete.""" return _testall(requests, MPI.Request.Waitall, statuses)[1] class Message(tuple): """Message.""" def __new__(cls, message=None): """Create and return a new object.""" if message is None: message = (MPI.MESSAGE_NULL,) if isinstance(message, MPI.Message): message = (message,) return super().__new__(cls, message) def __eq__(self, other): """Return ``self==other``.""" if isinstance(other, Message): return tuple(self) == tuple(other) if isinstance(other, MPI.Message): return all(msg == other for msg in self) return NotImplemented def __ne__(self, other): """Return ``self!=other``.""" if isinstance(other, Message): return tuple(self) != tuple(other) if isinstance(other, MPI.Message): return any(msg != other for msg in self) return NotImplemented def __bool__(self): """Return ``bool(self)``.""" return any(msg for msg in self) def recv(self, status=None): """Blocking receive of matched message.""" return _mrecv(self, status) def irecv(self): """Nonblocking receive of matched message.""" return _imrecv(self) @classmethod def probe(cls, comm, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Blocking test for a matched message.""" return _mprobe(comm, MPI.Comm.Mprobe, source, tag, status) @classmethod def iprobe(cls, comm, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Nonblocking test for a matched message.""" return _mprobe(comm, MPI.Comm.Improbe, source, tag, status) class Comm(MPI.Comm): """Communicator.""" def send(self, obj, dest, tag=0): """Blocking send in standard mode.""" _send(self, MPI.Comm.Send, obj, dest, tag) def bsend(self, obj, dest, tag=0): """Blocking send in buffered mode.""" _send(self, MPI.Comm.Bsend, obj, dest, tag) def ssend(self, obj, dest, tag=0): """Blocking send in synchronous mode.""" sreq = _isend(self, MPI.Comm.Issend, obj, dest, tag) MPI.Request.Waitall(sreq) def isend(self, obj, dest, tag=0): """Nonblocking send in standard mode.""" return _isend(self, MPI.Comm.Isend, obj, dest, tag) def ibsend(self, obj, dest, tag=0): """Nonblocking send in buffered mode.""" return _isend(self, MPI.Comm.Ibsend, obj, dest, tag) def issend(self, obj, dest, tag=0): """Nonblocking send in synchronous mode.""" return _isend(self, MPI.Comm.Issend, obj, dest, tag) def recv(self, buf=None, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Blocking receive.""" return _recv(self, MPI.Comm.Recv, buf, source, tag, status) def irecv(self, buf=None, source=ANY_SOURCE, tag=ANY_TAG): """Nonblocking receive.""" raise RuntimeError("unsupported") def sendrecv(self, sendobj, dest, sendtag=0, recvbuf=None, source=ANY_SOURCE, recvtag=ANY_TAG, status=None): """Send and receive.""" # pylint: disable=too-many-arguments sreq = 
_isend(self, MPI.Comm.Isend, sendobj, dest, sendtag) robj = _recv(self, MPI.Comm.Recv, recvbuf, source, recvtag, status) MPI.Request.Waitall(sreq) return robj def mprobe(self, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Blocking test for a matched message.""" return _mprobe(self, MPI.Comm.Mprobe, source, tag, status) def improbe(self, source=ANY_SOURCE, tag=ANY_TAG, status=None): """Nonblocking test for a matched message.""" return _mprobe(self, MPI.Comm.Improbe, source, tag, status) def bcast(self, obj, root=0): """Broadcast.""" return _bcast(self, MPI.Comm.Bcast, obj, root) class Intracomm(Comm, MPI.Intracomm): """Intracommunicator.""" class Intercomm(Comm, MPI.Intercomm): """Intercommunicator.""" mpi4py-3.1.6/src/mpi4py/util/pkl5.pyi000066400000000000000000000105251460670727200173450ustar00rootroot00000000000000from __future__ import annotations from .. import MPI from ..MPI import PROC_NULL, ANY_SOURCE, ANY_TAG from ..MPI import Status, Datatype from typing import Any, Literal, Optional from typing import Callable, Iterable, Sequence from typing import Dict, List, Tuple from typing import overload Buffer = Any class Pickle: PROTOCOL: int = ... THRESHOLD: int = ... def __init__(self, dumps: Callable[[Any, int], bytes] = ..., loads: Callable[[Buffer], Any] = ..., protocol: Optional[int] = ..., ) -> None: ... def dumps(self, obj: Any) -> Tuple[bytes, List[Buffer]]: ... def loads(self, data: Buffer, bufs: Iterable[Buffer]): ... pickle: Pickle = ... class _BigMPI: blocksize: int = ... cache: Dict[int, Datatype] = ... def __init__(self) -> None: ... def __enter__(self) -> _BigMPI: ... def __exit__(self, *exc: Any) -> None: ... def __call__(self, buf: Buffer) -> Tuple[Buffer, int, Datatype]: ... _bigmpi: _BigMPI = ... class Request(Tuple[MPI.Request, ...]): @overload def __new__(cls, request: Optional[MPI.Request] = None) -> Request: ... @overload def __new__(cls, request: Iterable[MPI.Request]) -> Request: ... def __eq__(self, other: Any) -> bool: ... def __ne__(self, other: Any) -> bool: ... def __bool__(self) -> bool: ... def Free(self) -> None: ... def cancel(self) -> None: ... def get_status(self, status: Optional[Status] = None) -> bool: ... def test(self, status: Optional[Status] = None) -> Tuple[bool, Optional[Any]]: ... def wait(self, status: Optional[Status] = None) -> Any: ... @classmethod def testall(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> Tuple[bool, Optional[List[Any]]]: ... @classmethod def waitall(cls, requests: Sequence[Request], statuses: Optional[List[Status]] = None) -> List[Any]: ... class Message(Tuple[MPI.Message, ...]): @overload def __new__(cls, message: Optional[MPI.Message] = None) -> Message: ... @overload def __new__(cls, message: Iterable[MPI.Message]) -> Message: ... def __eq__(self, other: Any) -> bool: ... def __ne__(self, other: Any) -> bool: ... def __bool__(self) -> bool: ... def recv(self, status: Optional[Status] = None) -> Any: ... def irecv(self) -> Request: ... @classmethod def probe( cls, comm: MPI.Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None, ) -> Message: ... @classmethod def iprobe( cls, comm: MPI.Comm, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None, ) -> Optional[Message]: ... class Comm(MPI.Comm): def send(self, obj: Any, dest: int, tag: int = 0) -> None: ... def bsend(self, obj: Any, dest: int, tag: int = 0) -> None: ... def ssend(self, obj: Any, dest: int, tag: int = 0) -> None: ... 
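# Illustrative aside (not part of the original stub file): the pkl5 communicator
# classes typed here wrap MPI communicators with pickle protocol 5 semantics, so
# buffers at or above Pickle.THRESHOLD are shipped out-of-band instead of being
# copied into the pickle stream. A minimal usage sketch, assuming NumPy is available:
#
#     from mpi4py import MPI
#     from mpi4py.util import pkl5
#     import numpy
#
#     comm = pkl5.Intracomm(MPI.COMM_WORLD)   # drop-in subclass of MPI.Intracomm
#     if comm.Get_rank() == 0:
#         payload = {"data": numpy.zeros(1 << 20)}   # large buffer -> out-of-band
#         comm.send(payload, dest=1, tag=7)
#     elif comm.Get_rank() == 1:
#         payload = comm.recv(source=0, tag=7)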
def isend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... # type: ignore[override] def ibsend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... # type: ignore[override] def issend(self, obj: Any, dest: int, tag: int = 0) -> Request: ... # type: ignore[override] def recv( self, buf: Optional[Buffer] = None, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None, ) -> Any: ... def irecv( # type: ignore[override] self, buf: Optional[Buffer] = None, source: int = ANY_SOURCE, tag: int = ANY_TAG, ) -> Request: ... def sendrecv( self, sendobj: Any, dest: int, sendtag: int = 0, recvbuf: Optional[Buffer] = None, source: int = ANY_SOURCE, recvtag: int = ANY_TAG, status: Optional[Status] = None, ) -> Any: ... def mprobe( # type: ignore[override] self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None, ) -> Message: ... def improbe( # type: ignore[override] self, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Optional[Status] = None, ) -> Optional[Message]: ... def bcast( self, obj: Any, root: int = 0, ) -> Any: ... class Intracomm(Comm, MPI.Intracomm): ... class Intercomm(Comm, MPI.Intercomm): ... mpi4py-3.1.6/src/msvcfix.h000066400000000000000000000006641460670727200154030ustar00rootroot00000000000000#if defined(_MSC_VER) #if _MSC_VER <= 1600 #define _Out_writes_( x ) #endif #if _MSC_VER <= 1500 #pragma include_alias( , ) typedef signed __int8 int8_t; typedef unsigned __int8 uint8_t; typedef signed __int16 int16_t; typedef unsigned __int16 uint16_t; typedef signed __int32 int32_t; typedef unsigned __int32 uint32_t; typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; #endif #endif mpi4py-3.1.6/src/pycompat.h000066400000000000000000000053441460670727200155600ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ /* ------------------------------------------------------------------------- */ #ifdef PYPY_VERSION #ifndef PyByteArray_Check #define PyByteArray_Check(self) PyObject_TypeCheck(self, &PyByteArray_Type) #endif #ifndef PyByteArray_AsString static char* PyByteArray_AsString(PyObject* o) { PyErr_SetString(PyExc_RuntimeError, "PyPy: PyByteArray_AsString() not available"); (void)o; return NULL; } #endif #ifndef PyByteArray_Size static Py_ssize_t PyByteArray_Size(PyObject* o) { PyErr_SetString(PyExc_RuntimeError, "PyPy: PyByteArray_Size() not available"); (void)o; return -1; } #endif #endif/*PYPY_VERSION*/ /* ------------------------------------------------------------------------- */ /* Legacy Python 2 buffer interface */ static int _Py2_IsBuffer(PyObject *obj) { #if PY_VERSION_HEX < 0x03000000 return PyObject_CheckReadBuffer(obj); #else (void)obj; return 0; #endif } static int _Py2_AsBuffer(PyObject *obj, int *readonly, void **buf, Py_ssize_t *size) { #if defined(PYPY_VERSION) || PY_VERSION_HEX < 0x03000000 const void **rbuf = (const void**)buf; if (!PyObject_AsWriteBuffer(obj, buf, size)) {*readonly = 0; return 0;} PyErr_Clear(); if (!PyObject_AsReadBuffer(obj, rbuf, size)) {*readonly = 1; return 0;} PyErr_Clear(); PyErr_SetString(PyExc_TypeError, "expected a buffer object"); return -1; #else (void)obj; (void)readonly; (void)buf; (void)size; PyErr_SetString(PyExc_SystemError, "Legacy buffer interface not available in Python 3"); return -1; #endif } /* ------------------------------------------------------------------------- */ #ifdef PYPY_VERSION #if PY_VERSION_HEX < 0x03030000 #ifdef PySlice_GetIndicesEx #undef PySlice_GetIndicesEx #define PySlice_GetIndicesEx(s, n, start, 
stop, step, length) \ PyPySlice_GetIndicesEx((PySliceObject *)(s), n, start, stop, step, length) #else #define PySlice_GetIndicesEx(s, n, start, stop, step, length) \ PySlice_GetIndicesEx((PySliceObject *)(s), n, start, stop, step, length) #endif #endif #else #if PY_VERSION_HEX < 0x03020000 #define PySlice_GetIndicesEx(s, n, start, stop, step, length) \ PySlice_GetIndicesEx((PySliceObject *)(s), n, start, stop, step, length) #endif #endif /* ------------------------------------------------------------------------- */ #ifdef PYPY_VERSION #ifndef Py_IgnoreEnvironmentFlag #define Py_IgnoreEnvironmentFlag 0 #endif #ifndef Py_GETENV #define Py_GETENV(s) (Py_IgnoreEnvironmentFlag ? NULL : getenv(s)) #endif #endif/*PYPY_VERSION*/ /* ------------------------------------------------------------------------- */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/pympicommctx.h000066400000000000000000000125151460670727200164530ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #include #ifndef PyMPI_MALLOC #define PyMPI_MALLOC malloc #endif #ifndef PyMPI_FREE #define PyMPI_FREE free #endif #ifndef MPIAPI #define MPIAPI #endif #undef CHKERR #define CHKERR(ierr) do { if (ierr != MPI_SUCCESS) return ierr; } while(0) typedef struct { MPI_Comm dupcomm; MPI_Comm localcomm; int tag; int low_group; } PyMPI_Commctx; static int PyMPI_Commctx_KEYVAL = MPI_KEYVAL_INVALID; static int PyMPI_Commctx_TAG_UB = -1; static int PyMPI_Commctx_new(PyMPI_Commctx **_commctx) { PyMPI_Commctx *commctx; if (PyMPI_Commctx_TAG_UB < 0) { int ierr, *attrval = NULL, flag = 0; ierr = MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &attrval, &flag); CHKERR(ierr); PyMPI_Commctx_TAG_UB = (flag && attrval) ? *attrval : 32767; } commctx = (PyMPI_Commctx *)PyMPI_MALLOC(sizeof(PyMPI_Commctx)); if (commctx) { commctx->dupcomm = MPI_COMM_NULL; commctx->localcomm = MPI_COMM_NULL; commctx->tag = 0; commctx->low_group = -1; } *_commctx = commctx; return MPI_SUCCESS; } static int MPIAPI PyMPI_Commctx_free_fn(MPI_Comm comm, int k, void *v, void *xs) { int ierr, finalized = 1; PyMPI_Commctx *commctx = (PyMPI_Commctx *)v; (void)comm; (void)k; (void)xs; /* unused */ if (!commctx) return MPI_SUCCESS; ierr = MPI_Finalized(&finalized); CHKERR(ierr); if (finalized) goto fn_exit; if (commctx->localcomm != MPI_COMM_NULL) {ierr = MPI_Comm_free(&commctx->localcomm); CHKERR(ierr);} if (commctx->dupcomm != MPI_COMM_NULL) {ierr = MPI_Comm_free(&commctx->dupcomm); CHKERR(ierr);} fn_exit: PyMPI_FREE(commctx); return MPI_SUCCESS; } static int PyMPI_Commctx_keyval(int *keyval) { int ierr; if (PyMPI_Commctx_KEYVAL != MPI_KEYVAL_INVALID) goto fn_exit; ierr = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, PyMPI_Commctx_free_fn, &PyMPI_Commctx_KEYVAL, NULL); CHKERR(ierr); fn_exit: if (keyval) *keyval = PyMPI_Commctx_KEYVAL; return MPI_SUCCESS; } static int PyMPI_Commctx_lookup(MPI_Comm comm, PyMPI_Commctx **_commctx) { int ierr, found = 0, keyval = MPI_KEYVAL_INVALID; PyMPI_Commctx *commctx = NULL; ierr = PyMPI_Commctx_keyval(&keyval); CHKERR(ierr); ierr = MPI_Comm_get_attr(comm, keyval, &commctx, &found); CHKERR(ierr); if (found && commctx) goto fn_exit; ierr = PyMPI_Commctx_new(&commctx); CHKERR(ierr); if (!commctx) {(void)MPI_Comm_call_errhandler(comm, MPI_ERR_INTERN); return MPI_ERR_INTERN;} ierr = MPI_Comm_set_attr(comm, keyval, commctx); CHKERR(ierr); ierr = MPI_Comm_dup(comm, &commctx->dupcomm); CHKERR(ierr); fn_exit: if (commctx->tag >= PyMPI_Commctx_TAG_UB) commctx->tag = 0; if (_commctx) 
*_commctx = commctx; return MPI_SUCCESS; } static int PyMPI_Commctx_clear(MPI_Comm comm) { int ierr, found = 0, keyval = PyMPI_Commctx_KEYVAL; PyMPI_Commctx *commctx = NULL; if (keyval == MPI_KEYVAL_INVALID) return MPI_SUCCESS; ierr = MPI_Comm_get_attr(comm, keyval, &commctx, &found); CHKERR(ierr); if (found) {ierr = MPI_Comm_delete_attr(comm, keyval); CHKERR(ierr);} return MPI_SUCCESS; } static int PyMPI_Commctx_intra(MPI_Comm comm, MPI_Comm *dupcomm, int *tag) { int ierr; PyMPI_Commctx *commctx = NULL; ierr = PyMPI_Commctx_lookup(comm, &commctx);CHKERR(ierr); if (dupcomm) *dupcomm = commctx->dupcomm; if (tag) *tag = commctx->tag++; return MPI_SUCCESS; } static int PyMPI_Commctx_inter(MPI_Comm comm, MPI_Comm *dupcomm, int *tag, MPI_Comm *localcomm, int *low_group) { int ierr; PyMPI_Commctx *commctx = NULL; ierr = PyMPI_Commctx_lookup(comm, &commctx);CHKERR(ierr); if (commctx->localcomm == MPI_COMM_NULL) { int localsize, remotesize, mergerank; MPI_Comm mergecomm = MPI_COMM_NULL; ierr = MPI_Comm_size(comm, &localsize); CHKERR(ierr); ierr = MPI_Comm_remote_size(comm, &remotesize); CHKERR(ierr); ierr = MPI_Intercomm_merge(comm, localsize>remotesize, &mergecomm); CHKERR(ierr); ierr = MPI_Comm_rank(mergecomm, &mergerank); CHKERR(ierr); commctx->low_group = ((localsize>remotesize) ? 0 : (localsize 2) || (MPI_VERSION == 2 && MPI_SUBVERSION >= 2) { MPI_Group localgroup = MPI_GROUP_NULL; ierr = MPI_Comm_group(comm, &localgroup); CHKERR(ierr); ierr = MPI_Comm_create(mergecomm, localgroup, &commctx->localcomm); CHKERR(ierr); ierr = MPI_Group_free(&localgroup); CHKERR(ierr); } #else ierr = MPI_Comm_split(mergecomm, commctx->low_group, 0, &commctx->localcomm); CHKERR(ierr); #endif ierr = MPI_Comm_free(&mergecomm); CHKERR(ierr); } if (dupcomm) *dupcomm = commctx->dupcomm; if (tag) *tag = commctx->tag++; if (localcomm) *localcomm = commctx->localcomm; if (low_group) *low_group = commctx->low_group; return MPI_SUCCESS; } static int PyMPI_Commctx_finalize(void) { int ierr; if (PyMPI_Commctx_KEYVAL == MPI_KEYVAL_INVALID) return MPI_SUCCESS; ierr = PyMPI_Commctx_clear(MPI_COMM_SELF); CHKERR(ierr); ierr = PyMPI_Commctx_clear(MPI_COMM_WORLD); CHKERR(ierr); ierr = MPI_Comm_free_keyval(&PyMPI_Commctx_KEYVAL); CHKERR(ierr); PyMPI_Commctx_TAG_UB = -1; return MPI_SUCCESS; } #undef CHKERR /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/pympistatus.h000066400000000000000000000016231460670727200163220ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ #if defined(MPIX_HAVE_MPI_STATUS_GETSET) #define PyMPI_Status_get_source MPIX_Status_get_source #define PyMPI_Status_set_source MPIX_Status_set_source #define PyMPI_Status_get_tag MPIX_Status_get_tag #define PyMPI_Status_set_tag MPIX_Status_set_tag #define PyMPI_Status_get_error MPIX_Status_get_error #define PyMPI_Status_set_error MPIX_Status_set_error #else #define PyMPI_Status_GETSET(name,NAME) \ static int PyMPI_Status_get_##name(MPI_Status *s, int *i) \ { if (s && i) { *i = s->MPI_##NAME; } return MPI_SUCCESS; } \ static int PyMPI_Status_set_##name(MPI_Status *s, int i) \ { if (s) { s->MPI_##NAME = i; } return MPI_SUCCESS; } \ PyMPI_Status_GETSET( source, SOURCE ) PyMPI_Status_GETSET( tag, TAG ) PyMPI_Status_GETSET( error, ERROR ) #undef PyMPI_Status_GETSET #endif mpi4py-3.1.6/src/pympivendor.h000066400000000000000000000073001460670727200162720ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ static int PyMPI_Get_vendor(const char **vendor_name, int 
*version_major, int *version_minor, int *version_micro) { const char *name = "unknown"; int major=0, minor=0, micro=0; #if defined(I_MPI_VERSION) name = "Intel MPI"; #if defined(I_MPI_NUMVERSION) {int version = I_MPI_NUMVERSION/1000; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #else (void)sscanf(I_MPI_VERSION,"%d.%d Update %d",&major,&minor,µ); #endif #elif defined(PLATFORM_MPI) name = "Platform MPI"; major = (PLATFORM_MPI>>24)&0xff; minor = (PLATFORM_MPI>>16)&0xff; micro = (PLATFORM_MPI>> 8)&0xff; major = (major/16)*10+(major%16); #elif defined(MSMPI_VER) name = "Microsoft MPI"; major = MSMPI_VER >> 8; minor = MSMPI_VER & 0xFF; #elif defined(MVAPICH_VERSION) || defined(MVAPICH_NUMVERSION) name = "MVAPICH"; #if defined(MVAPICH_NUMVERSION) {int version = MVAPICH_NUMVERSION/1000; if (version<1000) version *= 100; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MVAPICH_VERSION) (void)sscanf(MVAPICH_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MVAPICH2_VERSION) || defined(MVAPICH2_NUMVERSION) name = "MVAPICH2"; #if defined(MVAPICH2_NUMVERSION) {int version = MVAPICH2_NUMVERSION/1000; if (version<1000) version *= 100; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MVAPICH2_VERSION) (void)sscanf(MVAPICH2_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MPICH_NAME) && (MPICH_NAME >= 3) name = "MPICH"; #if defined(MPICH_NUMVERSION) {int version = MPICH_NUMVERSION/1000; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MPICH_VERSION) (void)sscanf(MPICH_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MPICH_NAME) && (MPICH_NAME == 2) name = "MPICH2"; #if defined(MPICH2_NUMVERSION) {int version = MPICH2_NUMVERSION/1000; major = version/10000; version -= major*10000; minor = version/100; version -= minor*100; micro = version/1; version -= micro*1; } #elif defined(MPICH2_VERSION) (void)sscanf(MPICH2_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(MPICH_NAME) && (MPICH_NAME == 1) name = "MPICH1"; #if defined(MPICH_VERSION) (void)sscanf(MPICH_VERSION,"%d.%d.%d",&major,&minor,µ); #endif #elif defined(OPEN_MPI) name = "Open MPI"; #if defined(OMPI_MAJOR_VERSION) major = OMPI_MAJOR_VERSION; #endif #if defined(OMPI_MINOR_VERSION) minor = OMPI_MINOR_VERSION; #endif #if defined(OMPI_RELEASE_VERSION) micro = OMPI_RELEASE_VERSION; #endif #if defined(OMPI_MAJOR_VERSION) #if OMPI_MAJOR_VERSION >= 10 name = "Spectrum MPI"; #endif #endif #elif defined(LAM_MPI) name = "LAM/MPI"; #if defined(LAM_MAJOR_VERSION) major = LAM_MAJOR_VERSION; #endif #if defined(LAM_MINOR_VERSION) minor = LAM_MINOR_VERSION; #endif #if defined(LAM_RELEASE_VERSION) micro = LAM_RELEASE_VERSION; #endif #endif if (vendor_name) *vendor_name = name; if (version_major) *version_major = major; if (version_minor) *version_minor = minor; if (version_micro) *version_micro = micro; return 0; } /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/src/python.c000066400000000000000000000063561460670727200152440ustar00rootroot00000000000000/* Author: Lisandro Dalcin */ /* Contact: dalcinl@gmail.com */ /* -------------------------------------------------------------------------- */ #include #define MPICH_IGNORE_CXX_SEEK 1 
#define OMPI_IGNORE_CXX_SEEK 1 #include #if PY_MAJOR_VERSION <= 2 #define Py_BytesMain Py_Main #elif PY_VERSION_HEX < 0x03070000 static int Py_BytesMain(int, char **); #elif PY_VERSION_HEX < 0x03080000 PyAPI_FUNC(int) _Py_UnixMain(int, char **); #define Py_BytesMain _Py_UnixMain #endif /* -------------------------------------------------------------------------- */ int main(int argc, char **argv) { int status = 0, flag = 1, finalize = 0; /* MPI initalization */ (void)MPI_Initialized(&flag); if (!flag) { #if defined(MPI_VERSION) && (MPI_VERSION > 1) int required = MPI_THREAD_MULTIPLE; int provided = MPI_THREAD_SINGLE; (void)MPI_Init_thread(&argc, &argv, required, &provided); #else (void)MPI_Init(&argc, &argv); #endif finalize = 1; } /* Python main */ status = Py_BytesMain(argc, argv); /* MPI finalization */ (void)MPI_Finalized(&flag); if (!flag) { if (status) (void)MPI_Abort(MPI_COMM_WORLD, status); if (finalize) (void)MPI_Finalize(); } return status; } /* -------------------------------------------------------------------------- */ #if PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x03070000 #include static wchar_t **mk_wargs(int, char **); static wchar_t **cp_wargs(int, wchar_t **); static void rm_wargs(wchar_t **, int); static int Py_BytesMain(int argc, char **argv) { int sts = 0; wchar_t **wargv = mk_wargs(argc, argv); wchar_t **wargv2 = cp_wargs(argc, wargv); if (wargv && wargv2) sts = Py_Main(argc, wargv); else sts = 1; rm_wargs(wargv2, 1); rm_wargs(wargv, 0); return sts; } #if PY_VERSION_HEX < 0x03050000 #define Py_DecodeLocale _Py_char2wchar #endif static wchar_t ** mk_wargs(int argc, char **argv) { int i; char *saved_locale = NULL; wchar_t **args = NULL; args = (wchar_t **)malloc((size_t)(argc+1)*sizeof(wchar_t *)); if (!args) goto oom; saved_locale = strdup(setlocale(LC_ALL, NULL)); if (!saved_locale) goto oom; setlocale(LC_ALL, ""); for (i=0; i= 3) */ /* -------------------------------------------------------------------------- */ /* Local variables: c-basic-offset: 2 indent-tabs-mode: nil End: */ mpi4py-3.1.6/test/000077500000000000000000000000001460670727200137355ustar00rootroot00000000000000mpi4py-3.1.6/test/arrayimpl.py000066400000000000000000000320551460670727200163140ustar00rootroot00000000000000import sys from mpi4py import MPI try: from collections import OrderedDict except ImportError: OrderedDict = dict try: import array except ImportError: array = None try: import numpy except ImportError: numpy = None try: import cupy except ImportError: cupy = None try: import numba import numba.cuda from distutils.version import StrictVersion numba_version = StrictVersion(numba.__version__).version if numba_version < (0, 48): import warnings warnings.warn('To test Numba GPU arrays, use Numba v0.48.0+.', RuntimeWarning) numba = None except ImportError: numba = None __all__ = ['allclose', 'subTest'] def allclose(a, b, rtol=1.e-5, atol=1.e-8): try: iter(a) except TypeError: a = [a] try: iter(b) except TypeError: b = [b] for x, y in zip(a, b): if abs(x-y) > (atol + rtol * abs(y)): return False return True def make_typemap(entries): typemap = OrderedDict(entries) for typecode, datatype in entries: if datatype == MPI.DATATYPE_NULL: del typemap[typecode] return typemap TypeMap = make_typemap([ ('b', MPI.SIGNED_CHAR), ('h', MPI.SHORT), ('i', MPI.INT), ('l', MPI.LONG), ('q', MPI.LONG_LONG), ('f', MPI.FLOAT), ('d', MPI.DOUBLE), ('g', MPI.LONG_DOUBLE), ]) TypeMapBool = make_typemap([ ('?', MPI.C_BOOL), ]) TypeMapInteger = make_typemap([ ('b', MPI.SIGNED_CHAR), ('h', MPI.SHORT), ('i', MPI.INT), 
('l', MPI.LONG), ('q', MPI.LONG_LONG), ]) TypeMapUnsigned = make_typemap([ ('B', MPI.UNSIGNED_CHAR), ('H', MPI.UNSIGNED_SHORT), ('I', MPI.UNSIGNED_INT), ('L', MPI.UNSIGNED_LONG), ('Q', MPI.UNSIGNED_LONG_LONG), ]) TypeMapFloat = make_typemap([ ('f', MPI.FLOAT), ('d', MPI.DOUBLE), ('g', MPI.LONG_DOUBLE), ]) TypeMapComplex = make_typemap([ ('F', MPI.C_FLOAT_COMPLEX), ('D', MPI.C_DOUBLE_COMPLEX), ('G', MPI.C_LONG_DOUBLE_COMPLEX), ]) ArrayBackends = [] def add_backend(cls): ArrayBackends.append(cls) return cls class BaseArray(object): backend = None TypeMap = TypeMap.copy() TypeMap.pop('g', None) if sys.version_info[:2] < (3, 3): TypeMap.pop('q', None) def __len__(self): return len(self.array) def __getitem__(self, i): return self.array[i] def __setitem__(self, i, v): self.array[i] = v @property def mpidtype(self): try: return self.TypeMap[self.typecode] except KeyError: return MPI.DATATYPE_NULL def as_raw(self): return self.array def as_mpi(self): return (self.as_raw(), self.mpidtype) def as_mpi_c(self, count): return (self.as_raw(), count, self.mpidtype) def as_mpi_v(self, cnt, dsp): return (self.as_raw(), (cnt, dsp), self.mpidtype) if array is not None: def product(seq): res = 1 for s in seq: res = res * s return res def mkshape(shape): return tuple([int(s) for s in shape]) @add_backend class ArrayArray(BaseArray): backend = 'array' def __init__(self, arg, typecode, shape=None): if isinstance(arg, (int, float)): if shape is None: shape = () else: try: shape = mkshape(shape) except TypeError: shape = (int(shape),) size = product(shape) arg = [arg] * size else: size = len(arg) if shape is None: shape = (size,) else: shape = mkshape(shape) assert size == product(shape) self.array = array.array(typecode, arg) @property def address(self): return self.array.buffer_info()[0] @property def typecode(self): return self.array.typecode @property def itemsize(self): return self.array.itemsize @property def flat(self): return self.array @property def size(self): return self.array.buffer_info()[1] if numpy is not None: @add_backend class ArrayNumPy(BaseArray): backend = 'numpy' TypeMap = make_typemap([]) #TypeMap.update(TypeMapBool) TypeMap.update(TypeMapInteger) #TypeMap.update(TypeMapUnsigned) TypeMap.update(TypeMapFloat) TypeMap.update(TypeMapComplex) def __init__(self, arg, typecode, shape=None): if isinstance(arg, (int, float, complex)): if shape is None: shape = () else: if shape is None: shape = len(arg) self.array = numpy.zeros(shape, typecode) if isinstance(arg, (int, float, complex)): self.array.fill(arg) else: self.array[:] = numpy.asarray(arg, typecode) @property def address(self): return self.array.__array_interface__['data'][0] @property def typecode(self): return self.array.dtype.char @property def itemsize(self): return self.array.itemsize @property def flat(self): return self.array.flat @property def size(self): return self.array.size try: import dlpackimpl as dlpack except ImportError: dlpack = None class BaseDLPackCPU(object): def __dlpack_device__(self): return (dlpack.DLDeviceType.kDLCPU, 0) def __dlpack__(self, stream=None): assert stream is None capsule = dlpack.make_py_capsule(self.array) return capsule def as_raw(self): return self if dlpack is not None and array is not None: @add_backend class DLPackArray(BaseDLPackCPU, ArrayArray): backend = 'dlpack-array' def __init__(self, arg, typecode, shape=None): super(DLPackArray, self).__init__(arg, typecode, shape) if dlpack is not None and numpy is not None: @add_backend class DLPackNumPy(BaseDLPackCPU, ArrayNumPy): backend = 
'dlpack-numpy' def __init__(self, arg, typecode, shape=None): super(DLPackNumPy, self).__init__(arg, typecode, shape) def typestr(typecode, itemsize): typestr = '' if sys.byteorder == 'little': typestr += '<' if sys.byteorder == 'big': typestr += '>' if typecode in '?': typestr += 'b' if typecode in 'bhilq': typestr += 'i' if typecode in 'BHILQ': typestr += 'u' if typecode in 'fdg': typestr += 'f' if typecode in 'FDG': typestr += 'c' typestr += str(itemsize) return typestr class BaseFakeGPUArray(object): def set_interface(self, shape, readonly=False): self.__cuda_array_interface__ = dict( version = 0, data = (self.address, readonly), typestr = typestr(self.typecode, self.itemsize), shape = shape, ) def as_raw(self): return self if array is not None: @add_backend class FakeGPUArrayBasic(BaseFakeGPUArray, ArrayArray): def __init__(self, arg, typecode, shape=None, readonly=False): super(FakeGPUArrayBasic, self).__init__(arg, typecode, shape) self.set_interface((len(self),), readonly) if numpy is not None: @add_backend class FakeGPUArrayNumPy(BaseFakeGPUArray, ArrayNumPy): def __init__(self, arg, typecode, shape=None, readonly=False): super(FakeGPUArrayNumPy, self).__init__(arg, typecode, shape) self.set_interface(self.array.shape, readonly) if cupy is not None: @add_backend class GPUArrayCuPy(BaseArray): backend = 'cupy' TypeMap = make_typemap([]) #TypeMap.update(TypeMapBool) TypeMap.update(TypeMapInteger) #TypeMap.update(TypeMapUnsigned) TypeMap.update(TypeMapFloat) TypeMap.update(TypeMapComplex) try: cupy.array(0, 'g') except ValueError: TypeMap.pop('g', None) try: cupy.array(0, 'G') except ValueError: TypeMap.pop('G', None) def __init__(self, arg, typecode, shape=None, readonly=False): if isinstance(arg, (int, float, complex)): if shape is None: shape = () else: if shape is None: shape = len(arg) self.array = cupy.zeros(shape, typecode) if isinstance(arg, (int, float, complex)): self.array.fill(arg) else: self.array[:] = cupy.asarray(arg, typecode) @property def address(self): return self.array.__cuda_array_interface__['data'][0] @property def typecode(self): return self.array.dtype.char @property def itemsize(self): return self.array.itemsize @property def flat(self): return self.array.ravel() @property def size(self): return self.array.size def as_raw(self): cupy.cuda.get_current_stream().synchronize() return self.array if cupy is not None: # Note: we do not create a BaseDLPackGPU class because each GPU library # has its own way to get device ID etc, so we have to reimplement the # DLPack support anyway @add_backend class DLPackCuPy(GPUArrayCuPy): backend = 'dlpack-cupy' has_dlpack = None dev_type = None def __init__(self, arg, typecode, shape=None): super().__init__(arg, typecode, shape) self.has_dlpack = hasattr(self.array, '__dlpack_device__') # TODO(leofang): test CUDA managed memory? 
if cupy.cuda.runtime.is_hip: self.dev_type = dlpack.DLDeviceType.kDLROCM else: self.dev_type = dlpack.DLDeviceType.kDLCUDA def __dlpack_device__(self): if self.has_dlpack: return self.array.__dlpack_device__() else: return (self.dev_type, self.array.device.id) def __dlpack__(self, stream=None): cupy.cuda.get_current_stream().synchronize() if self.has_dlpack: return self.array.__dlpack__(stream=-1) else: return self.array.toDlpack() def as_raw(self): return self if numba is not None: @add_backend class GPUArrayNumba(BaseArray): backend = 'numba' TypeMap = make_typemap([]) #TypeMap.update(TypeMapBool) TypeMap.update(TypeMapInteger) #TypeMap.update(TypeMapUnsigned) TypeMap.update(TypeMapFloat) TypeMap.update(TypeMapComplex) # one can allocate arrays with those types, # but the Numba compiler doesn't support them... TypeMap.pop('g', None) TypeMap.pop('G', None) def __init__(self, arg, typecode, shape=None, readonly=False): if isinstance(arg, (int, float, complex)): if shape is None: shape = () else: if shape is None: shape = len(arg) self.array = numba.cuda.device_array(shape, typecode) if isinstance(arg, (int, float, complex)): if self.array.size > 0: self.array[:] = arg elif arg == [] or arg == (): self.array = numba.cuda.device_array(0, typecode) else: if self.array.size > 0: self.array[:] = numba.cuda.to_device(arg) # def __getitem__(self, i): # if isinstance(i, slice): # return self.array[i] # elif i < self.array.size: # return self.array[i] # else: # raise StopIteration @property def address(self): return self.array.__cuda_array_interface__['data'][0] @property def typecode(self): return self.array.dtype.char @property def itemsize(self): return self.array.dtype.itemsize @property def flat(self): if self.array.ndim <= 1: return self.array else: return self.array.ravel() @property def size(self): return self.array.size def as_raw(self): # numba by default always runs on the legacy default stream numba.cuda.default_stream().synchronize() return self.array def subTest(case, skip=(), skiptypecode=()): for array in ArrayBackends: if array.backend == skip: continue if array.backend in skip: continue for typecode in array.TypeMap: if typecode == skiptypecode: continue if typecode in skiptypecode: continue with case.subTest(backend=array.backend, typecode=typecode): try: yield array, typecode except GeneratorExit: return mpi4py-3.1.6/test/dlpackimpl.py000066400000000000000000000143751460670727200164410ustar00rootroot00000000000000import sys import ctypes try: from enum import IntEnum except ImportError: IntEnum = object if hasattr(sys, 'pypy_version_info'): raise ImportError("unsupported on PyPy") class DLDeviceType(IntEnum): kDLCPU = 1 kDLCUDA = 2 kDLCUDAHost = 3 kDLOpenCL = 4 kDLVulkan = 7 kDLMetal = 8 kDLVPI = 9 kDLROCM = 10 kDLROCMHost = 11 kDLExtDev = 12 kDLCUDAManaged = 13 class DLDevice(ctypes.Structure): _fields_ = [ ("device_type", ctypes.c_uint), ("device_id", ctypes.c_int), ] class DLDataTypeCode(IntEnum): kDLInt = 0 kDLUInt = 1 kDLFloat = 2 kDLOpaqueHandle = 3 kDLBfloat = 4 kDLComplex = 5 class DLDataType(ctypes.Structure): _fields_ = [ ("code", ctypes.c_uint8), ("bits", ctypes.c_uint8), ("lanes", ctypes.c_uint16), ] class DLTensor(ctypes.Structure): _fields_ = [ ("data", ctypes.c_void_p), ("device", DLDevice), ("ndim", ctypes.c_int), ("dtype", DLDataType), ("shape", ctypes.POINTER(ctypes.c_int64)), ("strides", ctypes.POINTER(ctypes.c_int64)), ("byte_offset", ctypes.c_uint64), ] DLManagedTensorDeleter = ctypes.CFUNCTYPE(None, ctypes.c_void_p) class DLManagedTensor(ctypes.Structure): 
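# ctypes mirror of DLManagedTensor from the DLPack C header: the tensor
# payload plus an opaque manager context and a deleter callback that the
# consumer is expected to invoke when it no longer needs the buffer.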
_fields_ = [ ("dl_tensor", DLTensor), ("manager_ctx", ctypes.c_void_p), ("deleter", DLManagedTensorDeleter), ] pyapi = ctypes.pythonapi DLManagedTensor_p = ctypes.POINTER(DLManagedTensor) Py_IncRef = pyapi.Py_IncRef Py_IncRef.restype = None Py_IncRef.argtypes = [ctypes.py_object] Py_DecRef = pyapi.Py_DecRef Py_DecRef.restype = None Py_DecRef.argtypes = [ctypes.py_object] PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p) PyCapsule_New = pyapi.PyCapsule_New PyCapsule_New.restype = ctypes.py_object PyCapsule_New.argtypes = [ctypes.c_void_p, ctypes.c_char_p, PyCapsule_Destructor] PyCapsule_IsValid = pyapi.PyCapsule_IsValid PyCapsule_IsValid.restype = ctypes.c_int PyCapsule_IsValid.argtypes = [ctypes.py_object] PyCapsule_GetPointer = pyapi.PyCapsule_GetPointer PyCapsule_GetPointer.restype = ctypes.c_void_p PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p] PyCapsule_SetContext = pyapi.PyCapsule_SetContext PyCapsule_SetContext.restype = ctypes.c_int PyCapsule_SetContext.argtypes = [ctypes.py_object, ctypes.c_void_p] PyCapsule_GetContext = pyapi.PyCapsule_GetContext PyCapsule_GetContext.restype = ctypes.c_void_p PyCapsule_GetContext.argtypes = [ctypes.py_object] def make_dl_datatype(typecode, itemsize): code = None bits = itemsize * 8 lanes = 1 if typecode in "bhilqnp": code = DLDataTypeCode.kDLInt if typecode in "BHILQNP": code = DLDataTypeCode.kDLUInt if typecode in "efdg": code = DLDataTypeCode.kDLFloat if typecode in "FDG": code = DLDataTypeCode.kDLComplex if typecode == "G" and itemsize == 32: code = DLDataTypeCode.kDLFloat bits //= 2 lanes *= 2 datatype = DLDataType() datatype.code = code datatype.bits = bits datatype.lanes = lanes return datatype def make_dl_shape(shape, order=None, strides=None): null = ctypes.cast(0, ctypes.POINTER(ctypes.c_int64)) if isinstance(shape, int): shape = [shape] ndim = len(shape) if ndim == 0: shape = null strides = null else: shape = (ctypes.c_int64*ndim)(*shape) if order == 'C': size = 1 strides = [] for i in range(ndim-1, -1, -1): strides.append(size) size *= shape[i] strides = (ctypes.c_int64*ndim)(*strides) elif order == 'F': size = 1 strides = [] for i in range(ndim): strides.append(size) size *= shape[i] strides = (ctypes.c_int64*ndim)(*strides) elif strides is not None: strides = (ctypes.c_int64*ndim)(*strides) else: strides = null return ndim, shape, strides def make_dl_tensor(obj): try: data, size = obj.buffer_info() typecode = obj.typecode itemsize = obj.itemsize except AttributeError: data = obj.ctypes.data size = obj.size typecode = obj.dtype.char itemsize = obj.itemsize device = DLDevice(DLDeviceType.kDLCPU, 0) datatype = make_dl_datatype(typecode, itemsize) ndim, shape, strides = make_dl_shape(size) dltensor = DLTensor() dltensor.data = data if size > 0 else 0 dltensor.device = device dltensor.ndim = ndim dltensor.dtype = datatype dltensor.shape = shape dltensor.strides = strides dltensor.byte_offset = 0 return dltensor def make_dl_manager_ctx(obj): py_obj = ctypes.py_object(obj) if False: Py_IncRef(py_obj) void_p = ctypes.c_void_p.from_buffer(py_obj) return void_p @DLManagedTensorDeleter def dl_managed_tensor_deleter(void_p): managed = ctypes.cast(void_p, DLManagedTensor_p) manager_ctx = managed.contents.manager_ctx py_obj = ctypes.cast(manager_ctx, ctypes.py_object) if False: Py_DecRef(py_obj) def make_dl_managed_tensor(obj): managed = DLManagedTensor() managed.dl_tensor = make_dl_tensor(obj) managed.manager_ctx = make_dl_manager_ctx(obj) managed.deleter = dl_managed_tensor_deleter return managed def 
make_py_context(context): py_obj = ctypes.py_object(context) Py_IncRef(py_obj) context = ctypes.c_void_p.from_buffer(py_obj) return ctypes.c_void_p(context.value) @PyCapsule_Destructor def py_capsule_destructor(void_p): capsule = ctypes.cast(void_p, ctypes.py_object) if PyCapsule_IsValid(capsule, b"dltensor"): pointer = PyCapsule_GetPointer(capsule, b"dltensor") managed = ctypes.cast(pointer, DLManagedTensor_p) deleter = managed.contents.deleter if deleter: deleter(managed) context = PyCapsule_GetContext(capsule) managed = ctypes.cast(context, ctypes.py_object) Py_DecRef(managed) def make_py_capsule(managed): if not isinstance(managed, DLManagedTensor): managed = make_dl_managed_tensor(managed) pointer = ctypes.pointer(managed) capsule = PyCapsule_New(pointer, b"dltensor", py_capsule_destructor) context = make_py_context(managed) PyCapsule_SetContext(capsule, context) return capsule mpi4py-3.1.6/test/mpiunittest.py000066400000000000000000000070671460670727200167060ustar00rootroot00000000000000import os import sys import glob import unittest from distutils.versionpredicate import VersionPredicate class TestCase(unittest.TestCase): def assertRaisesMPI(self, IErrClass, callableObj, *args, **kwargs): from mpi4py.MPI import Exception as excClass, Get_version try: callableObj(*args, **kwargs) except NotImplementedError: if Get_version() >= (2, 0): raise self.failureException("raised NotImplementedError") except excClass: excValue = sys.exc_info()[1] error_class = excValue.Get_error_class() if isinstance(IErrClass, (list, tuple)): match = (error_class in IErrClass) else: match = (error_class == IErrClass) if not match: if isinstance(IErrClass, (list, tuple)): IErrClassName = [ErrClsName(e) for e in IErrClass] IErrClassName = type(IErrClass)(IErrClassName) else: IErrClassName = ErrClsName(IErrClass) raise self.failureException( "generated error class is '%s' (%d), " "but expected '%s' (%s)" % \ (ErrClsName(error_class), error_class, IErrClassName, IErrClass,) ) else: if hasattr(excClass,'__name__'): excName = excClass.__name__ else: excName = str(excClass) raise self.failureException("%s not raised" % excName) try: unittest.TestCase.subTest except AttributeError: class _SubTestManager(object): def __init__(self, case, msg=None, **params): pass def __enter__(self): pass def __exit__(self, *args): pass def subTest(self, msg=None, **params): return self._SubTestManager(self, msg, **params) ErrClsMap = None def ErrClsName(ierr): global ErrClsMap if ErrClsMap is None: from mpi4py import MPI ErrClsMap = {} ErrClsMap[MPI.SUCCESS] = 'SUCCESS' for entry in dir(MPI): if entry.startswith('ERR_'): errcls = getattr(MPI, entry) ErrClsMap[errcls] = entry try: return ErrClsMap[ierr] except KeyError: return '' def mpi_predicate(predicate): from mpi4py import MPI def key(s): s = s.replace(' ', '') s = s.replace('/', '') s = s.replace('-', '') s = s.replace('Microsoft', 'MS') return s.lower() vp = VersionPredicate(key(predicate)) if vp.name == 'mpi': name, version = 'mpi', MPI.Get_version() version = version + (0,) else: name, version = MPI.get_vendor() if vp.name == key(name): if vp.satisfied_by('%d.%d.%d' % version): return vp return None def is_mpi_gpu(predicate, array): if array.backend in ('cupy', 'numba', 'dlpack-cupy'): if mpi_predicate(predicate): return True return False SkipTest = unittest.SkipTest skip = unittest.skip skipIf = unittest.skipIf skipUnless = unittest.skipUnless def skipMPI(predicate, *conditions): version = mpi_predicate(predicate) if version: if not conditions or any(conditions): return 
unittest.skip(str(version)) return unittest.skipIf(False, '') def disable(what, reason): return unittest.skip(reason)(what) def main(*args, **kargs): try: unittest.main(*args, **kargs) except SystemExit: pass mpi4py-3.1.6/test/runtests.py000066400000000000000000000254001460670727200161770ustar00rootroot00000000000000import sys, os import optparse import unittest def getoptionparser(): parser = optparse.OptionParser() parser.add_option("-q", "--quiet", action="store_const", const=0, dest="verbose", default=1, help="minimal output") parser.add_option("-v", "--verbose", action="store_const", const=2, dest="verbose", default=1, help="verbose output") parser.add_option("-i", "--include", type="string", action="append", dest="include", default=[], help="include tests matching PATTERN", metavar="PATTERN") parser.add_option("-e", "--exclude", type="string", action="append", dest="exclude", default=[], help="exclude tests matching PATTERN", metavar="PATTERN") parser.add_option("-f", "--failfast", action="store_true", dest="failfast", default=False, help="stop on first failure") parser.add_option("-c", "--catch", action="store_true", dest="catchbreak", default=False, help="catch Control-C and display results") parser.add_option("-k", "--pattern", type="string", action="append", dest="patterns", default=[], help="only run tests which match the given substring") parser.add_option("--no-builddir", action="store_false", dest="builddir", default=True, help="disable testing from build directory") parser.add_option("--path", type="string", action="append", dest="path", default=[], help="prepend PATH to sys.path", metavar="PATH") parser.add_option("--refleaks", type="int", action="store", dest="repeats", default=3, help="run tests REPEAT times in a loop to catch leaks", metavar="REPEAT") parser.add_option("--threads", action="store_true", dest="threads", default=None, help="initialize MPI with thread support") parser.add_option("--no-threads", action="store_false", dest="threads", default=None, help="initialize MPI without thread support") parser.add_option("--thread-level", type="choice", choices=["single", "funneled", "serialized", "multiple"], action="store", dest="thread_level", default=None, help="initialize MPI with required thread support") parser.add_option("--mpe", action="store_true", dest="mpe", default=False, help="use MPE for MPI profiling") parser.add_option("--vt", action="store_true", dest="vt", default=False, help="use VampirTrace for MPI profiling") parser.add_option("--cupy", action="store_true", dest="cupy", default=False, help="enable testing with CuPy arrays") parser.add_option("--no-cupy", action="store_false", dest="cupy", default=False, help="disable testing with CuPy arrays") parser.add_option("--numba", action="store_true", dest="numba", default=False, help="enable testing with Numba arrays") parser.add_option("--no-numba", action="store_false", dest="numba", default=False, help="disable testing with Numba arrays") parser.add_option("--no-numpy", action="store_false", dest="numpy", default=True, help="disable testing with NumPy arrays") parser.add_option("--no-array", action="store_false", dest="array", default=True, help="disable testing with builtin array module") parser.add_option("--no-skip-mpi", action="store_false", dest="skip_mpi", default=True, help="disable known failures with backend MPI") return parser def getbuilddir(): try: try: from setuptools.dist import Distribution except ImportError: from distutils.dist import Distribution try: from setuptools.command.build 
import build except ImportError: from distutils.command.build import build cmd_obj = build(Distribution()) cmd_obj.finalize_options() return cmd_obj.build_platlib except Exception: return None def setup_python(options): rootdir = os.path.dirname(os.path.dirname(__file__)) builddir = os.path.join(rootdir, getbuilddir()) if options.builddir and os.path.exists(builddir): sys.path.insert(0, builddir) if options.path: path = options.path[:] path.reverse() for p in path: sys.path.insert(0, p) def setup_unittest(options): from unittest import TestSuite try: from unittest.runner import _WritelnDecorator except ImportError: from unittest import _WritelnDecorator # writeln_orig = _WritelnDecorator.writeln def writeln(self, message=''): try: self.stream.flush() except: pass writeln_orig(self, message) try: self.stream.flush() except: pass _WritelnDecorator.writeln = writeln def import_package(options, pkgname): # if not options.cupy: sys.modules['cupy'] = None if not options.numba: sys.modules['numba'] = None if not options.numpy: sys.modules['numpy'] = None if not options.array: sys.modules['array'] = None # package = __import__(pkgname) # import mpi4py.rc if options.threads is not None: mpi4py.rc.threads = options.threads if options.thread_level is not None: mpi4py.rc.thread_level = options.thread_level if options.mpe: mpi4py.profile('mpe', logfile='runtests-mpi4py') if options.vt: mpi4py.profile('vt', logfile='runtests-mpi4py') import mpi4py.MPI # return package def getprocessorinfo(): from mpi4py import MPI rank = MPI.COMM_WORLD.Get_rank() name = MPI.Get_processor_name() return (rank, name) def getlibraryinfo(): from mpi4py import MPI info = "MPI %d.%d" % MPI.Get_version() name, version = MPI.get_vendor() if name != "unknown": info += (" (%s %s)" % (name, '%d.%d.%d' % version)) return info def getpythoninfo(): x, y = sys.version_info[:2] return ("Python %d.%d (%s)" % (x, y, sys.executable)) def getpackageinfo(pkg): return ("%s %s (%s)" % (pkg.__name__, pkg.__version__, pkg.__path__[0])) def writeln(message='', endl='\n'): sys.stderr.flush() sys.stderr.write(message+endl) sys.stderr.flush() def print_banner(options, package): r, n = getprocessorinfo() fmt = "[%d@%s] %s" if options.verbose: writeln(fmt % (r, n, getpythoninfo())) writeln(fmt % (r, n, getlibraryinfo())) writeln(fmt % (r, n, getpackageinfo(package))) def load_tests(options, args): # Find tests import re, glob testsuitedir = os.path.dirname(__file__) sys.path.insert(0, testsuitedir) pattern = 'test_*.py' wildcard = os.path.join(testsuitedir, pattern) testfiles = glob.glob(wildcard) include = exclude = None if options.include: include = re.compile('|'.join(options.include)).search if options.exclude: exclude = re.compile('|'.join(options.exclude)).search testnames = [] for testfile in testfiles: filename = os.path.basename(testfile) testname = os.path.splitext(filename)[0] if ((exclude and exclude(testname)) or (include and not include(testname))): continue testnames.append(testname) testnames.sort() # Handle options if not options.cupy: sys.modules['cupy'] = None if not options.numba: sys.modules['numba'] = None if not options.numpy: sys.modules['numpy'] = None if not options.array: sys.modules['array'] = None if not options.skip_mpi: import mpiunittest mpiunittest.skipMPI = lambda p, *c: lambda f: f # Load tests and populate suite testloader = unittest.TestLoader() if options.patterns: testloader.testNamePatterns = [ ('*%s*' % p) if ('*' not in p) else p for p in options.patterns] testsuite = unittest.TestSuite() for testname in 
testnames: module = __import__(testname) for arg in args: try: cases = testloader.loadTestsFromNames((arg,), module) except AttributeError: continue testsuite.addTests(cases) if not args: cases = testloader.loadTestsFromModule(module) testsuite.addTests(cases) return testsuite def run_tests(options, testsuite, runner=None): if runner is None: runner = unittest.TextTestRunner() runner.verbosity = options.verbose runner.failfast = options.failfast if options.catchbreak: unittest.installHandler() result = runner.run(testsuite) return result.wasSuccessful() def test_refleaks(options, args): from sys import gettotalrefcount from gc import collect testsuite = load_tests(options, args) testsuite._cleanup = False for case in testsuite: case._cleanup = False class EmptyIO(object): def write(self, *args): pass runner = unittest.TextTestRunner(stream=EmptyIO(), verbosity=0) rank, name = getprocessorinfo() r1 = r2 = 0 repeats = options.repeats while repeats: collect() r1 = gettotalrefcount() run_tests(options, testsuite, runner) collect() r2 = gettotalrefcount() leaks = r2-r1 if leaks and repeats < options.repeats: writeln('[%d@%s] refleaks: (%d - %d) --> %d' % (rank, name, r2, r1, leaks)) repeats -= 1 def abort(code=1): from mpi4py import MPI MPI.COMM_WORLD.Abort(code) def shutdown(success): from mpi4py import MPI def main(args=None): pkgname = 'mpi4py' parser = getoptionparser() (options, args) = parser.parse_args(args) setup_python(options) setup_unittest(options) package = import_package(options, pkgname) print_banner(options, package) testsuite = load_tests(options, args) success = run_tests(options, testsuite) if not success and options.failfast: abort() if success and hasattr(sys, 'gettotalrefcount'): test_refleaks(options, args) shutdown(success) return not success if __name__ == '__main__': import sys sys.dont_write_bytecode = True sys.exit(main()) mpi4py-3.1.6/test/spawn_child.py000066400000000000000000000007051460670727200166040ustar00rootroot00000000000000import sys; sys.path.insert(0, sys.argv[1]) import mpi4py if len(sys.argv) > 2: lfn = "runtests-mpi4py-child" mpe = sys.argv[2] == 'mpe' vt = sys.argv[2] == 'vt' if mpe: mpi4py.profile('mpe', logfile=lfn) if vt: mpi4py.profile('vt', logfile=lfn) from mpi4py import MPI parent = MPI.Comm.Get_parent() parent.Barrier() parent.Disconnect() assert parent == MPI.COMM_NULL parent = MPI.Comm.Get_parent() assert parent == MPI.COMM_NULL mpi4py-3.1.6/test/test_address.py000066400000000000000000000032421460670727200167740ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import array except ImportError: array = None try: import numpy except ImportError: numpy = None class TestAddress(unittest.TestCase): @unittest.skipIf(array is None, 'array') def testGetAddress1(self): from struct import pack, unpack location = array.array('i', range(10)) bufptr, _ = location.buffer_info() addr = MPI.Get_address(location) addr = unpack('P', pack('P', addr))[0] self.assertEqual(addr, bufptr) @unittest.skipIf(numpy is None, 'numpy') def testGetAddress2(self): from struct import pack, unpack location = numpy.asarray(range(10), dtype='i') bufptr, _ = location.__array_interface__['data'] addr = MPI.Get_address(location) addr = unpack('P', pack('P', addr))[0] self.assertEqual(addr, bufptr) def testBottom(self): base = MPI.Get_address(MPI.BOTTOM) addr = MPI.Aint_add(base, 0) self.assertEqual(addr, base) diff = MPI.Aint_diff(base, base) self.assertEqual(diff, 0) @unittest.skipIf(array is None, 'array') def testAintAdd(self): location = 
array.array('i', range(10)) base = MPI.Get_address(location) addr = MPI.Aint_add(base, 4) self.assertEqual(addr, base + 4) @unittest.skipIf(array is None, 'array') def testAintDiff(self): location = array.array('i', range(10)) base = MPI.Get_address(location) addr1 = base + 8 addr2 = base + 4 diff = MPI.Aint_diff(addr1, addr2) self.assertEqual(diff, 4) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_attributes.py000066400000000000000000000162561460670727200175460ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import array except ImportError: array = None class BaseTestAttr(object): keyval = MPI.KEYVAL_INVALID def tearDown(self): if self.obj: self.obj.Free() if self.keyval != MPI.KEYVAL_INVALID: self.keyval = type(self.obj).Free_keyval(self.keyval) self.assertEqual(self.keyval, MPI.KEYVAL_INVALID) def testAttr(self, copy_fn=None, delete_fn=None): cls, obj = type(self.obj), self.obj self.keyval = cls.Create_keyval(copy_fn, delete_fn) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attr = obj.Get_attr(self.keyval) self.assertEqual(attr, None) attrval = [1,2,3] obj.Set_attr(self.keyval, attrval) attr = obj.Get_attr(self.keyval) self.assertTrue(attr is attrval) if hasattr(obj, 'Dup'): dup = obj.Dup() attr = dup.Get_attr(self.keyval) if copy_fn is True: self.assertTrue(attr is attrval) elif not copy_fn: self.assertTrue(attr is None) dup.Free() obj.Delete_attr(self.keyval) attr = obj.Get_attr(self.keyval) self.assertTrue(attr is None) def testAttrCopyFalse(self): self.testAttr(False) def testAttrCopyTrue(self): self.testAttr(True) def testAttrNoCopy(self): cls, obj = type(self.obj), self.obj def copy_fn(o, k, v): assert k == self.keyval assert v is attrval return NotImplemented self.keyval = cls.Create_keyval(copy_fn, None) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attr = obj.Get_attr(self.keyval) self.assertEqual(attr, None) attrval = [1,2,3] obj.Set_attr(self.keyval, attrval) attr = obj.Get_attr(self.keyval) self.assertTrue(attr is attrval) if hasattr(obj, 'Dup'): dup = obj.Dup() attr = dup.Get_attr(self.keyval) self.assertTrue(attr is None) dup.Free() obj.Delete_attr(self.keyval) attr = obj.Get_attr(self.keyval) self.assertTrue(attr is None) def testAttrNoPython(self, intval=123456789): cls, obj = type(self.obj), self.obj def copy_fn(o, k, v): assert k == self.keyval assert v == intval return v def del_fn(o, k, v): assert k == self.keyval assert v == intval self.keyval = cls.Create_keyval(copy_fn, del_fn, nopython=True) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) attr = obj.Get_attr(self.keyval) self.assertEqual(attr, None) obj.Set_attr(self.keyval, intval) attr = obj.Get_attr(self.keyval) self.assertEqual(attr, intval) if hasattr(obj, 'Dup'): dup = obj.Dup() attr = dup.Get_attr(self.keyval) self.assertEqual(attr, intval) dup.Free() obj.Delete_attr(self.keyval) attr = obj.Get_attr(self.keyval) self.assertTrue(attr is None) @unittest.skipMPI('openmpi(<=1.10.2)') def testAttrNoPythonZero(self): self.testAttrNoPython(0) @unittest.skipIf(array is None, 'array') def testAttrNoPythonArray(self): cls, obj = type(self.obj), self.obj self.keyval = cls.Create_keyval(nopython=True) # ary = array.array('i', [42]) addr, _ = ary.buffer_info() obj.Set_attr(self.keyval, addr) # attr = obj.Get_attr(self.keyval) self.assertEqual(attr, addr) class BaseTestCommAttr(BaseTestAttr): NULL = MPI.COMM_NULL @unittest.skipMPI('openmpi(<=1.5.1)') def testAttrCopyDelete(self): cls, obj, null = type(self.obj), self.obj, self.NULL # 
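# The copy callback duplicates the stored communicator whenever the owning
# object is duplicated, and the delete callback frees it; the assertions that
# follow rely on these callbacks, not on explicit user calls, to drive
# Dup/Free of the attribute value.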
self.keyval = cls.Create_keyval( copy_fn=lambda o, k, v: cls.Dup(v), delete_fn=lambda o, k, v: cls.Free(v)) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) # obj1 = obj dup1 = obj1.Dup() obj1.Set_attr(self.keyval, dup1) self.assertTrue(dup1 != null) obj2 = obj1.Dup() dup2 = obj2.Get_attr(self.keyval) self.assertTrue(dup1 != dup2) obj2.Free() self.assertTrue(dup2 == null) self.obj.Delete_attr(self.keyval) self.assertTrue(dup1 == null) class TestCommAttrWorld(BaseTestCommAttr, unittest.TestCase): def setUp(self): self.obj = MPI.COMM_WORLD.Dup() class TestCommAttrSelf(BaseTestCommAttr, unittest.TestCase): def setUp(self): self.obj = MPI.COMM_SELF.Dup() class BaseTestDatatypeAttr(BaseTestAttr): NULL = MPI.DATATYPE_NULL def testAttrCopyDelete(self): cls, obj, null = type(self.obj), self.obj, self.NULL # self.keyval = cls.Create_keyval( copy_fn=lambda o, k, v: cls.Dup(v), delete_fn=lambda o, k, v: cls.Free(v)) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) # obj1 = obj dup1 = obj1.Dup() obj1.Set_attr(self.keyval, dup1) self.assertTrue(dup1 != null) obj2 = obj1.Dup() dup2 = obj2.Get_attr(self.keyval) self.assertTrue(dup1 != dup2) obj2.Free() self.assertTrue(dup2 == null) self.obj.Delete_attr(self.keyval) self.assertTrue(dup1 == null) class TestDatatypeAttrBYTE(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.obj = MPI.BYTE.Dup() class TestDatatypeAttrINT(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.obj = MPI.INT.Dup() class TestDatatypeAttrFLOAT(BaseTestDatatypeAttr, unittest.TestCase): def setUp(self): self.obj = MPI.FLOAT.Dup() class TestWinAttr(BaseTestAttr, unittest.TestCase): NULL = MPI.WIN_NULL def setUp(self): win = MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF) self.obj = self.win = win @unittest.skipMPI('openmpi(<=1.5.1)') @unittest.skipMPI('PlatformMPI') def testAttrCopyDelete(self): # null = self.NULL def delete_fn(o, k, v): assert isinstance(o, MPI.Win) assert k == self.keyval assert v is win MPI.Win.Free(v) self.keyval = MPI.Win.Create_keyval(delete_fn=delete_fn) self.assertNotEqual(self.keyval, MPI.KEYVAL_INVALID) # win = MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF) self.obj.Set_attr(self.keyval, win) self.assertTrue(win != null) self.obj.Delete_attr(self.keyval) self.assertTrue(win == null) try: k = MPI.Datatype.Create_keyval() k = MPI.Datatype.Free_keyval(k) except NotImplementedError: unittest.disable(BaseTestDatatypeAttr, 'mpi-type-attr') SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI' try: if SpectrumMPI: raise NotImplementedError MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() k = MPI.Win.Create_keyval() k = MPI.Win.Free_keyval(k) except (NotImplementedError, MPI.Exception): unittest.disable(TestWinAttr, 'mpi-win-attr') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cco_buf.py000066400000000000000000000672321460670727200167600ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl from functools import reduce prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start) def skip_op(typecode, op): if typecode in 'FDG': if op in (MPI.MAX, MPI.MIN): return True return False def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 class BaseTestCCOBuf(object): COMM = MPI.COMM_NULL def testBarrier(self): self.COMM.Barrier() def testBcast(self): size = self.COMM.Get_size() 
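# Broadcast round-robin: each rank takes a turn as root, fills its buffer
# with its own rank value, and every process verifies the received contents.
# The buffer specification is the usual mpi4py (buffer, datatype) form that
# as_mpi() builds, analogous to (purely illustrative) plain mpi4py usage such
# as comm.Bcast([buf, MPI.INT], root=0).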
rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): if rank == root: buf = array(root, typecode, root) else: buf = array( -1, typecode, root) self.COMM.Bcast(buf.as_mpi(), root=root) for value in buf: self.assertEqual(value, root) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): sbuf = array(root, typecode, root+1) if rank == root: rbuf = array(-1, typecode, (size,root+1)) else: rbuf = array([], typecode) self.COMM.Gather(sbuf.as_mpi(), rbuf.as_mpi(), root=root) if rank == root: for value in rbuf.flat: self.assertEqual(value, root) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): rbuf = array(-1, typecode, size) if rank == root: sbuf = array(root, typecode, (size, size)) else: sbuf = array([], typecode) self.COMM.Scatter(sbuf.as_mpi(), rbuf.as_mpi(), root=root) for value in rbuf: self.assertEqual(value, root) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): sbuf = array(root, typecode, root+1) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Allgather(sbuf.as_mpi(), rbuf.as_mpi()) for value in rbuf.flat: self.assertEqual(value, root) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): sbuf = array(root, typecode, (size, root+1)) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Alltoall(sbuf.as_mpi(), rbuf.as_mpi_c(root+1)) for value in rbuf.flat: self.assertEqual(value, root) def assertAlmostEqual(self, first, second): num = complex(second-first) den = complex(second+first)/2 or 1.0 if (abs(num/den) > 1e-2): raise self.failureException('%r != %r' % (first, second)) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): sbuf = array(range(size), typecode) rbuf = array(-1, typecode, size) self.COMM.Reduce(sbuf.as_mpi(), rbuf.as_mpi(), op, root) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if rank != root: self.assertEqual(value, -1) continue if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Allreduce(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, 
op): continue rcnt = list(range(1,size+1)) sbuf = array([rank+1]*sum(rcnt), typecode) rbuf = array(-1, typecode, rank+1) self.COMM.Reduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), None, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) rbuf = array(-1, typecode, rank+1) self.COMM.Reduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), rcnt, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(1,size): sbuf = array([rank]*rcnt*size, typecode) rbuf = array(-1, typecode, rcnt) if op == MPI.PROD: sbuf = array([rank+1]*rcnt*size, typecode) self.COMM.Reduce_scatter_block(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) v_sum = (size*(size-1))/2 v_prod = 1 for i in range(1,size+1): v_prod *= i v_max = size-1 v_min = 0 for i, value in enumerate(rbuf): if op == MPI.SUM: if v_sum <= max_val: self.assertAlmostEqual(value, v_sum) elif op == MPI.PROD: if v_prod <= max_val: self.assertAlmostEqual(value, v_prod) elif op == MPI.MAX: self.assertEqual(value, v_max) elif op == MPI.MIN: self.assertEqual(value, v_min) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Scan(sbuf.as_mpi(), rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) try: self.COMM.Exscan(sbuf.as_mpi(), rbuf.as_mpi(), op) except NotImplementedError: self.skipTest('mpi-exscan') if rank == 1: for i, value in enumerate(rbuf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testBcastTypeIndexed(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): datatype = array.TypeMap[typecode] for 
root in range(size): # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(0, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Bcast(newbuf, root=root) newtype.Free() if rank != root: for i, value in enumerate(buf): if (i % 2): self.assertEqual(value, -1) else: self.assertEqual(value, i) # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(1, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Bcast(newbuf, root) newtype.Free() if rank != root: for i, value in enumerate(buf): if not (i % 2): self.assertEqual(value, -1) else: self.assertEqual(value, i) class BaseTestCCOBufInplace(object): def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): count = root+3 if rank == root: sbuf = MPI.IN_PLACE buf = array(-1, typecode, (size, count)) #buf.flat[(rank*count):((rank+1)*count)] = \ # array(root, typecode, count) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = root rbuf = buf.as_mpi() else: buf = array(root, typecode, count) sbuf = buf.as_mpi() rbuf = None self.COMM.Gather(sbuf, rbuf, root=root) for value in buf.flat: self.assertEqual(value, root) @unittest.skipMPI('msmpi(==10.0.0)') def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(1, 10): if rank == root: buf = array(root, typecode, (size, count)) sbuf = buf.as_mpi() rbuf = MPI.IN_PLACE else: buf = array(-1, typecode, count) sbuf = None rbuf = buf.as_mpi() self.COMM.Scatter(sbuf, rbuf, root=root) for value in buf.flat: self.assertEqual(value, root) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for count in range(1, 10): buf = array(-1, typecode, (size, count)) #buf.flat[(rank*count):((rank+1)*count)] = \ # array(count, typecode, count) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = count self.COMM.Allgather(MPI.IN_PLACE, buf.as_mpi()) for value in buf.flat: self.assertEqual(value, count) def assertAlmostEqual(self, first, second): num = complex(second-first) den = complex(second+first)/2 or 1.0 if (abs(num/den) > 1e-2): raise self.failureException('%r != %r' % (first, second)) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): count = size if rank == root: buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() else: buf = array(range(size), typecode) buf2 = array(range(size), typecode) sbuf = buf.as_mpi() rbuf = buf2.as_mpi() self.COMM.Reduce(sbuf, rbuf, op, root) if rank == root: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op 
in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() self.COMM.Allreduce(sbuf, rbuf, op) max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # one of the ranks would fail as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(size): if op == MPI.PROD: rbuf = array([rank+1]*rcnt*size, typecode) else: rbuf = array([rank]*rcnt*size, typecode) self.COMM.Reduce_scatter_block(MPI.IN_PLACE, rbuf.as_mpi(), op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('MVAPICH2') def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1, size+1)) if op == MPI.PROD: rbuf = array([rank+1]*sum(rcnt), typecode) else: rbuf = array([rank]*sum(rcnt), typecode) self.COMM.Reduce_scatter(MPI.IN_PLACE, rbuf.as_mpi(), rcnt, op) max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt[rank]: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('openmpi(<=1.8.4)') def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) self.COMM.Scan(MPI.IN_PLACE, buf.as_mpi(), op) max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('msmpi(<=4.2.0)') @unittest.skipMPI('openmpi(<=1.8.4)') def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) try: self.COMM.Exscan(MPI.IN_PLACE, buf.as_mpi(), op) except NotImplementedError: self.skipTest('mpi-exscan') if rank 
== 1: for i, value in enumerate(buf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) class TestReduceLocal(unittest.TestCase): def testReduceLocal(self): for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue size = 5 sbuf = array(range(1,size+1), typecode) rbuf = array(range(0,size+0), typecode) try: op.Reduce_local(sbuf.as_mpi(), rbuf.as_mpi()) except NotImplementedError: self.skipTest('mpi-op-reduce_local') for i, value in enumerate(rbuf): self.assertEqual(sbuf[i], i+1) if op == MPI.SUM: self.assertAlmostEqual(value, i+(i+1)) elif op == MPI.PROD: self.assertAlmostEqual(value, i*(i+1)) elif op == MPI.MAX: self.assertEqual(value, i+1) elif op == MPI.MIN: self.assertEqual(value, i) def testReduceLocalBadCount(self): for array, typecode in arrayimpl.subTest(self): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): sbuf = array(range(3), typecode) rbuf = array(range(3), typecode) def f(): op.Reduce_local(sbuf.as_mpi_c(2), rbuf.as_mpi_c(3)) self.assertRaises(ValueError, f) def f(): op.Reduce_local([sbuf.as_raw(), 1, MPI.INT], [rbuf.as_raw(), 1, MPI.SHORT]) self.assertRaises(ValueError, f) class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') @unittest.skipIf(MPI.IN_PLACE == MPI.BOTTOM, 'mpi-in-place') class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') @unittest.skipIf(MPI.IN_PLACE == MPI.BOTTOM, 'mpi-in-place') class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('IntelMPI', MPI.COMM_WORLD.Get_size() > 1) def testReduceScatter(self): super(TestCCOBufInplaceWorld, self).testReduceScatter() class TestCCOBufSelfDup(TestCCOBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestCCOBufWorldDup(TestCCOBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cco_nb_buf.py000066400000000000000000000662721460670727200174420ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl from functools import reduce prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start) def skip_op(typecode, op): if typecode in 'FDG': if op in (MPI.MAX, MPI.MIN): return True return False def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 @unittest.skipMPI('msmpi(<8.1.0)') class BaseTestCCOBuf(object): COMM = MPI.COMM_NULL def testBarrier(self): self.COMM.Ibarrier().Wait() def testBcast(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in 
range(size): if rank == root: buf = array(root, typecode, root) else: buf = array( -1, typecode, root) self.COMM.Ibcast(buf.as_mpi(), root=root).Wait() for value in buf: self.assertEqual(value, root) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): sbuf = array(root, typecode, root+1) if rank == root: rbuf = array(-1, typecode, (size,root+1)) else: rbuf = array([], typecode) self.COMM.Igather(sbuf.as_mpi(), rbuf.as_mpi(), root=root).Wait() if rank == root: for value in rbuf.flat: self.assertEqual(value, root) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): rbuf = array(-1, typecode, size) if rank == root: sbuf = array(root, typecode, (size, size)) else: sbuf = array([], typecode) self.COMM.Iscatter(sbuf.as_mpi(), rbuf.as_mpi(), root=root).Wait() for value in rbuf: self.assertEqual(value, root) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): sbuf = array(root, typecode, root+1) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Iallgather(sbuf.as_mpi(), rbuf.as_mpi()).Wait() for value in rbuf.flat: self.assertEqual(value, root) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): sbuf = array(root, typecode, (size, root+1)) rbuf = array( -1, typecode, (size, root+1)) self.COMM.Ialltoall(sbuf.as_mpi(), rbuf.as_mpi_c(root+1)).Wait() for value in rbuf.flat: self.assertEqual(value, root) def assertAlmostEqual(self, first, second): num = complex(second-first) den = complex(second+first)/2 or 1.0 if (abs(num/den) > 1e-2): raise self.failureException('%r != %r' % (first, second)) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): sbuf = array(range(size), typecode) rbuf = array(-1, typecode, size) self.COMM.Ireduce(sbuf.as_mpi(), rbuf.as_mpi(), op, root).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if rank != root: self.assertEqual(value, -1) continue if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Iallreduce(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('openmpi(<=1.8.3)') def testReduceScatter(self): size = 
self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1,size+1)) sbuf = array([rank+1]*sum(rcnt), typecode) rbuf = array(-1, typecode, rank+1) self.COMM.Ireduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), None, op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) rbuf = array(-1, typecode, rank+1) self.COMM.Ireduce_scatter(sbuf.as_mpi(), rbuf.as_mpi(), rcnt, op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: redval = sum(range(size))+size if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size) elif op == MPI.MIN: self.assertEqual(value, 1) def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(1, size+1): sbuf = array([rank]*rcnt*size, typecode) rbuf = array(-1, typecode, rcnt) if op == MPI.PROD: sbuf = array([rank+1]*rcnt*size, typecode) self.COMM.Ireduce_scatter_block(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) v_sum = (size*(size-1))/2 v_prod = 1 for i in range(1,size+1): v_prod *= i v_max = size-1 v_min = 0 for i, value in enumerate(rbuf): if op == MPI.SUM: if v_sum <= max_val: self.assertAlmostEqual(value, v_sum) elif op == MPI.PROD: if v_prod <= max_val: self.assertAlmostEqual(value, v_prod) elif op == MPI.MAX: self.assertEqual(value, v_max) elif op == MPI.MIN: self.assertEqual(value, v_min) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Iscan(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('openmpi(<=1.8.1)') def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue sbuf = array(range(size), typecode) rbuf = array(0, typecode, size) self.COMM.Iexscan(sbuf.as_mpi(), rbuf.as_mpi(), op).Wait() if rank == 1: for i, value in enumerate(rbuf): self.assertEqual(value, 
i) elif rank > 1: max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testBcastTypeIndexed(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): datatype = array.TypeMap[typecode] for root in range(size): # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(0, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Ibcast(newbuf, root=root).Wait() newtype.Free() if rank != root: for i, value in enumerate(buf): if (i % 2): self.assertEqual(value, -1) else: self.assertEqual(value, i) # if rank == root: buf = array(range(10), typecode).as_raw() else: buf = array(-1, typecode, 10).as_raw() indices = list(range(1, len(buf), 2)) newtype = datatype.Create_indexed_block(1, indices) newtype.Commit() newbuf = (buf, 1, newtype) self.COMM.Ibcast(newbuf, root).Wait() newtype.Free() if rank != root: for i, value in enumerate(buf): if not (i % 2): self.assertEqual(value, -1) else: self.assertEqual(value, i) @unittest.skipMPI('MVAPICH2') @unittest.skipMPI('msmpi(<8.1.0)') class BaseTestCCOBufInplace(object): def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): count = root+3 if rank == root: sbuf = MPI.IN_PLACE buf = array(-1, typecode, (size, count)) #buf.flat[(rank*count):((rank+1)*count)] = \ # array(root, typecode, count) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = root rbuf = buf.as_mpi() else: buf = array(root, typecode, count) sbuf = buf.as_mpi() rbuf = None self.COMM.Igather(sbuf, rbuf, root=root).Wait() for value in buf.flat: self.assertEqual(value, root) @unittest.skipMPI('msmpi(==10.0.0)') def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(1, 10): if rank == root: buf = array(root, typecode, (size, count)) sbuf = buf.as_mpi() rbuf = MPI.IN_PLACE else: buf = array(-1, typecode, count) sbuf = None rbuf = buf.as_mpi() self.COMM.Iscatter(sbuf, rbuf, root=root).Wait() for value in buf.flat: self.assertEqual(value, root) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for count in range(1, 10): buf = array(-1, typecode, (size, count)) #buf.flat[(rank*count):((rank+1)*count)] = \ # array(count, typecode, count) s, e = rank*count, (rank+1)*count for i in range(s, e): buf.flat[i] = count self.COMM.Iallgather(MPI.IN_PLACE, buf.as_mpi()).Wait() for value in buf.flat: self.assertEqual(value, count) def assertAlmostEqual(self, first, second): num = complex(second-first) den = complex(second+first)/2 or 1.0 if (abs(num/den) > 1e-2): raise self.failureException('%r != %r' % (first, second)) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue for root in range(size): count = size if rank == 
root: buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() else: buf = array(range(size), typecode) buf2 = array(range(size), typecode) sbuf = buf.as_mpi() rbuf = buf2.as_mpi() self.COMM.Ireduce(sbuf, rbuf, op, root).Wait() if rank == root: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue buf = array(range(size), typecode) sbuf = MPI.IN_PLACE rbuf = buf.as_mpi() self.COMM.Iallreduce(sbuf, rbuf, op).Wait() max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * size) < max_val: self.assertAlmostEqual(value, i*size) elif op == MPI.PROD: if (i ** size) < max_val: self.assertAlmostEqual(value, i**size) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('openmpi(<=1.8.6)') def testReduceScatterBlock(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue for rcnt in range(size): if op == MPI.PROD: rbuf = array([rank+1]*rcnt*size, typecode) else: rbuf = array([rank]*rcnt*size, typecode) self.COMM.Ireduce_scatter_block(MPI.IN_PLACE, rbuf.as_mpi(), op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('openmpi(<=1.8.6)') def testReduceScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): if skip_op(typecode, op): continue rcnt = list(range(1, size+1)) if op == MPI.PROD: rbuf = array([rank+1]*sum(rcnt), typecode) else: rbuf = array([rank]*sum(rcnt), typecode) self.COMM.Ireduce_scatter(MPI.IN_PLACE, rbuf.as_mpi(), rcnt, op).Wait() max_val = maxvalue(rbuf) for i, value in enumerate(rbuf): if i >= rcnt[rank]: if op == MPI.PROD: self.assertEqual(value, rank+1) else: self.assertEqual(value, rank) else: if op == MPI.SUM: redval = sum(range(size)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.PROD: redval = prod(range(1,size+1)) if redval < max_val: self.assertAlmostEqual(value, redval) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) @unittest.skipMPI('openmpi(<=1.8.4)') def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- for array, typecode in arrayimpl.subTest(self): # segfault as of 
OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) self.COMM.Iscan(MPI.IN_PLACE, buf.as_mpi(), op).Wait() max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * (rank + 1)) < max_val: self.assertAlmostEqual(value, i * (rank + 1)) elif op == MPI.PROD: if (i ** (rank + 1)) < max_val: self.assertAlmostEqual(value, i ** (rank + 1)) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) @unittest.skipMPI('openmpi(<=1.8.4)') def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): # segfault as of OpenMPI 4.1.1 if unittest.is_mpi_gpu('openmpi', array): continue for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): if skip_op(typecode, op): continue buf = array(range(size), typecode) try: self.COMM.Iexscan(MPI.IN_PLACE, buf.as_mpi(), op).Wait() except NotImplementedError: self.skipTest('mpi-exscan') if rank == 1: for i, value in enumerate(buf): self.assertEqual(value, i) elif rank > 1: max_val = maxvalue(buf) for i, value in enumerate(buf): if op == MPI.SUM: if (i * rank) < max_val: self.assertAlmostEqual(value, i * rank) elif op == MPI.PROD: if (i ** rank) < max_val: self.assertAlmostEqual(value, i ** rank) elif op == MPI.MAX: self.assertEqual(value, i) elif op == MPI.MIN: self.assertEqual(value, i) class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('IntelMPI', MPI.COMM_WORLD.Get_size() > 1) def testReduceScatter(self): super(TestCCOBufInplaceWorld, self).testReduceScatter() class TestCCOBufSelfDup(TestCCOBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCOBufWorldDup(TestCCOBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() try: MPI.COMM_SELF.Ibarrier().Wait() except NotImplementedError: unittest.disable(BaseTestCCOBuf, 'mpi-nbc') unittest.disable(BaseTestCCOBufInplace, 'mpi-nbc') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cco_nb_vec.py000066400000000000000000000406021460670727200174300ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('openmpi(<2.0.0)') class BaseTestCCOVec(object): COMM = MPI.COMM_NULL def testGatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) recvbuf = rbuf.as_mpi_v(counts, displs) if rank != root: recvbuf=None self.COMM.Igatherv(sbuf.as_mpi(), recvbuf, root).Wait() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: 
self.assertEqual(vb, -1) def testGatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) if rank != root: recvbuf=None self.COMM.Igatherv(sendbuf, recvbuf, root).Wait() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testGatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] if rank != root: recvbuf=None self.COMM.Igatherv(sendbuf, recvbuf, root).Wait() if recvbuf is not None: for v in rbuf: self.assertEqual(v, root) # sbuf = array(root, typecode, count).as_raw() if rank == root: rbuf = array( -1, typecode, count*size).as_raw() else: rbuf = None self.COMM.Gatherv(sbuf, rbuf, root) self.COMM.Barrier() if rank == root: for v in rbuf: self.assertEqual(v, root) def testScatterv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, count) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) if rank != root: sendbuf = None self.COMM.Iscatterv(sendbuf, rbuf.as_mpi(), root).Wait() for vr in rbuf: self.assertEqual(vr, root) def testScatterv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_c(count) if rank != root: sendbuf = None self.COMM.Iscatterv(sendbuf, recvbuf, root).Wait() a, b = rbuf[:count], rbuf[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testScatterv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count).as_raw() sendbuf = [sbuf, count] recvbuf = rbuf if rank != root: sendbuf = None self.COMM.Iscatterv(sendbuf, recvbuf, root).Wait() for v in rbuf: self.assertEqual(v, root) # if rank == root: sbuf = array(root, typecode, count*size).as_raw() else: sbuf = None rbuf = array( -1, typecode, count).as_raw() self.COMM.Scatterv(sbuf, rbuf, root) for v in rbuf: self.assertEqual(v, root) def testAllgatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi() recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Iallgatherv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: 
self.assertEqual(vb, -1) def testAllgatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Iallgatherv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testAllgatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] self.COMM.Iallgatherv(sendbuf, recvbuf).Wait() for v in rbuf: self.assertEqual(v, root) # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Iallgatherv(sbuf, rbuf).Wait() for v in rbuf: self.assertEqual(v, root) def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Ialltoallv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testAlltoallv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Ialltoallv(sendbuf, recvbuf).Wait() for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testAlltoallv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = [sbuf, count] recvbuf = [rbuf, count] self.COMM.Ialltoallv(sendbuf, recvbuf).Wait() for v in rbuf: self.assertEqual(v, root) # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Ialltoallv(sbuf, rbuf).Wait() for v in rbuf: self.assertEqual(v, root) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for n in range(1, size+1): sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, size*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, size*n*rdt.extent, n*rdt.extent)) smsg = (sbuf.as_raw(), ([n]*size, sdsp), [sdt]*size) rmsg = (rbuf.as_raw(), ([n]*size, rdsp), [rdt]*size) try: self.COMM.Ialltoallw(smsg, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-ialltoallw') for v in rbuf.flat: self.assertEqual(v, n) 
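# ----------------------------------------------------------------------------
# Editor's note: the class above drives Igatherv/Iscatterv/Ialltoallv through
# the arrayimpl test helpers.  The helper below is a minimal, self-contained
# sketch of the same nonblocking vector-collective pattern with plain NumPy
# buffers.  It is NOT part of the mpi4py test suite: the function name, the
# use of NumPy, and the rank-dependent counts are illustrative assumptions,
# and it presumes the script is launched under mpiexec.
def _igatherv_numpy_sketch():
    import numpy as np  # assumption: NumPy is available
    comm = MPI.COMM_WORLD
    size, rank = comm.Get_size(), comm.Get_rank()
    count = rank + 1                        # every rank contributes a different amount
    sbuf = np.full(count, rank, dtype='i')
    counts = comm.allgather(count)          # one receive count per rank
    displs = [sum(counts[:i]) for i in range(size)]
    rbuf = np.empty(sum(counts), dtype='i') if rank == 0 else None
    recvbuf = [rbuf, (counts, displs), MPI.INT] if rank == 0 else None
    request = comm.Igatherv([sbuf, MPI.INT], recvbuf, root=0)
    # ...independent computation could overlap with the communication here...
    request.Wait()
    if rank == 0:
        # rank r's block starts at displs[r] and holds the value r, counts[r] times
        assert all((rbuf[displs[r]:displs[r]+counts[r]] == r).all()
                   for r in range(size))
# ----------------------------------------------------------------------------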
@unittest.skipMPI('msmpi(<8.1.0)')
@unittest.skipMPI('openmpi(<2.0.0)')
class BaseTestCCOVecInplace(object):

    COMM = MPI.COMM_NULL

    def testAlltoallv(self):
        size = self.COMM.Get_size()
        rank = self.COMM.Get_rank()
        for array, typecode in arrayimpl.subTest(self):
            for count in range(size):
                rbuf = array(-1, typecode, size*size)
                counts = [count] * size
                displs = list(range(0, size*size, size))
                for i in range(size):
                    for j in range(count):
                        rbuf[i*size+j] = rank
                recvbuf = rbuf.as_mpi_v(counts, displs)
                self.COMM.Ialltoallv(MPI.IN_PLACE, recvbuf).Wait()
                for i in range(size):
                    row = rbuf[i*size:(i+1)*size]
                    a, b = row[:count], row[count:]
                    for va in a:
                        self.assertEqual(va, i)
                    for vb in b:
                        self.assertEqual(vb, -1)

    def testAlltoallw(self):
        size = self.COMM.Get_size()
        rank = self.COMM.Get_rank()
        for array, typecode in arrayimpl.subTest(self):
            for count in range(size):
                rbuf = array(-1, typecode, size*size)
                for i in range(size):
                    for j in range(count):
                        rbuf[i*size+j] = rank
                rdt = rbuf.mpidtype
                rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent))
                rmsg = (rbuf.as_raw(), ([count]*size, rdsp), [rdt]*size)
                try:
                    self.COMM.Ialltoallw(MPI.IN_PLACE, rmsg).Wait()
                except NotImplementedError:
                    self.skipTest('mpi-ialltoallw')
                for i in range(size):
                    row = rbuf[i*size:(i+1)*size]
                    a, b = row[:count], row[count:]
                    for va in a:
                        self.assertEqual(va, i)
                    for vb in b:
                        self.assertEqual(vb, -1)

    def testAlltoallw2(self):
        size = self.COMM.Get_size()
        rank = self.COMM.Get_rank()
        for array, typecode in arrayimpl.subTest(self):
            for count in range(size):
                rbuf = array(-1, typecode, size*size)
                for i in range(size):
                    for j in range(count):
                        rbuf[i*size+j] = rank
                rdt = rbuf.mpidtype
                rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent))
                rmsg = (rbuf.as_raw(), [count]*size, rdsp, [rdt]*size)
                try:
                    self.COMM.Ialltoallw(MPI.IN_PLACE, rmsg).Wait()
                except NotImplementedError:
                    self.skipTest('mpi-ialltoallw')
                for i in range(size):
                    row = rbuf[i*size:(i+1)*size]
                    a, b = row[:count], row[count:]
                    for va in a:
                        self.assertEqual(va, i)
                    for vb in b:
                        self.assertEqual(vb, -1)

class TestCCOVecSelf(BaseTestCCOVec, unittest.TestCase):
    COMM = MPI.COMM_SELF

class TestCCOVecWorld(BaseTestCCOVec, unittest.TestCase):
    COMM = MPI.COMM_WORLD

class TestCCOVecSelfDup(TestCCOVecSelf):
    def setUp(self):
        self.COMM = MPI.COMM_SELF.Dup()
    def tearDown(self):
        self.COMM.Free()

class TestCCOVecWorldDup(TestCCOVecWorld):
    def setUp(self):
        self.COMM = MPI.COMM_WORLD.Dup()
    def tearDown(self):
        self.COMM.Free()

class TestCCOVecInplaceSelf(BaseTestCCOVecInplace, unittest.TestCase):
    COMM = MPI.COMM_SELF

class TestCCOVecInplaceWorld(BaseTestCCOVecInplace, unittest.TestCase):
    COMM = MPI.COMM_WORLD

try:
    MPI.COMM_SELF.Ibarrier().Wait()
except NotImplementedError:
    unittest.disable(BaseTestCCOVec, 'mpi-nbc')

if __name__ == '__main__':
    unittest.main()
mpi4py-3.1.6/test/test_cco_ngh_buf.py000066400000000000000000000211251460670727200176030ustar00rootroot00000000000000from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl

def create_topo_comms(comm):
    size = comm.Get_size()
    rank = comm.Get_rank()
    # Cartesian
    n = int(size**1/2.0)
    m = int(size**1/3.0)
    if m*m*m == size:
        dims = [m, m, m]
    elif n*n == size:
        dims = [n, n]
    else:
        dims = [size]
    periods = [True] * len(dims)
    yield comm.Create_cart(dims, periods=periods)
    # Graph
    index, edges = [0], []
    for i in range(size):
        pos = index[-1]
        index.append(pos+2)
        edges.append((i-1)%size)
        edges.append((i+1)%size)
    yield comm.Create_graph(index, edges)
    # Dist Graph
    sources = [(rank-2)%size, (rank-1)%size]
    destinations = [(rank+1)%size, (rank+2)%size]
yield comm.Create_dist_graph_adjacent(sources, destinations) def get_neighbors_count(comm): topo = comm.Get_topology() if topo == MPI.CART: ndim = comm.Get_dim() return 2*ndim, 2*ndim if topo == MPI.GRAPH: rank = comm.Get_rank() nneighbors = comm.Get_neighbors_count(rank) return nneighbors, nneighbors if topo == MPI.DIST_GRAPH: indeg, outdeg, w = comm.Get_dist_neighbors_count() return indeg, outdeg return 0, 0 def have_feature(): cartcomm = MPI.COMM_SELF.Create_cart([1], periods=[0]) try: cartcomm.neighbor_allgather(None) return True except NotImplementedError: return False finally: cartcomm.Free() @unittest.skipIf(not have_feature(), 'mpi-neighbor') class BaseTestCCONghBuf(object): COMM = MPI.COMM_NULL def testNeighborAllgather(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): # segfault as of OpenMPI 4.1.1; TODO(leofang): why? if array.backend == 'numba': continue for v in range(3): sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_allgather(sbuf.as_mpi(), rbuf.as_mpi()) for value in rbuf.flat: self.assertEqual(value, v) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_allgatherv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)) for value in rbuf.flat: self.assertEqual(value, v) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_allgather(sbuf.as_mpi(), rbuf.as_mpi()).Wait() for value in rbuf.flat: self.assertEqual(value, v) sbuf = array( v, typecode, 3) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_allgatherv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)).Wait() for value in rbuf.flat: self.assertEqual(value, v) comm.Free() def testNeighborAlltoall(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.subTest(self): for v in range(3): sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_alltoall(sbuf.as_mpi(), rbuf.as_mpi_c(3)) for value in rbuf.flat: self.assertEqual(value, v) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_alltoall(sbuf.as_mpi(), rbuf.as_mpi()) for value in rbuf.flat: self.assertEqual(value, v) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Neighbor_alltoallv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)) for value in rbuf.flat: self.assertEqual(value, v) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_alltoall(sbuf.as_mpi(), rbuf.as_mpi()).Wait() for value in rbuf.flat: self.assertEqual(value, v) sbuf = array( v, typecode, (ssize, 3)) rbuf = array(-1, typecode, (rsize, 3)) comm.Ineighbor_alltoallv(sbuf.as_mpi_c(3), rbuf.as_mpi_c(3)).Wait() for value in rbuf.flat: self.assertEqual(value, v) comm.Free() def testNeighborAlltoallw(self): size = self.COMM.Get_size() for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.subTest(self): for n in range(1,4): for v in range(3): sbuf = array( v, typecode, (ssize, n)) rbuf = array(-1, typecode, (rsize, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, ssize*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, rsize*n*rdt.extent, n*rdt.extent)) smsg = [sbuf.as_raw(), ([n]*ssize, sdsp), [sdt]*ssize] rmsg = (rbuf.as_raw(), ([n]*rsize, rdsp), [rdt]*rsize) try: comm.Neighbor_alltoallw(smsg, rmsg) except NotImplementedError: 
self.skipTest('mpi-neighbor_alltoallw') for value in rbuf.flat: self.assertEqual(value, v) smsg[0] = array(v+1, typecode, (ssize, n)).as_raw() try: comm.Ineighbor_alltoallw(smsg, rmsg).Wait() except NotImplementedError: self.skipTest('mpi-ineighbor_alltoallw') for value in rbuf.flat: self.assertEqual(value, v+1) comm.Free() def testNeighborAlltoallwBottom(self): size = self.COMM.Get_size() for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for array, typecode in arrayimpl.subTest(self): for n in range(1,4): for v in range(3): sbuf = array( v, typecode, (ssize, n)) rbuf = array(-1, typecode, (rsize, n)) saddr = MPI.Get_address(sbuf.as_raw()) raddr = MPI.Get_address(rbuf.as_raw()) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, ssize*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, rsize*n*rdt.extent, n*rdt.extent)) sdsp = [saddr + d for d in sdsp] rdsp = [raddr + d for d in rdsp] smsg = [MPI.BOTTOM, ([n]*ssize, sdsp), [sdt]*ssize] rmsg = (MPI.BOTTOM, ([n]*rsize, rdsp), [rdt]*rsize) try: comm.Neighbor_alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-neighbor_alltoallw') for value in rbuf.flat: self.assertEqual(value, v) comm.Free() class TestCCONghBufSelf(BaseTestCCONghBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCONghBufWorld(BaseTestCCONghBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCONghBufSelfDup(TestCCONghBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCONghBufWorldDup(TestCCONghBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() name, version = MPI.get_vendor() if name == 'Open MPI': if version < (1,8,4): _create_topo_comms = create_topo_comms def create_topo_comms(comm): for c in _create_topo_comms(comm): if c.size * 2 < sum(c.degrees): c.Free(); continue yield c if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cco_ngh_obj.py000066400000000000000000000067261460670727200176130ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest _basic = [None, True, False, -7, 0, 7, 2**31, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = _basic messages += [ list(_basic), tuple(_basic), dict([('k%d' % key, val) for key, val in enumerate(_basic)]) ] messages = messages + [messages] def create_topo_comms(comm): size = comm.Get_size() rank = comm.Get_rank() # Cartesian n = int(size**1/2.0) m = int(size**1/3.0) if m*m*m == size: dims = [m, m, m] elif n*n == size: dims = [n, n] else: dims = [size] periods = [True] * len(dims) yield comm.Create_cart(dims, periods=periods) # Graph index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) yield comm.Create_graph(index, edges) # Dist Graph sources = [(rank-2)%size, (rank-1)%size] destinations = [(rank+1)%size, (rank+2)%size] yield comm.Create_dist_graph_adjacent(sources, destinations) def get_neighbors_count(comm): topo = comm.Get_topology() if topo == MPI.CART: ndim = comm.Get_dim() return 2*ndim, 2*ndim if topo == MPI.GRAPH: rank = comm.Get_rank() nneighbors = comm.Get_neighbors_count(rank) return nneighbors, nneighbors if topo == MPI.DIST_GRAPH: indeg, outdeg, w = comm.Get_dist_neighbors_count() return indeg, outdeg return 0, 0 def have_feature(): cartcomm = MPI.COMM_SELF.Create_cart([1], periods=[0]) try: cartcomm.neighbor_allgather(None) return True except NotImplementedError: return False finally: cartcomm.Free() 
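# ----------------------------------------------------------------------------
# Editor's note: a minimal sketch of the neighborhood-collective pattern that
# the test classes below exercise, kept separate from the test suite proper.
# The function name and the use of a periodic 1-D Cartesian ring are
# illustrative assumptions; it presumes an mpiexec launch.  On such a ring
# every rank has its left and right neighbours as both sources and
# destinations, so neighbor_allgather delivers exactly one message from each.
def _neighbor_allgather_sketch():
    comm = MPI.COMM_WORLD
    size, rank = comm.Get_size(), comm.Get_rank()
    cart = comm.Create_cart([size], periods=[True])
    try:
        received = cart.neighbor_allgather(('payload', rank))
        left, right = (rank - 1) % size, (rank + 1) % size
        # compare as multisets to avoid relying on neighbour ordering
        assert sorted(r for _, r in received) == sorted([left, right])
    finally:
        cart.Free()
# ----------------------------------------------------------------------------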
@unittest.skipIf(not have_feature(), 'mpi-neighbor') class BaseTestCCONghObj(object): COMM = MPI.COMM_NULL @unittest.skipMPI('openmpi(<2.2.0)') def testNeighborAllgather(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for smess in messages: rmess = comm.neighbor_allgather(smess) self.assertEqual(rmess, [smess] * rsize) comm.Free() def testNeighborAlltoall(self): for comm in create_topo_comms(self.COMM): rsize, ssize = get_neighbors_count(comm) for smess in messages: rmess = comm.neighbor_alltoall([smess] * ssize) self.assertEqual(rmess, [smess] * rsize) comm.Free() class TestCCONghObjSelf(BaseTestCCONghObj, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCONghObjWorld(BaseTestCCONghObj, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCONghObjSelfDup(TestCCONghObjSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCONghObjWorldDup(TestCCONghObjWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() name, version = MPI.get_vendor() if name == 'Open MPI': if version < (1,8,4): _create_topo_comms = create_topo_comms def create_topo_comms(comm): for c in _create_topo_comms(comm): if c.size * 2 < sum(c.degrees): c.Free(); continue yield c if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cco_obj.py000066400000000000000000000171301460670727200167460ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest from functools import reduce cumsum = lambda seq: reduce(lambda x, y: x+y, seq, 0) cumprod = lambda seq: reduce(lambda x, y: x*y, seq, 1) _basic = [None, True, False, -7, 0, 7, 2**31, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = _basic messages += [ list(_basic), tuple(_basic), dict([('k%d' % key, val) for key, val in enumerate(_basic)]) ] class BaseTestCCOObj(object): COMM = MPI.COMM_NULL def testBarrier(self): self.COMM.barrier() def testBcast(self): for smess in messages: for root in range(self.COMM.Get_size()): rmess = self.COMM.bcast(smess, root=root) self.assertEqual(smess, rmess) def testGather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: for root in range(size): rmess = self.COMM.gather(smess, root=root) if rank == root: self.assertEqual(rmess, [smess] * size) else: self.assertEqual(rmess, None) def testScatter(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: for root in range(size): if rank == root: rmess = self.COMM.scatter([smess] * size, root=root) else: rmess = self.COMM.scatter(None, root=root) self.assertEqual(rmess, smess) def testAllgather(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: rmess = self.COMM.allgather(smess) self.assertEqual(rmess, [smess] * size) def testAlltoall(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages + [messages]: rmess = self.COMM.alltoall([smess] * size) self.assertEqual(rmess, [smess] * size) def testReduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for root in range(size): for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.MAXLOC, MPI.MINLOC, MPI.REPLACE, MPI.NO_OP): if op == MPI.OP_NULL: continue if op in (MPI.MAXLOC, MPI.MINLOC): sendobj = (rank, rank) else: sendobj = rank value = self.COMM.reduce(sendobj, op=op, root=root) if rank != root: self.assertTrue(value is None) else: if op == MPI.SUM: self.assertEqual(value, 
cumsum(range(size))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(size))) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) elif op == MPI.MAXLOC: self.assertEqual(value[0], size-1) self.assertEqual(value[1], size-1) elif op == MPI.MINLOC: self.assertEqual(value[0], 0) self.assertEqual(value[1], 0) elif op == MPI.REPLACE: self.assertEqual(value, size-1) elif op == MPI.NO_OP: self.assertEqual(value, 0) def testAllreduce(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.MAXLOC, MPI.MINLOC, MPI.REPLACE, MPI.NO_OP): if op == MPI.OP_NULL: continue if op in (MPI.MAXLOC, MPI.MINLOC): sendobj = (rank, rank) else: sendobj = rank value = self.COMM.allreduce(sendobj, op) if op == MPI.SUM: self.assertEqual(value, cumsum(range(size))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(size))) elif op == MPI.MAX: self.assertEqual(value, size-1) elif op == MPI.MIN: self.assertEqual(value, 0) elif op == MPI.MAXLOC: self.assertEqual(value[1], size-1) elif op == MPI.MINLOC: self.assertEqual(value[1], 0) elif op == MPI.REPLACE: self.assertEqual(value, size-1) elif op == MPI.NO_OP: self.assertEqual(value, 0) def testScan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- sscan = self.COMM.scan(size, op=MPI.SUM) self.assertEqual(sscan, cumsum([size]*(rank+1))) # -- rscan = self.COMM.scan(rank, op=MPI.SUM) self.assertEqual(rscan, cumsum(range(rank+1))) # -- minloc = self.COMM.scan((rank, rank), op=MPI.MINLOC) maxloc = self.COMM.scan((rank, rank), op=MPI.MAXLOC) self.assertEqual(minloc, (0, 0)) self.assertEqual(maxloc, (rank, rank)) # -- if MPI.REPLACE != MPI.OP_NULL: rscan = self.COMM.scan(rank, op=MPI.REPLACE) self.assertEqual(rscan, rank) # -- if MPI.NO_OP != MPI.OP_NULL: rscan = self.COMM.scan(rank, op=MPI.NO_OP) self.assertEqual(rscan, 0) def testExscan(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() # -- sscan = self.COMM.exscan(size, op=MPI.SUM) if rank == 0: self.assertTrue(sscan is None) else: self.assertEqual(sscan, cumsum([size]*(rank))) # -- rscan = self.COMM.exscan(rank, op=MPI.SUM) if rank == 0: self.assertTrue(rscan is None) else: self.assertEqual(rscan, cumsum(range(rank))) # -- minloc = self.COMM.exscan((rank, rank), op=MPI.MINLOC) maxloc = self.COMM.exscan((rank, rank), op=MPI.MAXLOC) if rank == 0: self.assertEqual(minloc, None) self.assertEqual(maxloc, None) else: self.assertEqual(minloc, (0, 0)) self.assertEqual(maxloc, (rank-1, rank-1)) # -- if MPI.REPLACE != MPI.OP_NULL: rscan = self.COMM.exscan(rank, op=MPI.REPLACE) if rank == 0: self.assertTrue(rscan is None) else: self.assertEqual(rscan, rank-1) # -- if MPI.NO_OP != MPI.OP_NULL: rscan = self.COMM.exscan(rank, op=MPI.NO_OP) if rank == 0: self.assertTrue(rscan is None) else: self.assertEqual(rscan, 0) class TestCCOObjSelf(BaseTestCCOObj, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOObjWorld(BaseTestCCOObj, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOObjSelfDup(TestCCOObjSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestCCOObjWorldDup(TestCCOObjWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cco_obj_inter.py000066400000000000000000000170611460670727200201520ustar00rootroot00000000000000from mpi4py import MPI import 
mpiunittest as unittest from functools import reduce cumsum = lambda seq: reduce(lambda x, y: x+y, seq, 0) cumprod = lambda seq: reduce(lambda x, y: x*y, seq, 1) _basic = [None, True, False, -7, 0, 7, 2**31, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = _basic messages += [ list(_basic), tuple(_basic), dict([('k%d' % key, val) for key, val in enumerate(_basic)]) ] @unittest.skipMPI('openmpi(<1.6.0)') @unittest.skipMPI('MPICH1') @unittest.skipIf(MPI.ROOT == MPI.PROC_NULL, 'mpi-root') @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class BaseTestCCOObjInter(object): BASECOMM = MPI.COMM_NULL INTRACOMM = MPI.COMM_NULL INTERCOMM = MPI.COMM_NULL def setUp(self): size = self.BASECOMM.Get_size() rank = self.BASECOMM.Get_rank() if rank < size // 2 : self.COLOR = 0 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = size // 2 else: self.COLOR = 1 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = 0 self.INTRACOMM = self.BASECOMM.Split(self.COLOR, key=0) Create_intercomm = MPI.Intracomm.Create_intercomm self.INTERCOMM = Create_intercomm(self.INTRACOMM, self.LOCAL_LEADER, self.BASECOMM, self.REMOTE_LEADER) def tearDown(self): self.INTRACOMM.Free() self.INTERCOMM.Free() @unittest.skipMPI('MPICH2(<1.0.8)') def testBarrier(self): self.INTERCOMM.Barrier() def testBcast(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: rmess = self.INTERCOMM.bcast(smess, root=MPI.ROOT) else: rmess = self.INTERCOMM.bcast(None, root=MPI.PROC_NULL) self.assertEqual(rmess, None) else: for root in range(rsize): rmess = self.INTERCOMM.bcast(None, root=root) self.assertEqual(rmess, smess) def testGather(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: rmess = self.INTERCOMM.gather(smess, root=MPI.ROOT) self.assertEqual(rmess, [smess] * rsize) else: rmess = self.INTERCOMM.gather(None, root=MPI.PROC_NULL) self.assertEqual(rmess, None) else: for root in range(rsize): rmess = self.INTERCOMM.gather(smess, root=root) self.assertEqual(rmess, None) @unittest.skipMPI('msmpi(<8.0.0)') def testScatter(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: rmess = self.INTERCOMM.scatter([smess] * rsize, root=MPI.ROOT) else: rmess = self.INTERCOMM.scatter(None, root=MPI.PROC_NULL) self.assertEqual(rmess, None) else: for root in range(rsize): rmess = self.INTERCOMM.scatter(None, root=root) self.assertEqual(rmess, smess) @unittest.skipMPI('MPICH2(<1.0.8)') def testAllgather(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: rmess = self.INTERCOMM.allgather(smess) self.assertEqual(rmess, [smess] * rsize) def testAlltoall(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for smess in messages + [messages]: rmess = self.INTERCOMM.alltoall([smess] * rsize) self.assertEqual(rmess, [smess] * rsize) def testReduce(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = 
self.INTERCOMM.Get_remote_size() for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): for color in [0, 1]: if self.COLOR == color: for root in range(size): if root == rank: value = self.INTERCOMM.reduce(None, op=op, root=MPI.ROOT) if op == MPI.SUM: self.assertEqual(value, cumsum(range(rsize))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(rsize))) elif op == MPI.MAX: self.assertEqual(value, rsize-1) elif op == MPI.MIN: self.assertEqual(value, 0) else: value = self.INTERCOMM.reduce(None, op=op, root=MPI.PROC_NULL) self.assertEqual(value, None) else: for root in range(rsize): value = self.INTERCOMM.reduce(rank, op=op, root=root) self.assertEqual(value, None) @unittest.skipMPI('MPICH2(<1.0.8)') def testAllreduce(self): rank = self.INTERCOMM.Get_rank() size = self.INTERCOMM.Get_size() rsize = self.INTERCOMM.Get_remote_size() for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD): value = self.INTERCOMM.allreduce(rank, op) if op == MPI.SUM: self.assertEqual(value, cumsum(range(rsize))) elif op == MPI.PROD: self.assertEqual(value, cumprod(range(rsize))) elif op == MPI.MAX: self.assertEqual(value, rsize-1) elif op == MPI.MIN: self.assertEqual(value, 0) class TestCCOObjInter(BaseTestCCOObjInter, unittest.TestCase): BASECOMM = MPI.COMM_WORLD class TestCCOObjInterDup(TestCCOObjInter): def setUp(self): self.BASECOMM = self.BASECOMM.Dup() super(TestCCOObjInterDup, self).setUp() def tearDown(self): self.BASECOMM.Free() super(TestCCOObjInterDup, self).tearDown() class TestCCOObjInterDupDup(TestCCOObjInterDup): BASECOMM = MPI.COMM_WORLD INTERCOMM_ORIG = MPI.COMM_NULL def setUp(self): super(TestCCOObjInterDupDup, self).setUp() self.INTERCOMM_ORIG = self.INTERCOMM self.INTERCOMM = self.INTERCOMM.Dup() def tearDown(self): super(TestCCOObjInterDupDup, self).tearDown() self.INTERCOMM_ORIG.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cco_vec.py000066400000000000000000000433131460670727200167530ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl def maxvalue(a): try: typecode = a.typecode except AttributeError: typecode = a.dtype.char if typecode == ('f'): return 1e30 elif typecode == ('d'): return 1e300 else: return 2 ** (a.itemsize * 7) - 1 class BaseTestCCOVec(object): COMM = MPI.COMM_NULL def testGatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) recvbuf = rbuf.as_mpi_v(counts, displs) if rank != root: recvbuf=None self.COMM.Barrier() self.COMM.Gatherv(sbuf.as_mpi(), recvbuf, root) self.COMM.Barrier() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testGatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) if rank != root: recvbuf=None self.COMM.Barrier() self.COMM.Gatherv(sendbuf, recvbuf, root) self.COMM.Barrier() if recvbuf is not None: for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: 
self.assertEqual(vb, -1) def testGatherv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] if rank != root: recvbuf=None self.COMM.Barrier() self.COMM.Gatherv(sendbuf, recvbuf, root) self.COMM.Barrier() if recvbuf is not None: for v in rbuf: self.assertEqual(v, root) # sbuf = array(root, typecode, count).as_raw() if rank == root: rbuf = array( -1, typecode, count*size).as_raw() else: rbuf = None self.COMM.Gatherv(sbuf, rbuf, root) self.COMM.Barrier() if rank == root: for v in rbuf: self.assertEqual(v, root) def testScatterv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, count) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) if rank != root: sendbuf = None self.COMM.Scatterv(sendbuf, rbuf.as_mpi(), root) for vr in rbuf: self.assertEqual(vr, root) def testScatterv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_c(count) if rank != root: sendbuf = None self.COMM.Scatterv(sendbuf, recvbuf, root) a, b = rbuf[:count], rbuf[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testScatterv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count).as_raw() sendbuf = [sbuf, count] recvbuf = rbuf if rank != root: sendbuf = None self.COMM.Scatterv(sendbuf, recvbuf, root) for v in rbuf: self.assertEqual(v, root) # if rank == root: sbuf = array(root, typecode, count*size).as_raw() else: sbuf = None rbuf = array( -1, typecode, count).as_raw() self.COMM.Scatterv(sbuf, rbuf, root) for v in rbuf: self.assertEqual(v, root) def testAllgatherv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, count) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi() recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Allgatherv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testAllgatherv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_c(count) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Allgatherv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testAllgatherv3(self): size = 
self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = sbuf recvbuf = [rbuf, count] self.COMM.Allgatherv(sendbuf, recvbuf) for v in rbuf: self.assertEqual(v, root) # sbuf = array(root, typecode, count).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Allgatherv(sbuf, rbuf) for v in rbuf: self.assertEqual(v, root) def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) sendbuf = sbuf.as_mpi_v(counts, displs) recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Alltoallv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testAlltoallv2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size): sbuf = array(root, typecode, size*size) rbuf = array( -1, typecode, size*size) sendbuf = sbuf.as_mpi_v(count, size) recvbuf = rbuf.as_mpi_v(count, size) self.COMM.Alltoallv(sendbuf, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, root) for vb in b: self.assertEqual(vb, -1) def testAlltoallv3(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for root in range(size): for count in range(size+1): # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() sendbuf = [sbuf, count] recvbuf = [rbuf, count] self.COMM.Alltoallv(sendbuf, recvbuf) for v in rbuf: self.assertEqual(v, root) # sbuf = array(root, typecode, count*size).as_raw() rbuf = array( -1, typecode, count*size).as_raw() self.COMM.Alltoallv(sbuf, rbuf) for v in rbuf: self.assertEqual(v, root) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for n in range(1,size+1): sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype sdsp = list(range(0, size*n*sdt.extent, n*sdt.extent)) rdsp = list(range(0, size*n*rdt.extent, n*rdt.extent)) smsg = (sbuf.as_raw(), ([n]*size, sdsp), [sdt]*size) rmsg = (rbuf.as_raw(), ([n]*size, rdsp), [rdt]*size) try: self.COMM.Alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') for value in rbuf.flat: self.assertEqual(value, n) def testAlltoallwBottom(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for n in range(1, size+1): sbuf = array( n, typecode, (size, n)) rbuf = array(-1, typecode, (size, n)) saddr = MPI.Get_address(sbuf.as_raw()) raddr = MPI.Get_address(rbuf.as_raw()) sdt, rdt = sbuf.mpidtype, rbuf.mpidtype stypes = [ MPI.Datatype.Create_struct([n], [saddr+d], [sdt]).Commit() for d in list(range(0, size*n*sdt.extent, n*sdt.extent)) ] rtypes = [ MPI.Datatype.Create_struct([n], [raddr+d], [sdt]).Commit() for d in list(range(0, size*n*rdt.extent, n*rdt.extent)) ] smsg = (MPI.BOTTOM, ([1]*size, [0]*size), 
stypes) rmsg = (MPI.BOTTOM, ([1]*size, [0]*size), rtypes) try: self.COMM.Alltoallw(smsg, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') finally: for t in stypes: t.Free() for t in rtypes: t.Free() for value in rbuf.flat: self.assertEqual(value, n) @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('openmpi(<1.8.0)') @unittest.skipIf(MPI.BOTTOM == MPI.IN_PLACE, 'mpi-in-place') class BaseTestCCOVecInplace(object): COMM = MPI.COMM_NULL def testAlltoallv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for count in range(size): rbuf = array(-1, typecode, size*size) counts = [count] * size displs = list(range(0, size*size, size)) for i in range(size): for j in range(count): rbuf[i*size+j] = rank recvbuf = rbuf.as_mpi_v(counts, displs) self.COMM.Alltoallv(MPI.IN_PLACE, recvbuf) for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, i) for vb in b: self.assertEqual(vb, -1) def testAlltoallw(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), ([count]*size, rdsp), [rdt]*size) try: self.COMM.Alltoallw(MPI.IN_PLACE, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, i) for vb in b: self.assertEqual(vb, -1) def testAlltoallw2(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): for count in range(size): rbuf = array(-1, typecode, size*size) for i in range(size): for j in range(count): rbuf[i*size+j] = rank rdt = rbuf.mpidtype rdsp = list(range(0, size*size*rdt.extent, size*rdt.extent)) rmsg = (rbuf.as_raw(), [count]*size, rdsp, [rdt]*size) try: self.COMM.Alltoallw(MPI.IN_PLACE, rmsg) except NotImplementedError: self.skipTest('mpi-alltoallw') for i in range(size): row = rbuf[i*size:(i+1)*size] a, b = row[:count], row[count:] for va in a: self.assertEqual(va, i) for vb in b: self.assertEqual(vb, -1) class TestCCOVecSelf(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecWorld(BaseTestCCOVec, unittest.TestCase): COMM = MPI.COMM_WORLD class TestCCOVecSelfDup(TestCCOVecSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecWorldDup(TestCCOVecWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() class TestCCOVecInplaceSelf(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_SELF class TestCCOVecInplaceWorld(BaseTestCCOVecInplace, unittest.TestCase): COMM = MPI.COMM_WORLD if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_cffi.py000066400000000000000000000042571460670727200162650ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import cffi except ImportError: cffi = None @unittest.skipIf(cffi is None, 'cffi') class TestCFFI(unittest.TestCase): mpitypes = [ MPI.Datatype, MPI.Request, MPI.Info, MPI.Errhandler, MPI.Group, MPI.Win, MPI.Op, MPI.File, MPI.Message, MPI.Comm, ] objects = [ MPI.DATATYPE_NULL, MPI.INT, MPI.DOUBLE, MPI.REQUEST_NULL, MPI.INFO_NULL, MPI.INFO_ENV, MPI.ERRHANDLER_NULL, MPI.ERRORS_RETURN, 
MPI.ERRORS_ARE_FATAL, MPI.GROUP_NULL, MPI.GROUP_EMPTY, MPI.WIN_NULL, MPI.OP_NULL, MPI.SUM, MPI.MIN, MPI.MAX, MPI.FILE_NULL, MPI.MESSAGE_NULL, MPI.MESSAGE_NO_PROC, MPI.COMM_NULL, MPI.COMM_SELF, MPI.COMM_WORLD, ] def testHandleAddress(self): ffi = cffi.FFI() typemap = {ffi.sizeof('int'): 'int', ffi.sizeof('void*'): 'void*'} typename = lambda t: t.__name__.rsplit('.', 1)[-1] for tp in self.mpitypes: handle_t = typemap[MPI._sizeof(tp)] mpi_t = 'MPI_' + typename(tp) ffi.cdef("typedef %s %s;" % (handle_t, mpi_t)) for obj in self.objects: if isinstance(obj, MPI.Comm): mpi_t = 'MPI_Comm' else: mpi_t = 'MPI_' + typename(type(obj)) oldobj = obj newobj = type(obj)() handle_old = ffi.cast(mpi_t+'*', MPI._addressof(oldobj)) handle_new = ffi.cast(mpi_t+'*', MPI._addressof(newobj)) handle_new[0] = handle_old[0] self.assertEqual(oldobj, newobj) def testHandleValue(self): ffi = cffi.FFI() typemap = {ffi.sizeof('uint32_t'): 'uint32_t', ffi.sizeof('uint64_t'): 'uint64_t',} for obj in self.objects: uintptr_t = typemap[MPI._sizeof(obj)] handle = ffi.cast(uintptr_t+'*', MPI._addressof(obj))[0] self.assertEqual(handle, MPI._handleof(obj)) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_comm.py000066400000000000000000000147201460670727200163050ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestCommNull(unittest.TestCase): def testContructor(self): comm = MPI.Comm() self.assertEqual(comm, MPI.COMM_NULL) self.assertFalse(comm is MPI.COMM_NULL) def construct(): MPI.Comm((1,2,3)) self.assertRaises(TypeError, construct) def testContructorIntra(self): comm_null = MPI.Intracomm() self.assertFalse(comm_null is MPI.COMM_NULL) self.assertEqual(comm_null, MPI.COMM_NULL) def testContructorInter(self): comm_null = MPI.Intercomm() self.assertFalse(comm_null is MPI.COMM_NULL) self.assertEqual(comm_null, MPI.COMM_NULL) class BaseTestComm(object): def testContructor(self): comm = MPI.Comm(self.COMM) self.assertEqual(comm, self.COMM) self.assertFalse(comm is self.COMM) def testPyProps(self): comm = self.COMM self.assertEqual(comm.Get_size(), comm.size) self.assertEqual(comm.Get_rank(), comm.rank) self.assertEqual(comm.Is_intra(), comm.is_intra) self.assertEqual(comm.Is_inter(), comm.is_inter) self.assertEqual(comm.Get_topology(), comm.topology) def testGroup(self): comm = self.COMM group = self.COMM.Get_group() self.assertEqual(comm.Get_size(), group.Get_size()) self.assertEqual(comm.Get_rank(), group.Get_rank()) group.Free() self.assertEqual(group, MPI.GROUP_NULL) def testCloneFree(self): comm = self.COMM.Clone() comm.Free() self.assertEqual(comm, MPI.COMM_NULL) def testCompare(self): results = (MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL) ccmp = MPI.Comm.Compare(self.COMM, MPI.COMM_WORLD) self.assertTrue(ccmp in results) ccmp = MPI.Comm.Compare(self.COMM, self.COMM) self.assertEqual(ccmp, MPI.IDENT) comm = self.COMM.Dup() ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) def testIsInter(self): is_inter = self.COMM.Is_inter() self.assertTrue(type(is_inter) is bool) def testGetSetName(self): try: name = self.COMM.Get_name() self.COMM.Set_name('comm') self.assertEqual(self.COMM.Get_name(), 'comm') self.COMM.Set_name(name) self.assertEqual(self.COMM.Get_name(), name) except NotImplementedError: self.skipTest('mpi-comm-name') def testGetParent(self): try: parent = MPI.Comm.Get_parent() except NotImplementedError: self.skipTest('mpi-comm-get_parent') def testDupWithInfo(self): info = None self.COMM.Dup(info).Free() info = 
MPI.INFO_NULL self.COMM.Dup(info).Free() self.COMM.Dup_with_info(info).Free() info = MPI.Info.Create() self.COMM.Dup(info).Free() self.COMM.Dup_with_info(info).Free() info.Free() @unittest.skipMPI('mpich(<=3.1.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) def testIDup(self): try: comm, request = self.COMM.Idup() except NotImplementedError: self.skipTest('mpi-comm-idup') request.Wait() ccmp = MPI.Comm.Compare(self.COMM, comm) comm.Free() self.assertEqual(ccmp, MPI.CONGRUENT) def testGetSetInfo(self): #info = MPI.INFO_NULL #self.COMM.Set_info(info) info = MPI.Info.Create() self.COMM.Set_info(info) info.Free() info = self.COMM.Get_info() self.COMM.Set_info(info) info.Free() def testCreate(self): group = self.COMM.Get_group() comm = self.COMM.Create(group) ccmp = MPI.Comm.Compare(self.COMM, comm) self.assertEqual(ccmp, MPI.CONGRUENT) comm.Free() group.Free() @unittest.skipMPI('openmpi(<=1.8.1)') def testCreateGroup(self): group = self.COMM.Get_group() try: try: comm = self.COMM.Create_group(group) ccmp = MPI.Comm.Compare(self.COMM, comm) self.assertEqual(ccmp, MPI.CONGRUENT) comm.Free() finally: group.Free() except NotImplementedError: self.skipTest('mpi-comm-create_group') @unittest.skipMPI('openmpi(==2.0.0)') def testSplitType(self): try: MPI.COMM_SELF.Split_type(MPI.COMM_TYPE_SHARED).Free() except NotImplementedError: self.skipTest('mpi-comm-split_type') #comm = self.COMM.Split_type(MPI.UNDEFINED) #self.assertEqual(comm, MPI.COMM_NULL) comm = self.COMM.Split_type(MPI.COMM_TYPE_SHARED) self.assertNotEqual(comm, MPI.COMM_NULL) size = self.COMM.Get_size() rank = self.COMM.Get_rank() if size == 1: self.assertEqual(comm.size, 1) self.assertEqual(comm.rank, 0) comm.Free() for root in range(size): if rank == root: split_type = MPI.COMM_TYPE_SHARED else: split_type = MPI.UNDEFINED comm = self.COMM.Split_type(split_type) if rank == root: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, 1) self.assertEqual(comm.rank, 0) comm.Free() else: self.assertEqual(comm, MPI.COMM_NULL) class TestCommSelf(BaseTestComm, unittest.TestCase): def setUp(self): self.COMM = MPI.COMM_SELF def testSize(self): size = self.COMM.Get_size() self.assertEqual(size, 1) def testRank(self): rank = self.COMM.Get_rank() self.assertEqual(rank, 0) class TestCommWorld(BaseTestComm, unittest.TestCase): def setUp(self): self.COMM = MPI.COMM_WORLD def testSize(self): size = self.COMM.Get_size() self.assertTrue(size >= 1) def testRank(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() self.assertTrue(rank >= 0 and rank < size) class TestCommSelfDup(TestCommSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestCommWorldDup(TestCommWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_comm_inter.py000066400000000000000000000056111460670727200175050ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') class BaseTestIntercomm(object): BASECOMM = MPI.COMM_NULL INTRACOMM = MPI.COMM_NULL INTERCOMM = MPI.COMM_NULL def setUp(self): size = self.BASECOMM.Get_size() rank = self.BASECOMM.Get_rank() if rank < size // 2 : self.COLOR = 0 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = size // 2 else: self.COLOR = 1 self.LOCAL_LEADER = 0 self.REMOTE_LEADER = 0 self.INTRACOMM = 
self.BASECOMM.Split(self.COLOR, key=0) Create_intercomm = MPI.Intracomm.Create_intercomm self.INTERCOMM = Create_intercomm(self.INTRACOMM, self.LOCAL_LEADER, self.BASECOMM, self.REMOTE_LEADER) def tearDown(self): self.INTRACOMM.Free() self.INTERCOMM.Free() def testFortran(self): intercomm = self.INTERCOMM fint = intercomm.py2f() newcomm = MPI.Comm.f2py(fint) self.assertEqual(newcomm, intercomm) self.assertTrue(type(newcomm) is MPI.Intercomm) def testLocalGroupSizeRank(self): intercomm = self.INTERCOMM local_group = intercomm.Get_group() self.assertEqual(local_group.size, intercomm.Get_size()) self.assertEqual(local_group.size, intercomm.size) self.assertEqual(local_group.rank, intercomm.Get_rank()) self.assertEqual(local_group.rank, intercomm.rank) local_group.Free() def testRemoteGroupSize(self): intercomm = self.INTERCOMM remote_group = intercomm.Get_remote_group() self.assertEqual(remote_group.size, intercomm.Get_remote_size()) self.assertEqual(remote_group.size, intercomm.remote_size) remote_group.Free() def testMerge(self): basecomm = self.BASECOMM intercomm = self.INTERCOMM if basecomm.rank < basecomm.size // 2: high = False else: high = True intracomm = intercomm.Merge(high) self.assertEqual(intracomm.size, basecomm.size) self.assertEqual(intracomm.rank, basecomm.rank) intracomm.Free() class TestIntercomm(BaseTestIntercomm, unittest.TestCase): BASECOMM = MPI.COMM_WORLD class TestIntercommDup(TestIntercomm): def setUp(self): self.BASECOMM = self.BASECOMM.Dup() super(TestIntercommDup, self).setUp() def tearDown(self): self.BASECOMM.Free() super(TestIntercommDup, self).tearDown() class TestIntercommDupDup(TestIntercomm): def setUp(self): super(TestIntercommDupDup, self).setUp() INTERCOMM = self.INTERCOMM self.INTERCOMM = self.INTERCOMM.Dup() INTERCOMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_comm_topo.py000066400000000000000000000222111460670727200173400ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class BaseTestTopo(object): COMM = MPI.COMM_NULL def checkFortran(self, oldcomm): fint = oldcomm.py2f() newcomm = MPI.Comm.f2py(fint) self.assertEqual(newcomm, oldcomm) self.assertEqual(type(newcomm), type(oldcomm)) def testCartcomm(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for ndim in (1,2,3,4,5): dims = MPI.Compute_dims(size, [0]*ndim) periods = [True] * len(dims) topo = comm.Create_cart(dims, periods=periods) self.assertTrue(topo.is_topo) self.assertTrue(topo.topology, MPI.CART) self.checkFortran(topo) self.assertEqual(topo.dim, len(dims)) self.assertEqual(topo.ndim, len(dims)) coordinates = topo.coords self.assertEqual(coordinates, topo.Get_coords(topo.rank)) neighbors = [] for i in range(ndim): for d in (-1, +1): coord = list(coordinates) coord[i] = (coord[i]+d) % dims[i] neigh = topo.Get_cart_rank(coord) self.assertEqual(coord, topo.Get_coords(neigh)) source, dest = topo.Shift(i, d) self.assertEqual(neigh, dest) neighbors.append(neigh) self.assertEqual(topo.indegree, len(neighbors)) self.assertEqual(topo.outdegree, len(neighbors)) self.assertEqual(topo.inedges, neighbors) self.assertEqual(topo.outedges, neighbors) inedges, outedges = topo.inoutedges self.assertEqual(inedges, neighbors) self.assertEqual(outedges, neighbors) if ndim == 1: topo.Free() continue for i in range(ndim): rem_dims = [1]*ndim rem_dims[i] = 0 sub = topo.Sub(rem_dims) if sub != MPI.COMM_NULL: self.assertEqual(sub.dim, ndim-1) dims = topo.dims del dims[i] self.assertEqual(sub.dims, dims) sub.Free() topo.Free() 
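    # ------------------------------------------------------------------
    # Editor's note: a compact, class-level sketch (not an original test)
    # of the Cartesian workflow exercised by testCartcomm above.  The
    # helper name is an illustrative assumption; it presumes an mpiexec
    # launch.  Compute_dims picks a balanced grid, Create_cart builds the
    # topology, and Shift gives the (source, dest) pair for a periodic
    # exchange along each axis.
    @staticmethod
    def _cartesian_shift_sketch():
        comm = MPI.COMM_WORLD
        dims = MPI.Compute_dims(comm.Get_size(), [0, 0])
        cart = comm.Create_cart(dims, periods=[True, True])
        try:
            rank = cart.Get_rank()
            for axis in range(len(dims)):
                source, dest = cart.Shift(axis, 1)
                # each rank forwards its own rank one step; what arrives
                # must therefore be the rank of the shift's source
                token = cart.sendrecv(rank, dest=dest, source=source)
                assert token == source
            return cart.Get_coords(rank)
        finally:
            cart.Free()
    # ------------------------------------------------------------------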
@unittest.skipMPI('MPI(<2.0)') def testCartcommZeroDim(self): comm = self.COMM topo = comm.Create_cart([]) if topo == MPI.COMM_NULL: return self.assertEqual(topo.dim, 0) self.assertEqual(topo.dims, []) self.assertEqual(topo.periods, []) self.assertEqual(topo.coords, []) rank = topo.Get_cart_rank([]) self.assertEqual(rank, 0) inedges, outedges = topo.inoutedges self.assertEqual(inedges, []) self.assertEqual(outedges, []) topo.Free() def testGraphcomm(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) topo = comm.Create_graph(index[1:], edges) self.assertTrue(topo.is_topo) self.assertTrue(topo.topology, MPI.GRAPH) self.checkFortran(topo) topo.Free() topo = comm.Create_graph(index, edges) self.assertEqual(topo.dims, (len(index)-1, len(edges))) self.assertEqual(topo.nnodes, len(index)-1) self.assertEqual(topo.nedges, len(edges)) self.assertEqual(topo.index, index[1:]) self.assertEqual(topo.edges, edges) neighbors = edges[index[rank]:index[rank+1]] self.assertEqual(neighbors, topo.neighbors) for rank in range(size): neighs = topo.Get_neighbors(rank) self.assertEqual(neighs, [(rank-1)%size, (rank+1)%size]) self.assertEqual(topo.indegree, len(neighbors)) self.assertEqual(topo.outdegree, len(neighbors)) self.assertEqual(topo.inedges, neighbors) self.assertEqual(topo.outedges, neighbors) inedges, outedges = topo.inoutedges self.assertEqual(inedges, neighbors) self.assertEqual(outedges, neighbors) topo.Free() @unittest.skipMPI('msmpi') def testDistgraphcommAdjacent(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() try: topo = comm.Create_dist_graph_adjacent(None, None) topo.Free() except NotImplementedError: self.skipTest('mpi-comm-create_dist_graph_adjacent') # sources = [(rank-2)%size, (rank-1)%size] destinations = [(rank+1)%size, (rank+2)%size] topo = comm.Create_dist_graph_adjacent(sources, destinations) self.assertTrue(topo.is_topo) self.assertTrue(topo.topology, MPI.DIST_GRAPH) self.checkFortran(topo) self.assertEqual(topo.Get_dist_neighbors_count(), (2, 2, False)) self.assertEqual(topo.Get_dist_neighbors(), (sources, destinations, None)) self.assertEqual(topo.indegree, len(sources)) self.assertEqual(topo.outdegree, len(destinations)) self.assertEqual(topo.inedges, sources) self.assertEqual(topo.outedges, destinations) inedges, outedges = topo.inoutedges self.assertEqual(inedges, sources) self.assertEqual(outedges, destinations) topo.Free() # sourceweights = [1, 2] destweights = [3, 4] weights = (sourceweights, destweights) topo = comm.Create_dist_graph_adjacent(sources, destinations, sourceweights, destweights) self.assertEqual(topo.Get_dist_neighbors_count(), (2, 2, True)) self.assertEqual(topo.Get_dist_neighbors(), (sources, destinations, weights)) topo.Free() # topo = comm.Create_dist_graph_adjacent(sources, None, MPI.UNWEIGHTED, None) self.assertEqual(topo.Get_dist_neighbors_count(), (2, 0, False)) self.assertEqual(topo.Get_dist_neighbors(), (sources, [], None)) topo.Free() topo = comm.Create_dist_graph_adjacent(None, destinations, None, MPI.UNWEIGHTED) self.assertEqual(topo.Get_dist_neighbors_count(), (0, 2, False)) self.assertEqual(topo.Get_dist_neighbors(), ([], destinations, None)) topo.Free() if MPI.VERSION < 3: return topo = comm.Create_dist_graph_adjacent([], [], MPI.WEIGHTS_EMPTY, MPI.WEIGHTS_EMPTY) self.assertEqual(topo.Get_dist_neighbors_count(), (0, 0, True)) self.assertEqual(topo.Get_dist_neighbors(), ([], [], 
([], []))) topo.Free() @unittest.skipMPI('msmpi') @unittest.skipMPI('PlatformMPI') def testDistgraphcomm(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() # try: topo = comm.Create_dist_graph([], [], [], MPI.UNWEIGHTED) topo.Free() except NotImplementedError: self.skipTest('mpi-comm-create_dist_graph') # sources = [rank] degrees = [3] destinations = [(rank-1)%size, rank, (rank+1)%size] topo = comm.Create_dist_graph(sources, degrees, destinations, MPI.UNWEIGHTED) self.assertTrue(topo.is_topo) self.assertTrue(topo.topology, MPI.DIST_GRAPH) self.checkFortran(topo) self.assertEqual(topo.Get_dist_neighbors_count(), (3, 3, False)) topo.Free() weights = list(range(1,4)) topo = comm.Create_dist_graph(sources, degrees, destinations, weights) self.assertEqual(topo.Get_dist_neighbors_count(), (3, 3, True)) topo.Free() def testCartMap(self): comm = self.COMM size = comm.Get_size() for ndim in (1,2,3,4,5): for periods in (None, True, False): dims = MPI.Compute_dims(size, [0]*ndim) topo = comm.Create_cart(dims, periods, reorder=True) rank = comm.Cart_map(dims, periods) self.assertEqual(topo.Get_rank(), rank) topo.Free() def testGraphMap(self): comm = self.COMM size = comm.Get_size() index, edges = [0], [] for i in range(size): pos = index[-1] index.append(pos+2) edges.append((i-1)%size) edges.append((i+1)%size) # Version 1 topo = comm.Create_graph(index, edges, reorder=True) rank = comm.Graph_map(index, edges) self.assertEqual(topo.Get_rank(), rank) topo.Free() # Version 2 topo = comm.Create_graph(index[1:], edges, reorder=True) rank = comm.Graph_map(index[1:], edges) self.assertEqual(topo.Get_rank(), rank) topo.Free() class TestTopoSelf(BaseTestTopo, unittest.TestCase): COMM = MPI.COMM_SELF class TestTopoWorld(BaseTestTopo, unittest.TestCase): COMM = MPI.COMM_WORLD class TestTopoSelfDup(TestTopoSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestTopoWorldDup(TestTopoWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_ctypes.py000066400000000000000000000035111460670727200166550ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys pypy_lt_510 = (hasattr(sys, 'pypy_version_info') and sys.pypy_version_info < (5, 10)) try: if pypy_lt_510: ctypes = None else: import ctypes except ImportError: ctypes = None @unittest.skipIf(ctypes is None, 'ctypes') class TestCTYPES(unittest.TestCase): objects = [ MPI.DATATYPE_NULL, MPI.INT, MPI.DOUBLE, MPI.REQUEST_NULL, MPI.INFO_NULL, MPI.INFO_ENV, MPI.ERRHANDLER_NULL, MPI.ERRORS_RETURN, MPI.ERRORS_ARE_FATAL, MPI.GROUP_NULL, MPI.GROUP_EMPTY, MPI.WIN_NULL, MPI.OP_NULL, MPI.SUM, MPI.MIN, MPI.MAX, MPI.FILE_NULL, MPI.MESSAGE_NULL, MPI.MESSAGE_NO_PROC, MPI.COMM_NULL, MPI.COMM_SELF, MPI.COMM_WORLD, ] def testHandleAdress(self): typemap = {ctypes.sizeof(ctypes.c_int): ctypes.c_int, ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p} for obj in self.objects: handle_t = typemap[MPI._sizeof(obj)] oldobj = obj newobj = type(obj)() handle_old = handle_t.from_address(MPI._addressof(oldobj)) handle_new = handle_t.from_address(MPI._addressof(newobj)) handle_new.value = handle_old.value self.assertEqual(obj, newobj) def testHandleValue(self): typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32, ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64} for obj in self.objects: uintptr_t = typemap[MPI._sizeof(obj)] handle = uintptr_t.from_address(MPI._addressof(obj)) 
self.assertEqual(handle.value, MPI._handleof(obj)) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_datatype.py000066400000000000000000000364161460670727200171730ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys datatypes_c = [ MPI.CHAR, MPI.WCHAR, MPI.SIGNED_CHAR, MPI.SHORT, MPI.INT, MPI.LONG, MPI.UNSIGNED_CHAR, MPI.UNSIGNED_SHORT, MPI.UNSIGNED, MPI.UNSIGNED_LONG, MPI.LONG_LONG, MPI.UNSIGNED_LONG_LONG, MPI.FLOAT, MPI.DOUBLE, MPI.LONG_DOUBLE, ] datatypes_c99 = [ MPI.C_BOOL, MPI.INT8_T, MPI.INT16_T, MPI.INT32_T, MPI.INT64_T, MPI.UINT8_T, MPI.UINT16_T, MPI.UINT32_T, MPI.UINT64_T, MPI.C_COMPLEX, MPI.C_FLOAT_COMPLEX, MPI.C_DOUBLE_COMPLEX, MPI.C_LONG_DOUBLE_COMPLEX, ] datatypes_f = [ MPI.CHARACTER, MPI.LOGICAL, MPI.INTEGER, MPI.REAL, MPI.DOUBLE_PRECISION, MPI.COMPLEX, MPI.DOUBLE_COMPLEX, ] datatypes_f90 = [ MPI.LOGICAL1, MPI.LOGICAL2, MPI.LOGICAL4, MPI.LOGICAL8, MPI.INTEGER1, MPI.INTEGER2, MPI.INTEGER4, MPI.INTEGER8, MPI.INTEGER16, MPI.REAL2, MPI.REAL4, MPI.REAL8, MPI.REAL16, MPI.COMPLEX4, MPI.COMPLEX8, MPI.COMPLEX16, MPI.COMPLEX32, ] datatypes_mpi = [ MPI.PACKED, MPI.BYTE, MPI.AINT, MPI.OFFSET, ] datatypes = [] datatypes += datatypes_c datatypes += datatypes_c99 datatypes += datatypes_f datatypes += datatypes_f90 datatypes += datatypes_mpi datatypes = [t for t in datatypes if t != MPI.DATATYPE_NULL] combiner_map = {} class TestDatatype(unittest.TestCase): def testBoolEqNe(self): for dtype in datatypes: self.assertTrue (not not dtype) self.assertTrue (dtype == MPI.Datatype(dtype)) self.assertFalse(dtype != MPI.Datatype(dtype)) def testGetExtent(self): for dtype in datatypes: lb, ext = dtype.Get_extent() self.assertEqual(dtype.lb, lb) self.assertEqual(dtype.ub, lb+ext) self.assertEqual(dtype.extent, ext) def testGetSize(self): for dtype in datatypes: size = dtype.Get_size() self.assertTrue(dtype.size, size) def testGetTrueExtent(self): for dtype in datatypes: try: lb, ext = dtype.Get_true_extent() self.assertEqual(dtype.true_lb, lb) self.assertEqual(dtype.true_ub, lb+ext) self.assertEqual(dtype.true_extent, ext) except NotImplementedError: self.skipTest('mpi-type-get_true_extent') def testGetEnvelope(self): for dtype in datatypes: try: envelope = dtype.Get_envelope() except NotImplementedError: self.skipTest('mpi-type-get_envelope') if ('LAM/MPI' == MPI.get_vendor()[0] and "COMPLEX" in dtype.name): continue ni, na, nd, combiner = envelope self.assertEqual(combiner, MPI.COMBINER_NAMED) self.assertEqual(ni, 0) self.assertEqual(na, 0) self.assertEqual(nd, 0) self.assertEqual(dtype.envelope, envelope) self.assertEqual(dtype.combiner, combiner) self.assertTrue(dtype.is_named) self.assertTrue(dtype.is_predefined) otype = dtype.decode() self.assertTrue(dtype is otype) def check_datatype_contents(self, oldtype, factory, newtype): try: envelope = newtype.Get_envelope() contents = newtype.Get_contents() except NotImplementedError: self.skipTest('mpi-type-get_envelope') ni, na, nd, combiner = envelope i, a, d = contents self.assertEqual(ni, len(i)) self.assertEqual(na, len(a)) self.assertEqual(nd, len(d)) self.assertTrue(combiner != MPI.COMBINER_NAMED) self.assertEqual(newtype.envelope, envelope) self.assertEqual(newtype.contents, contents) self.assertEqual(newtype.combiner, combiner) self.assertFalse(newtype.is_named) if combiner in (MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX,): self.assertTrue(newtype.is_predefined) else: self.assertFalse(newtype.is_predefined) name = factory.__name__ NAME = 
name.replace('Create_', '').upper() symbol = getattr(MPI, 'COMBINER_' + NAME) if symbol == MPI.UNDEFINED: return if combiner_map is None: return symbol = combiner_map.get(symbol, symbol) if symbol is None: return self.assertEqual(symbol, combiner) decoded = newtype.decode() oldtype, constructor, kargs = decoded constructor = 'Create_' + constructor.lower() newtype2 = getattr(oldtype, constructor)(**kargs) decoded2 = newtype2.decode() self.assertEqual(decoded[1], decoded2[1]) self.assertEqual(decoded[2], decoded2[2]) if combiner not in (MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX,): self.assertFalse(newtype2.is_predefined) newtype2.Free() else: self.assertTrue(newtype2.is_predefined) def check_datatype(self, oldtype, factory, *args): try: if isinstance(oldtype, MPI.Datatype): newtype = factory(oldtype, *args) else: newtype = factory(*args) except NotImplementedError: self.skipTest('mpi-type-constructor') self.check_datatype_contents(oldtype, factory, newtype) newtype.Commit() self.check_datatype_contents(oldtype, factory, newtype) combiner = newtype.Get_envelope()[-1] if combiner not in (MPI.COMBINER_F90_INTEGER, MPI.COMBINER_F90_REAL, MPI.COMBINER_F90_COMPLEX,): newtype.Free() def testDup(self): for dtype in datatypes: factory = MPI.Datatype.Dup self.check_datatype(dtype, factory) def testCreateContiguous(self): for dtype in datatypes: for count in range(5): factory = MPI.Datatype.Create_contiguous args = (count, ) self.check_datatype(dtype, factory, *args) def testCreateVector(self): for dtype in datatypes: for count in range(5): for blocklength in range(5): for stride in range(5): factory = MPI.Datatype.Create_vector args = (count, blocklength, stride) self.check_datatype(dtype, factory, *args) def testCreateHvector(self): for dtype in datatypes: for count in range(5): for blocklength in range(5): for stride in range(5): factory = MPI.Datatype.Create_hvector args = (count, blocklength, stride) self.check_datatype(dtype, factory, *args) def testCreateIndexed(self): for dtype in datatypes: for block in range(5): blocklengths = list(range(block, block+5)) displacements = [0] for b in blocklengths[:-1]: stride = displacements[-1] + b * dtype.extent + 1 displacements.append(stride) factory = MPI.Datatype.Create_indexed args = (blocklengths, displacements) self.check_datatype(dtype, factory, *args) #args = (block, displacements) XXX #self.check_datatype(dtype, factory, *args) XXX def testCreateIndexedBlock(self): for dtype in datatypes: for block in range(5): blocklengths = list(range(block, block+5)) displacements = [0] for b in blocklengths[:-1]: stride = displacements[-1] + b * dtype.extent + 1 displacements.append(stride) factory = MPI.Datatype.Create_indexed_block args = (block, displacements) self.check_datatype(dtype, factory, *args) def testCreateHindexed(self): for dtype in datatypes: for block in range(5): blocklengths = list(range(block, block+5)) displacements = [0] for b in blocklengths[:-1]: stride = displacements[-1] + b * dtype.extent + 1 displacements.append(stride) factory = MPI.Datatype.Create_hindexed args = (blocklengths, displacements) self.check_datatype(dtype, factory, *args) #args = (block, displacements) XXX #self.check_datatype(dtype, factory, *args) XXX @unittest.skipMPI('openmpi(<=1.8.1)', MPI.VERSION == 3) def testCreateHindexedBlock(self): for dtype in datatypes: for block in range(5): displacements = [0] for i in range(5): stride = displacements[-1] + block * dtype.extent + 1 displacements.append(stride) factory = 
MPI.Datatype.Create_hindexed_block args = (block, displacements) self.check_datatype(dtype, factory, *args) def testCreateStruct(self): for dtype1 in datatypes: for dtype2 in datatypes: dtypes = (dtype1, dtype2) blocklengths = (2, 3) displacements = [0] for dtype in dtypes[:-1]: stride = displacements[-1] + dtype.extent displacements.append(stride) factory = MPI.Datatype.Create_struct args = (blocklengths, displacements, dtypes) self.check_datatype(dtypes, factory, *args) def testCreateSubarray(self): for dtype in datatypes: for ndim in range(1, 5): for size in range(1, 5): for subsize in range(1, size): for start in range(size-subsize): for order in [MPI.ORDER_C, MPI.ORDER_FORTRAN, MPI.ORDER_F, ]: sizes = [size] * ndim subsizes = [subsize] * ndim starts = [start] * ndim factory = MPI.Datatype.Create_subarray args = sizes, subsizes, starts, order self.check_datatype(dtype, factory, *args) def testCreateDarray(self): for dtype in datatypes: for ndim in range(1, 3+1): for size in (4, 8, 9, 27): for rank in (0, size-1): for dist in [MPI.DISTRIBUTE_BLOCK, MPI.DISTRIBUTE_CYCLIC]: for order in [MPI.ORDER_C, MPI.ORDER_F]: gsizes = [size]*ndim distribs = [dist]*ndim dargs = [MPI.DISTRIBUTE_DFLT_DARG]*ndim psizes = MPI.Compute_dims(size, [0]*ndim) factory = MPI.Datatype.Create_darray args = size, rank, gsizes, distribs, dargs, psizes, order self.check_datatype(dtype, factory, *args) def testCreateF90Integer(self): for r in (1, 2, 4): factory = MPI.Datatype.Create_f90_integer args = (r,) self.check_datatype(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') @unittest.skipMPI('SpectrumMPI') def testCreateF90RealSingle(self): (p, r) = (6, 30) factory = MPI.Datatype.Create_f90_real args = (p, r) self.check_datatype(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') @unittest.skipMPI('SpectrumMPI') def testCreateF90RealDouble(self): (p, r) = (15, 300) factory = MPI.Datatype.Create_f90_real args = (p, r) self.check_datatype(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') @unittest.skipMPI('SpectrumMPI') def testCreateF90ComplexSingle(self): (p, r) = (6, 30) factory = MPI.Datatype.Create_f90_complex args = (p, r) self.check_datatype(None, factory, *args) @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('msmpi') @unittest.skipMPI('SpectrumMPI') def testCreateF90ComplexDouble(self): (p, r) = (15, 300) factory = MPI.Datatype.Create_f90_complex args = (p, r) self.check_datatype(None, factory, *args) match_size_integer = [1, 2, 4, 8] match_size_real = [4, 8] match_size_complex = [8, 16] @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('openmpi', (MPI.CHARACTER == MPI.DATATYPE_NULL or MPI.CHARACTER.Get_size() == 0)) def testMatchSize(self): typeclass = MPI.TYPECLASS_INTEGER for size in self.match_size_integer: datatype = MPI.Datatype.Match_size(typeclass, size) self.assertEqual(size, datatype.size) typeclass = MPI.TYPECLASS_REAL for size in self.match_size_real: datatype = MPI.Datatype.Match_size(typeclass, size) self.assertEqual(size, datatype.size) typeclass = MPI.TYPECLASS_COMPLEX for size in self.match_size_complex: datatype = MPI.Datatype.Match_size(typeclass, size) self.assertEqual(size, datatype.size) def testCreateResized(self): for dtype in datatypes: for lb in range(-10, 10): for extent in range(1, 10): factory = MPI.Datatype.Create_resized args = lb, extent self.check_datatype(dtype, factory, *args) def testGetSetName(self): for dtype in datatypes: try: name = dtype.Get_name() 
self.assertTrue(name) dtype.Set_name(name) self.assertEqual(name, dtype.Get_name()) except NotImplementedError: self.skipTest('mpi-type-name') def testCommit(self): for dtype in datatypes: dtype.Commit() name, version = MPI.get_vendor() if name == 'LAM/MPI': combiner_map[MPI.COMBINER_INDEXED_BLOCK] = MPI.COMBINER_INDEXED elif name == 'MPICH1': combiner_map[MPI.COMBINER_VECTOR] = None combiner_map[MPI.COMBINER_HVECTOR] = None combiner_map[MPI.COMBINER_INDEXED] = None combiner_map[MPI.COMBINER_HINDEXED_BLOCK] = None for t in datatypes_f: datatypes.remove(t) elif MPI.Get_version() < (2,0): combiner_map = None if name == 'Open MPI': for t in datatypes_f + datatypes_f90: if t != MPI.DATATYPE_NULL: if t.Get_size() == 0: if t in datatypes: datatypes.remove(t) if (1,6,0) < version < (1,7,0): TestDatatype.match_size_complex[:] = [] if version < (1,5,2): for t in datatypes_f90[-4:]: if t != MPI.DATATYPE_NULL: datatypes.remove(t) if name == 'Platform MPI': combiner_map[MPI.COMBINER_INDEXED_BLOCK] = MPI.COMBINER_INDEXED combiner_map[MPI.COMBINER_DARRAY] = MPI.COMBINER_STRUCT combiner_map[MPI.COMBINER_SUBARRAY] = MPI.COMBINER_STRUCT TestDatatype.match_size_complex[:] = [] if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_dl.py000066400000000000000000000044551460670727200157550ustar00rootroot00000000000000import mpiunittest as unittest import sys, os try: from mpi4py import dl except ImportError: dl = None pypy_lt_510 = (hasattr(sys, 'pypy_version_info') and sys.pypy_version_info < (5, 10)) try: if pypy_lt_510: ctypes = None else: import ctypes except ImportError: ctypes = None @unittest.skipIf(dl is None, 'mpi4py-dl') class TestDL(unittest.TestCase): @unittest.skipIf(ctypes is None, 'ctypes') def testDL1(self): from ctypes.util import find_library libm = find_library('m') handle = dl.dlopen(libm, dl.RTLD_LOCAL|dl.RTLD_LAZY) self.assertTrue(handle != 0) self.assertTrue(dl.dlerror() is None) symbol = dl.dlsym(handle, 'sqrt') self.assertTrue(symbol != 0) self.assertTrue(dl.dlerror() is None) symbol = dl.dlsym(handle, 'xxxxx') self.assertTrue(symbol == 0) self.assertTrue(dl.dlerror() is not None) ierr = dl.dlclose(handle) self.assertTrue(ierr == 0) self.assertTrue(dl.dlerror() is None) @unittest.skipIf(pypy_lt_510 and sys.platform == 'darwin', 'pypy(<5.10)|darwin') def testDL2(self): handle = dl.dlopen(None, dl.RTLD_GLOBAL|dl.RTLD_NOW) self.assertTrue(handle != 0) self.assertTrue(dl.dlerror() is None) symbol = dl.dlsym(handle, 'malloc') self.assertTrue(symbol != 0) self.assertTrue(dl.dlerror() is None) symbol = dl.dlsym(handle, '!@#$%^&*()') self.assertTrue(symbol == 0) self.assertTrue(dl.dlerror() is not None) ierr = dl.dlclose(handle) self.assertTrue(ierr == 0) self.assertTrue(dl.dlerror() is None) def testDL3(self): symbol = dl.dlsym(None, 'malloc') self.assertTrue(symbol != 0) self.assertTrue(dl.dlerror() is None) symbol = dl.dlsym(None, '!@#$%^&*()') self.assertTrue(symbol == 0) self.assertTrue(dl.dlerror() is not None) ierr = dl.dlclose(None) self.assertTrue(ierr == 0) self.assertTrue(dl.dlerror() is None) def testDL4(self): handle = dl.dlopen('xxxxx', dl.RTLD_LOCAL|dl.RTLD_LAZY) self.assertTrue(handle == 0) self.assertTrue(dl.dlerror() is not None) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_doc.py000066400000000000000000000034041460670727200161140ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys, types ModuleType = type(MPI) ClassType = type(MPI.Comm) FunctionType = type(MPI.Init) MethodDescrType = 
type(MPI.Comm.Get_rank) GetSetDescrType = type(MPI.Comm.rank) def getdocstr(mc, docstrings, namespace=None): name = getattr(mc, '__name__', None) if name is None: return if name in ('__builtin__', 'builtins'): return if name.startswith('_'): return if namespace: name = '%s.%s' % (namespace, name) if type(mc) in (ModuleType, ClassType): doc = getattr(mc, '__doc__', None) if doc == "": return docstrings[name] = doc for k, v in vars(mc).items(): if isinstance(v, staticmethod): v = v.__get__(object) getdocstr(v, docstrings, name) elif type(mc) in (FunctionType, MethodDescrType, GetSetDescrType): doc = getattr(mc, '__doc__', None) if doc == "": return docstrings[name] = doc class TestDoc(unittest.TestCase): @unittest.skipIf(hasattr(sys, 'pypy_version_info'), 'pypy') def testDoc(self): missing = False docs = { } getdocstr(MPI, docs) for k in docs: if not k.startswith('_'): doc = docs[k] if doc is None: print ("'%s': missing docstring" % k) missing = True else: doc = doc.strip() if not doc: print ("'%s': empty docstring" % k) missing = True if 'mpi4py.MPI' in doc: print ("'%s': bad format docstring" % k) self.assertFalse(missing) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_dynproc.py000066400000000000000000000177351460670727200170410ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest try: import socket except ImportError: socket = None def ch4_ucx(): return 'ch4:ucx' in MPI.Get_library_version() def ch4_ofi(): return 'ch4:ofi' in MPI.Get_library_version() def appnum(): if MPI.APPNUM == MPI.KEYVAL_INVALID: return None return MPI.COMM_WORLD.Get_attr(MPI.APPNUM) def badport(): if MPI.get_vendor()[0] != 'MPICH': return False try: port = MPI.Open_port() MPI.Close_port(port) except: port = "" return port == "" @unittest.skipMPI('mpich', badport()) @unittest.skipMPI('openmpi(<2.0.0)') @unittest.skipMPI('MVAPICH2') @unittest.skipMPI('msmpi(<8.1.0)') class TestDPM(unittest.TestCase): message = [ None, True, False, -7, 0, 7, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', (1, 2, 3), [1, 2, 3], {1:2}, ] @unittest.skipMPI('mpich', appnum() is None) @unittest.skipMPI('MPICH2', appnum() is None) @unittest.skipMPI('MPICH1', appnum() is None) @unittest.skipMPI('msmpi(<8.1.0)', appnum() is None) @unittest.skipMPI('PlatformMPI') def testNamePublishing(self): rank = MPI.COMM_WORLD.Get_rank() service = "mpi4py-%d" % rank port = MPI.Open_port() MPI.Publish_name(service, port) found = MPI.Lookup_name(service) self.assertEqual(port, found) MPI.Unpublish_name(service, port) MPI.Close_port(port) @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') @unittest.skipMPI('mpich(==3.4.1)', ch4_ofi()) def testAcceptConnect(self): comm_self = MPI.COMM_SELF comm_world = MPI.COMM_WORLD wsize = comm_world.Get_size() wrank = comm_world.Get_rank() group_world = comm_world.Get_group() group = group_world.Excl([0]) group_world.Free() comm = comm_world.Create(group) group.Free() if wrank == 0: self.assertEqual(comm, MPI.COMM_NULL) else: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, comm_world.size-1) self.assertEqual(comm.rank, comm_world.rank-1) if wrank == 0: port = MPI.Open_port() comm_world.send(port, dest=1) intercomm = comm_self.Accept(port) self.assertEqual(intercomm.remote_size, comm_world.size-1) self.assertEqual(intercomm.size, 1) self.assertEqual(intercomm.rank, 0) MPI.Close_port(port) else: if wrank == 1: port = comm_world.recv(source=0) else: port = None intercomm = comm.Connect(port, root=0) 
self.assertEqual(intercomm.remote_size, 1) self.assertEqual(intercomm.size, comm_world.size-1) self.assertEqual(intercomm.rank, comm.rank) comm.Free() if wrank == 0: message = TestDPM.message root = MPI.ROOT else: message = None root = 0 message = intercomm.bcast(message, root) if wrank == 0: self.assertEqual(message, None) else: self.assertEqual(message, TestDPM.message) intercomm.Free() @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') def testConnectAccept(self): comm_self = MPI.COMM_SELF comm_world = MPI.COMM_WORLD wsize = comm_world.Get_size() wrank = comm_world.Get_rank() group_world = comm_world.Get_group() group = group_world.Excl([0]) group_world.Free() comm = comm_world.Create(group) group.Free() if wrank == 0: self.assertEqual(comm, MPI.COMM_NULL) else: self.assertNotEqual(comm, MPI.COMM_NULL) self.assertEqual(comm.size, comm_world.size-1) self.assertEqual(comm.rank, comm_world.rank-1) if wrank == 0: port = comm_world.recv(source=1) intercomm = comm_self.Connect(port) self.assertEqual(intercomm.remote_size, comm_world.size-1) self.assertEqual(intercomm.size, 1) self.assertEqual(intercomm.rank, 0) else: if wrank == 1: port = MPI.Open_port() comm_world.send(port, dest=0) else: port = None intercomm = comm.Accept(port, root=0) if wrank == 1: MPI.Close_port(port) self.assertEqual(intercomm.remote_size, 1) self.assertEqual(intercomm.size, comm_world.size-1) self.assertEqual(intercomm.rank, comm.rank) comm.Free() if wrank == 0: message = TestDPM.message root = MPI.ROOT else: message = None root = 0 message = intercomm.bcast(message, root) if wrank == 0: self.assertEqual(message, None) else: self.assertEqual(message, TestDPM.message) intercomm.Free() @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') @unittest.skipIf(socket is None, 'socket') def testJoin(self): size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() server = client = address = None host = socket.gethostname() addresses = socket.getaddrinfo(host, None, 0, socket.SOCK_STREAM) address_families = [ a[0] for a in addresses ] # if both INET and INET6 are available, don't assume the order # is the same on both server and client. Select INET if available. 
if socket.AF_INET in address_families: socket_family = socket.AF_INET elif socket.AF_INET6 in address_families: socket_family = socket.AF_INET6 elif address_families: # allow for AF_UNIX (or other families) socket_family = address_families[0] else: self.skipTest("socket") # create server/client sockets if rank == 0: # server server = socket.socket(socket_family, socket.SOCK_STREAM) server.bind((host, 0)) server.listen(0) if rank == 1: # client client = socket.socket(socket_family, socket.SOCK_STREAM) # communicate address if rank == 0: address = server.getsockname() MPI.COMM_WORLD.ssend(address, 1) if rank == 1: address = MPI.COMM_WORLD.recv(None, 0) MPI.COMM_WORLD.Barrier() # stablish client/server connection connected = False if rank == 0: # server client = server.accept()[0] server.close() if rank == 1: # client try: client.connect(address) connected = True except socket.error: raise connected = MPI.COMM_WORLD.bcast(connected, root=1) # test Comm.Join() MPI.COMM_WORLD.Barrier() if client: fd = client.fileno() intercomm = MPI.Comm.Join(fd) client.close() if intercomm != MPI.COMM_NULL: self.assertEqual(intercomm.remote_size, 1) self.assertEqual(intercomm.size, 1) self.assertEqual(intercomm.rank, 0) if rank == 0: message = TestDPM.message root = MPI.ROOT else: message = None root = 0 message = intercomm.bcast(message, root) if rank == 0: self.assertEqual(message, None) else: self.assertEqual(message, TestDPM.message) intercomm.Free() MPI.COMM_WORLD.Barrier() MVAPICH2 = MPI.get_vendor()[0] == 'MVAPICH2' try: if MVAPICH2: raise NotImplementedError except NotImplementedError: unittest.disable(TestDPM, 'mpi-dpm') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_environ.py000066400000000000000000000066521460670727200170370ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest def appnum(): if MPI.APPNUM == MPI.KEYVAL_INVALID: return None return MPI.COMM_WORLD.Get_attr(MPI.APPNUM) class TestEnviron(unittest.TestCase): def testIsInitialized(self): flag = MPI.Is_initialized() self.assertTrue(type(flag) is bool) self.assertTrue(flag) def testIsFinalized(self): flag = MPI.Is_finalized() self.assertTrue(type(flag) is bool) self.assertFalse(flag) def testGetVersion(self): version = MPI.Get_version() self.assertEqual(len(version), 2) major, minor = version self.assertTrue(type(major) is int) self.assertTrue(major >= 1) self.assertTrue(type(minor) is int) self.assertTrue(minor >= 0) def testGetLibraryVersion(self): version = MPI.Get_library_version() self.assertTrue(isinstance(version, str)) self.assertTrue(len(version) > 0) def testGetProcessorName(self): procname = MPI.Get_processor_name() self.assertTrue(isinstance(procname, str)) def testWTime(self): time1 = MPI.Wtime() self.assertTrue(type(time1) is float) time2 = MPI.Wtime() self.assertTrue(type(time2) is float) self.assertTrue(time2 >= time1) def testWTick(self): tick = MPI.Wtick() self.assertTrue(type(tick) is float) self.assertTrue(tick > 0.0) def testPControl(self): for level in (2, 1, 0): MPI.Pcontrol(level) MPI.Pcontrol(1) class TestWorldAttrs(unittest.TestCase): def testWTimeIsGlobal(self): wtg = MPI.COMM_WORLD.Get_attr(MPI.WTIME_IS_GLOBAL) if wtg is not None: self.assertTrue(wtg in (True, False)) def testWTimeIsGlobal(self): wtg = MPI.COMM_WORLD.Get_attr(MPI.WTIME_IS_GLOBAL) if wtg is not None: self.assertTrue(wtg in (True, False)) def testHostPorcessor(self): size = MPI.COMM_WORLD.Get_size() vals = list(range(size)) + [MPI.PROC_NULL] hostproc = MPI.COMM_WORLD.Get_attr(MPI.HOST) if hostproc is 
not None: self.assertTrue(hostproc in vals) def testIOProcessor(self): size = MPI.COMM_WORLD.Get_size() vals = list(range(size)) + [MPI.UNDEFINED, MPI.ANY_SOURCE, MPI.PROC_NULL] ioproc = MPI.COMM_WORLD.Get_attr(MPI.IO) if ioproc is not None: self.assertTrue(ioproc in vals) @unittest.skipIf(MPI.APPNUM == MPI.KEYVAL_INVALID, 'mpi-appnum') def testAppNum(self): appnum = MPI.COMM_WORLD.Get_attr(MPI.APPNUM) if appnum is not None: self.assertTrue(appnum == MPI.UNDEFINED or appnum >= 0) @unittest.skipMPI('MPICH(>1.2.0)', appnum() is None) @unittest.skipMPI('MVAPICH2', appnum() is None) @unittest.skipMPI('MPICH2', appnum() is None) @unittest.skipIf(MPI.UNIVERSE_SIZE == MPI.KEYVAL_INVALID, 'mpi-universe-size') def testUniverseSize(self): univsz = MPI.COMM_WORLD.Get_attr(MPI.UNIVERSE_SIZE) if univsz is not None: self.assertTrue(univsz == MPI.UNDEFINED or univsz >= 0) @unittest.skipIf(MPI.LASTUSEDCODE == MPI.KEYVAL_INVALID, 'mpi-lastusedcode') def testLastUsedCode(self): lastuc = MPI.COMM_WORLD.Get_attr(MPI.LASTUSEDCODE) self.assertTrue(lastuc >= 0) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_errhandler.py000066400000000000000000000055321460670727200175010ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestErrhandler(unittest.TestCase): def testPredefined(self): self.assertFalse(MPI.ERRHANDLER_NULL) self.assertTrue(MPI.ERRORS_ARE_FATAL) self.assertTrue(MPI.ERRORS_RETURN) def testCommGetSetErrhandler(self): for COMM in [MPI.COMM_SELF, MPI.COMM_WORLD]: for ERRHANDLER in [MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN, MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN, ]: errhdl_1 = COMM.Get_errhandler() self.assertNotEqual(errhdl_1, MPI.ERRHANDLER_NULL) COMM.Set_errhandler(ERRHANDLER) errhdl_2 = COMM.Get_errhandler() self.assertEqual(errhdl_2, ERRHANDLER) errhdl_2.Free() self.assertEqual(errhdl_2, MPI.ERRHANDLER_NULL) COMM.Set_errhandler(errhdl_1) errhdl_1.Free() self.assertEqual(errhdl_1, MPI.ERRHANDLER_NULL) def testGetErrhandler(self): errhdls = [] for i in range(100): e = MPI.COMM_WORLD.Get_errhandler() errhdls.append(e) for e in errhdls: e.Free() for e in errhdls: self.assertEqual(e, MPI.ERRHANDLER_NULL) @unittest.skipMPI('MPI(<2.0)') def testCommCallErrhandler(self): errhdl = MPI.COMM_SELF.Get_errhandler() comm = MPI.COMM_SELF.Dup() comm.Set_errhandler(MPI.ERRORS_RETURN) comm.Call_errhandler(MPI.ERR_OTHER) comm.Free() @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('SpectrumMPI') def testWinCallErrhandler(self): try: win = MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF) except NotImplementedError: self.skipTest('mpi-win') win.Set_errhandler(MPI.ERRORS_RETURN) win.Call_errhandler(MPI.ERR_OTHER) win.Free() @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('msmpi') def testFileCallErrhandler(self): import os, tempfile rank = MPI.COMM_WORLD.Get_rank() fd, filename = tempfile.mkstemp(prefix='mpi4py-', suffix="-%d"%rank) os.close(fd) amode = MPI.MODE_WRONLY | MPI.MODE_CREATE | MPI.MODE_DELETE_ON_CLOSE try: file = MPI.File.Open(MPI.COMM_SELF, filename, amode, MPI.INFO_NULL) except NotImplementedError: self.skipTest('mpi-file') file.Set_errhandler(MPI.ERRORS_RETURN) #file.Call_errhandler(MPI.ERR_OTHER) file.Call_errhandler(MPI.SUCCESS) file.Close() try: MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): TestErrhandler.testWinCallErrhandler = \ unittest.disable(TestErrhandler.testWinCallErrhandler, 'mpi-win') if __name__ == '__main__': unittest.main() 
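# --------------------------------------------------------------------
# Editor's illustrative sketch (not part of the mpi4py test files):
# the error-handler round trip that TestErrhandler above exercises,
# shown in isolation. Only documented mpi4py calls are used; the
# duplicated communicator and the MPI.ERR_OTHER code are arbitrary
# choices for this example and assume an MPI-2 (or later) library.

from mpi4py import MPI

comm = MPI.COMM_WORLD.Dup()
saved = comm.Get_errhandler()            # remember the current handler
comm.Set_errhandler(MPI.ERRORS_RETURN)   # failed MPI calls raise MPI.Exception
try:
    comm.Call_errhandler(MPI.ERR_OTHER)  # invoke the handler explicitly
finally:
    comm.Set_errhandler(saved)           # restore the original handler
    saved.Free()
    comm.Free()
# --------------------------------------------------------------------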
mpi4py-3.1.6/test/test_errorcode.py000066400000000000000000000075651460670727200173470ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestErrorCode(unittest.TestCase): errorclasses = [item[1] for item in vars(MPI).items() if item[0].startswith('ERR_')] errorclasses.insert(0, MPI.SUCCESS) errorclasses.remove(MPI.ERR_LASTCODE) def testGetErrorClass(self): self.assertEqual(self.errorclasses[0], 0) for ierr in self.errorclasses: errcls = MPI.Get_error_class(ierr) self.assertTrue(errcls >= MPI.SUCCESS) self.assertTrue(errcls <= MPI.ERR_LASTCODE) self.assertEqual(errcls, ierr) def testGetErrorStrings(self): for ierr in self.errorclasses: errstr = MPI.Get_error_string(ierr) def testException(self): from sys import version_info as py_version success = MPI.Exception(MPI.SUCCESS) lasterr = MPI.Exception(MPI.ERR_LASTCODE) for ierr in self.errorclasses: errstr = MPI.Get_error_string(ierr) errcls = MPI.Get_error_class(ierr) errexc = MPI.Exception(ierr) if py_version >= (2,5): self.assertEqual(errexc.error_code, ierr) self.assertEqual(errexc.error_class, ierr) self.assertEqual(errexc.error_string, errstr) self.assertEqual(repr(errexc), "MPI.Exception(%d)" % ierr) self.assertEqual(str(errexc), errstr) self.assertEqual(int(errexc), ierr) self.assertEqual(hash(errexc), hash(errexc.error_code)) self.assertTrue(errexc == ierr) self.assertTrue(errexc == errexc) self.assertFalse(errexc != ierr) self.assertFalse(errexc != errexc) self.assertTrue(success <= ierr <= lasterr) self.assertTrue(success <= errexc <= lasterr) self.assertTrue(errexc >= ierr) self.assertTrue(errexc >= success) self.assertTrue(lasterr >= ierr) self.assertTrue(lasterr >= errexc) if errexc == success: self.assertFalse(errexc) else: self.assertTrue(errexc) self.assertTrue(errexc > success) self.assertTrue(success < errexc) exc = MPI.Exception(MPI.SUCCESS-1) self.assertTrue(exc, MPI.ERR_UNKNOWN) exc = MPI.Exception(MPI.ERR_LASTCODE+1) self.assertTrue(exc, MPI.ERR_UNKNOWN) @unittest.skipMPI('openmpi(<1.10.0)') def testAddErrorClass(self): try: errclass = MPI.Add_error_class() except NotImplementedError: self.skipTest('mpi-add_error_class') self.assertTrue(errclass >= MPI.ERR_LASTCODE) @unittest.skipMPI('openmpi(<1.10.0)') def testAddErrorClassCodeString(self): try: errclass = MPI.Add_error_class() except NotImplementedError: self.skipTest('mpi-add_error_class') lastused = MPI.COMM_WORLD.Get_attr(MPI.LASTUSEDCODE) self.assertTrue(errclass == lastused) errstr = MPI.Get_error_string(errclass) self.assertEqual(errstr, "") MPI.Add_error_string(errclass, "error class") self.assertEqual(MPI.Get_error_string(errclass), "error class") errcode1 = MPI.Add_error_code(errclass) errstr = MPI.Get_error_string(errcode1) self.assertEqual(errstr, "") MPI.Add_error_string(errcode1, "error code 1") self.assertEqual(MPI.Get_error_class(errcode1), errclass) self.assertEqual(MPI.Get_error_string(errcode1), "error code 1") errcode2 = MPI.Add_error_code(errclass) errstr = MPI.Get_error_string(errcode2) self.assertEqual(errstr, "") MPI.Add_error_string(errcode2, "error code 2") self.assertEqual(MPI.Get_error_class(errcode2), errclass) self.assertEqual(MPI.Get_error_string(errcode2), "error code 2") if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_exceptions.py000066400000000000000000000302171460670727200175320ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys, os HAVE_MPE = 'MPE_LOGFILE_PREFIX' in os.environ HAVE_VT = 'VT_FILE_PREFIX' in os.environ # 
-------------------------------------------------------------------- @unittest.skipMPI('PlatformMPI') @unittest.skipMPI('MPICH2') @unittest.skipIf(HAVE_MPE or HAVE_VT, 'mpe|vt') class BaseTestCase(unittest.TestCase): def setUp(self): self.errhdl_world = MPI.COMM_WORLD.Get_errhandler() MPI.COMM_WORLD.Set_errhandler(MPI.ERRORS_RETURN) self.errhdl_self = MPI.COMM_SELF.Get_errhandler() MPI.COMM_SELF.Set_errhandler(MPI.ERRORS_RETURN) def tearDown(self): MPI.COMM_WORLD.Set_errhandler(self.errhdl_world) self.errhdl_world.Free() MPI.COMM_SELF.Set_errhandler(self.errhdl_self) self.errhdl_self.Free() # -------------------------------------------------------------------- class TestExcDatatypeNull(BaseTestCase): def testDup(self): self.assertRaisesMPI(MPI.ERR_TYPE, MPI.DATATYPE_NULL.Dup) def testCommit(self): self.assertRaisesMPI(MPI.ERR_TYPE, MPI.DATATYPE_NULL.Commit) def testFree(self): self.assertRaisesMPI(MPI.ERR_TYPE, MPI.DATATYPE_NULL.Free) class TestExcDatatype(BaseTestCase): DATATYPES = (MPI.BYTE, MPI.PACKED, MPI.CHAR, MPI.WCHAR, MPI.SIGNED_CHAR, MPI.UNSIGNED_CHAR, MPI.SHORT, MPI.UNSIGNED_SHORT, MPI.INT, MPI.UNSIGNED, MPI.UNSIGNED_INT, MPI.LONG, MPI.UNSIGNED_LONG, MPI.LONG_LONG, MPI.UNSIGNED_LONG_LONG, MPI.FLOAT, MPI.DOUBLE, MPI.LONG_DOUBLE, MPI.SHORT_INT, MPI.TWOINT, MPI.INT_INT, MPI.LONG_INT, MPI.FLOAT_INT, MPI.DOUBLE_INT, MPI.LONG_DOUBLE_INT, MPI.UB, MPI.LB,) ERR_TYPE = MPI.ERR_TYPE @unittest.skipMPI('msmpi') def testFreePredefined(self): for dtype in self.DATATYPES: if dtype != MPI.DATATYPE_NULL: self.assertRaisesMPI(self.ERR_TYPE, dtype.Free) self.assertTrue(dtype != MPI.DATATYPE_NULL) def testKeyvalInvalid(self): for dtype in self.DATATYPES: if dtype != MPI.DATATYPE_NULL: try: self.assertRaisesMPI( [MPI.ERR_KEYVAL, MPI.ERR_OTHER], dtype.Get_attr, MPI.KEYVAL_INVALID) except NotImplementedError: self.skipTest('mpi-type-get_attr') name, version = MPI.get_vendor() if name == 'Open MPI': if version < (1,4,3): TestExcDatatype.DATATYPES = TestExcDatatype.DATATYPES[1:] TestExcDatatype.ERR_TYPE = MPI.ERR_INTERN # -------------------------------------------------------------------- @unittest.skipMPI('msmpi(<=4.2.0)') class TestExcStatus(BaseTestCase): def testGetCount(self): status = MPI.Status() self.assertRaisesMPI( MPI.ERR_TYPE, status.Get_count, MPI.DATATYPE_NULL) def testGetElements(self): status = MPI.Status() self.assertRaisesMPI( MPI.ERR_TYPE, status.Get_elements, MPI.DATATYPE_NULL) @unittest.skipMPI('MPICH1') def testSetElements(self): status = MPI.Status() self.assertRaisesMPI( MPI.ERR_TYPE, status.Set_elements, MPI.DATATYPE_NULL, 0) # -------------------------------------------------------------------- class TestExcRequestNull(BaseTestCase): def testFree(self): self.assertRaisesMPI(MPI.ERR_REQUEST, MPI.REQUEST_NULL.Free) def testCancel(self): self.assertRaisesMPI(MPI.ERR_REQUEST, MPI.REQUEST_NULL.Cancel) # -------------------------------------------------------------------- class TestExcOpNull(BaseTestCase): def testFree(self): self.assertRaisesMPI([MPI.ERR_OP, MPI.ERR_ARG], MPI.OP_NULL.Free) class TestExcOp(BaseTestCase): def testFreePredefined(self): for op in (MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC): self.assertRaisesMPI([MPI.ERR_OP, MPI.ERR_ARG], op.Free) if MPI.REPLACE != MPI.OP_NULL: self.assertRaisesMPI([MPI.ERR_OP, MPI.ERR_ARG], op.Free) # -------------------------------------------------------------------- class TestExcInfoNull(BaseTestCase): def testTruth(self): self.assertFalse(bool(MPI.INFO_NULL)) 
@unittest.skipMPI('msmpi(<8.1.0)') def testDup(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Dup) def testFree(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Free) def testGet(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Get, 'key') def testSet(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Set, 'key', 'value') def testDelete(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Delete, 'key') def testGetNKeys(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Get_nkeys) def testGetNthKey(self): self.assertRaisesMPI( [MPI.ERR_INFO, MPI.ERR_ARG], MPI.INFO_NULL.Get_nthkey, 0) class TestExcInfo(BaseTestCase): def setUp(self): super(TestExcInfo, self).setUp() self.INFO = MPI.Info.Create() def tearDown(self): self.INFO.Free() self.INFO = None super(TestExcInfo, self).tearDown() def testDelete(self): self.assertRaisesMPI( MPI.ERR_INFO_NOKEY, self.INFO.Delete, 'key') def testGetNthKey(self): self.assertRaisesMPI( [MPI.ERR_INFO_KEY, MPI.ERR_ARG], self.INFO.Get_nthkey, 0) try: MPI.Info.Create().Free() except NotImplementedError: unittest.disable(TestExcInfo, 'mpi-info') unittest.disable(TestExcInfoNull, 'mpi-info') # -------------------------------------------------------------------- class TestExcGroupNull(BaseTestCase): def testCompare(self): self.assertRaisesMPI( MPI.ERR_GROUP, MPI.Group.Compare, MPI.GROUP_NULL, MPI.GROUP_NULL) self.assertRaisesMPI( MPI.ERR_GROUP, MPI.Group.Compare, MPI.GROUP_NULL, MPI.GROUP_EMPTY) self.assertRaisesMPI( MPI.ERR_GROUP, MPI.Group.Compare, MPI.GROUP_EMPTY, MPI.GROUP_NULL) def testAccessors(self): for method in ('Get_size', 'Get_rank'): self.assertRaisesMPI( MPI.ERR_GROUP, getattr(MPI.GROUP_NULL, method)) class TestExcGroup(BaseTestCase): pass # -------------------------------------------------------------------- class TestExcCommNull(BaseTestCase): ERR_COMM = MPI.ERR_COMM def testCompare(self): self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_NULL, MPI.COMM_NULL) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_SELF, MPI.COMM_NULL) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_WORLD, MPI.COMM_NULL) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_NULL, MPI.COMM_SELF) self.assertRaisesMPI( self.ERR_COMM, MPI.Comm.Compare, MPI.COMM_NULL, MPI.COMM_WORLD) def testAccessors(self): for method in ('Get_size', 'Get_rank', 'Is_inter', 'Is_intra', 'Get_group', 'Get_topology'): self.assertRaisesMPI(MPI.ERR_COMM, getattr(MPI.COMM_NULL, method)) def testFree(self): self.assertRaisesMPI(MPI.ERR_COMM, MPI.COMM_NULL.Free) def testDisconnect(self): try: self.assertRaisesMPI(MPI.ERR_COMM, MPI.COMM_NULL.Disconnect) except NotImplementedError: self.skipTest('mpi-comm-disconnect') @unittest.skipMPI('openmpi(<1.4.2)') def testGetAttr(self): self.assertRaisesMPI( MPI.ERR_COMM, MPI.COMM_NULL.Get_attr, MPI.TAG_UB) @unittest.skipMPI('openmpi(<1.4.1)') def testGetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_COMM, MPI.ERR_ARG], MPI.COMM_NULL.Get_errhandler) def testSetErrhandler(self): self.assertRaisesMPI( MPI.ERR_COMM, MPI.COMM_NULL.Set_errhandler, MPI.ERRORS_RETURN) def testIntraNull(self): comm_null = MPI.Intracomm() self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Dup) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Create, MPI.GROUP_EMPTY) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Split, color=0, key=0) def testInterNull(self): comm_null = MPI.Intercomm() 
self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Get_remote_group) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Get_remote_size) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Dup) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Create, MPI.GROUP_EMPTY) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Split, color=0, key=0) self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Merge, high=True) class TestExcComm(BaseTestCase): @unittest.skipMPI('MPICH1') def testFreeSelf(self): errhdl = MPI.COMM_SELF.Get_errhandler() try: MPI.COMM_SELF.Set_errhandler(MPI.ERRORS_RETURN) self.assertRaisesMPI( [MPI.ERR_COMM, MPI.ERR_ARG], MPI.COMM_SELF.Free) finally: MPI.COMM_SELF.Set_errhandler(errhdl) errhdl.Free() @unittest.skipMPI('MPICH1') def testFreeWorld(self): self.assertRaisesMPI( [MPI.ERR_COMM, MPI.ERR_ARG], MPI.COMM_WORLD.Free) def testKeyvalInvalid(self): self.assertRaisesMPI( [MPI.ERR_KEYVAL, MPI.ERR_OTHER], MPI.COMM_WORLD.Get_attr, MPI.KEYVAL_INVALID) # -------------------------------------------------------------------- class TestExcWinNull(BaseTestCase): def testFree(self): self.assertRaisesMPI( [MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Free) def testGetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Get_errhandler) def testSetErrhandler(self): self.assertRaisesMPI( [MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Set_errhandler, MPI.ERRORS_RETURN) def testCallErrhandler(self): self.assertRaisesMPI([MPI.ERR_WIN, MPI.ERR_ARG], MPI.WIN_NULL.Call_errhandler, 0) class TestExcWin(BaseTestCase): def setUp(self): super(TestExcWin, self).setUp() self.WIN = MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF) self.WIN.Set_errhandler(MPI.ERRORS_RETURN) def tearDown(self): self.WIN.Free() self.WIN = None super(TestExcWin, self).tearDown() def testKeyvalInvalid(self): self.assertRaisesMPI( [MPI.ERR_KEYVAL, MPI.ERR_OTHER], self.WIN.Get_attr, MPI.KEYVAL_INVALID) SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI' try: if SpectrumMPI: raise NotImplementedError MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(TestExcWin, 'mpi-win') unittest.disable(TestExcWinNull, 'mpi-win') # -------------------------------------------------------------------- class TestExcErrhandlerNull(BaseTestCase): def testFree(self): self.assertRaisesMPI(MPI.ERR_ARG, MPI.ERRHANDLER_NULL.Free) def testCommSelfSetErrhandler(self): self.assertRaisesMPI( MPI.ERR_ARG, MPI.COMM_SELF.Set_errhandler, MPI.ERRHANDLER_NULL) def testCommWorldSetErrhandler(self): self.assertRaisesMPI( MPI.ERR_ARG, MPI.COMM_WORLD.Set_errhandler, MPI.ERRHANDLER_NULL) # class TestExcErrhandler(BaseTestCase): # # def testFreePredefined(self): # self.assertRaisesMPI(MPI.ERR_ARG, MPI.ERRORS_ARE_FATAL.Free) # self.assertRaisesMPI(MPI.ERR_ARG, MPI.ERRORS_RETURN.Free) # pass # -------------------------------------------------------------------- if __name__ == '__main__': unittest.main() # -------------------------------------------------------------------- mpi4py-3.1.6/test/test_file.py000066400000000000000000000143071460670727200162720ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys, os, tempfile class BaseTestFile(object): COMM = MPI.COMM_NULL FILE = MPI.FILE_NULL prefix = 'mpi4py' def setUp(self): fd, self.fname = tempfile.mkstemp(prefix=self.prefix) os.close(fd) self.amode = MPI.MODE_RDWR | MPI.MODE_CREATE #self.amode |= MPI.MODE_DELETE_ON_CLOSE try: self.FILE = MPI.File.Open(self.COMM, self.fname, self.amode, MPI.INFO_NULL) #self.fname=None except 
Exception: os.remove(self.fname) raise def tearDown(self): if self.FILE == MPI.FILE_NULL: return amode = self.FILE.amode self.FILE.Close() if not (amode & MPI.MODE_DELETE_ON_CLOSE): MPI.File.Delete(self.fname, MPI.INFO_NULL) @unittest.skipMPI('openmpi(==2.0.0)') @unittest.skipMPI('MPICH2(<1.1.0)') def testPreallocate(self): size = self.FILE.Get_size() self.assertEqual(size, 0) self.FILE.Preallocate(1) size = self.FILE.Get_size() self.assertEqual(size, 1) self.FILE.Preallocate(100) size = self.FILE.Get_size() self.assertEqual(size, 100) self.FILE.Preallocate(10) size = self.FILE.Get_size() self.assertEqual(size, 100) self.FILE.Preallocate(200) size = self.FILE.Get_size() self.assertEqual(size, 200) def testGetSetSize(self): size = self.FILE.Get_size() self.assertEqual(size, 0) size = self.FILE.size self.assertEqual(size, 0) self.FILE.Set_size(100) size = self.FILE.Get_size() self.assertEqual(size, 100) size = self.FILE.size self.assertEqual(size, 100) def testGetGroup(self): fgroup = self.FILE.Get_group() cgroup = self.COMM.Get_group() gcomp = MPI.Group.Compare(fgroup, cgroup) self.assertEqual(gcomp, MPI.IDENT) fgroup.Free() cgroup.Free() def testGetAmode(self): amode = self.FILE.Get_amode() self.assertEqual(self.amode, amode) self.assertEqual(self.FILE.amode, self.amode) def testGetSetInfo(self): #info = MPI.INFO_NULL #self.FILE.Set_info(info) info = MPI.Info.Create() self.FILE.Set_info(info) info.Free() info = self.FILE.Get_info() self.FILE.Set_info(info) info.Free() def testGetSetView(self): fsize = 100 * MPI.DOUBLE.size self.FILE.Set_size(fsize) displacements = range(100) datatypes = [MPI.SHORT, MPI.INT, MPI.LONG, MPI.FLOAT, MPI.DOUBLE] datareps = ['native'] #['native', 'internal', 'external32'] for disp in displacements: for dtype in datatypes: for datarep in datareps: etype, ftype = dtype, dtype self.FILE.Set_view(disp, etype, ftype, datarep, MPI.INFO_NULL) of, et, ft, dr = self.FILE.Get_view() self.assertEqual(disp, of) self.assertEqual(etype.Get_extent(), et.Get_extent()) self.assertEqual(ftype.Get_extent(), ft.Get_extent()) self.assertEqual(datarep, dr) try: if not et.is_predefined: et.Free() except NotImplementedError: if et != etype: et.Free() try: if not ft.is_predefined: ft.Free() except NotImplementedError: if ft != ftype: ft.Free() def testGetSetAtomicity(self): atom = self.FILE.Get_atomicity() self.assertFalse(atom) for atomicity in [True, False] * 4: self.FILE.Set_atomicity(atomicity) atom = self.FILE.Get_atomicity() self.assertEqual(atom, atomicity) def testSync(self): self.FILE.Sync() def testSeekGetPosition(self): offset = 0 self.FILE.Seek(offset, MPI.SEEK_END) self.FILE.Seek(offset, MPI.SEEK_CUR) self.FILE.Seek(offset, MPI.SEEK_SET) pos = self.FILE.Get_position() self.assertEqual(pos, offset) def testSeekGetPositionShared(self): offset = 0 self.FILE.Seek_shared(offset, MPI.SEEK_END) self.FILE.Seek_shared(offset, MPI.SEEK_CUR) self.FILE.Seek_shared(offset, MPI.SEEK_SET) pos = self.FILE.Get_position_shared() self.assertEqual(pos, offset) @unittest.skipMPI('openmpi(==2.0.0)') def testGetByteOffset(self): for offset in range(10): disp = self.FILE.Get_byte_offset(offset) self.assertEqual(disp, offset) def testGetTypeExtent(self): extent = self.FILE.Get_type_extent(MPI.BYTE) self.assertEqual(extent, 1) def testGetErrhandler(self): eh = self.FILE.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_RETURN) eh.Free() class TestFileNull(unittest.TestCase): def setUp(self): self.eh_save = MPI.FILE_NULL.Get_errhandler() def tearDown(self): MPI.FILE_NULL.Set_errhandler(self.eh_save) 
self.eh_save.Free() def testGetSetErrhandler(self): eh = MPI.FILE_NULL.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_RETURN) eh.Free() MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_ARE_FATAL) eh = MPI.FILE_NULL.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_ARE_FATAL) eh.Free() MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_RETURN) eh = MPI.FILE_NULL.Get_errhandler() self.assertEqual(eh, MPI.ERRORS_RETURN) eh.Free() class TestFileSelf(BaseTestFile, unittest.TestCase): COMM = MPI.COMM_SELF prefix = BaseTestFile.prefix + ('-%d' % MPI.COMM_WORLD.Get_rank()) def have_feature(): case = BaseTestFile() case.COMM = TestFileSelf.COMM case.prefix = TestFileSelf.prefix case.setUp() case.tearDown() try: have_feature() except NotImplementedError: unittest.disable(BaseTestFile, 'mpi-file') unittest.disable(TestFileNull, 'mpi-file') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_fortran.py000066400000000000000000000047211460670727200170250ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class BaseTestFortran(object): HANDLES = [] def testFortran(self): for handle1 in self.HANDLES: try: fint = handle1.py2f() except NotImplementedError: continue handle2 = type(handle1).f2py(fint) self.assertEqual(handle1, handle2) class TestFortranStatus(BaseTestFortran, unittest.TestCase): def setUp(self): s1 = MPI.Status() s2 = MPI.Status() s2.source = 1 s2.tag = 2 s2.error = MPI.ERR_OTHER s3 = MPI.Status() s3.source = 0 s3.tag = 0 s3.error = MPI.SUCCESS self.HANDLES = [s1, s2, s3] @unittest.skipMPI('MPICH1') def testFortran(self): super(TestFortranStatus, self).testFortran() class TestFortranDatatype(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.DATATYPE_NULL, MPI.CHAR, MPI.SHORT, MPI.INT, MPI.LONG, MPI.FLOAT, MPI.DOUBLE, ] class TestFortranOp(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.OP_NULL, MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC, ] class TestFortranRequest(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.REQUEST_NULL, ] class TestFortranMessage(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.MESSAGE_NULL, MPI.MESSAGE_NO_PROC, ] class TestFortranErrhandler(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.ERRHANDLER_NULL, MPI.ERRORS_RETURN, MPI.ERRORS_ARE_FATAL, ] class TestFortranInfo(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.INFO_NULL, ] class TestFortranGroup(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.GROUP_NULL, MPI.GROUP_EMPTY, ] class TestFortranComm(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.COMM_NULL, MPI.COMM_SELF, MPI.COMM_WORLD, ] class TestFortranWin(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.WIN_NULL, ] class TestFortranFile(BaseTestFortran, unittest.TestCase): HANDLES = [MPI.FILE_NULL, ] if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_grequest.py000066400000000000000000000044621460670727200172130ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class GReqCtx(object): source = 3 tag = 7 completed = False cancel_called = False free_called = False def query(self, status): status.Set_source(self.source) status.Set_tag(self.tag) def free(self): self.free_called = True def cancel(self, completed): self.cancel_called = True if completed is not self.completed: raise MPI.Exception(MPI.ERR_PENDING) @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('openmpi(==4.1.0)') class TestGrequest(unittest.TestCase): def testAll(self): ctx = GReqCtx() greq = MPI.Grequest.Start(ctx.query, 
ctx.free, ctx.cancel) self.assertFalse(greq.Test()) self.assertFalse(ctx.free_called) greq.Cancel() self.assertTrue(ctx.cancel_called) ctx.cancel_called = False greq.Complete() ctx.completed = True greq.Cancel() self.assertTrue(ctx.cancel_called) status = MPI.Status() self.assertTrue(greq.Test(status)) self.assertEqual(status.Get_source(), ctx.source) self.assertEqual(status.Get_tag(), ctx.tag) self.assertEqual(status.Get_error(), MPI.SUCCESS) greq.Wait() self.assertTrue(ctx.free_called) def testAll1(self): ctx = GReqCtx() greq = MPI.Grequest.Start(ctx.query, None, None) self.assertFalse(greq.Test()) greq.Cancel() greq.Complete() status = MPI.Status() self.assertTrue(greq.Test(status)) self.assertEqual(status.Get_source(), ctx.source) self.assertEqual(status.Get_tag(), ctx.tag) self.assertEqual(status.Get_error(), MPI.SUCCESS) self.assertFalse(status.Is_cancelled()) greq.Wait() def testAll2(self): greq = MPI.Grequest.Start(None, None, None) self.assertFalse(greq.Test()) greq.Cancel() greq.Complete() status = MPI.Status() self.assertTrue(greq.Test(status)) self.assertEqual(status.Get_source(), MPI.ANY_SOURCE) self.assertEqual(status.Get_tag(), MPI.ANY_TAG) self.assertEqual(status.Get_error(), MPI.SUCCESS) self.assertFalse(status.Is_cancelled()) greq.Wait() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_group.py000066400000000000000000000141471460670727200165110ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class BaseTestGroup(object): def testProperties(self): group = self.GROUP self.assertEqual(group.Get_size(), group.size) self.assertEqual(group.Get_rank(), group.rank) def testCompare(self): results = (MPI.IDENT, MPI.SIMILAR, MPI.UNEQUAL) group = MPI.COMM_WORLD.Get_group() gcmp = MPI.Group.Compare(self.GROUP, group) group.Free() self.assertTrue(gcmp in results) gcmp = MPI.Group.Compare(self.GROUP, self.GROUP) self.assertEqual(gcmp, MPI.IDENT) def testDup(self): group = self.GROUP.Dup() self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testUnion(self): group = MPI.Group.Union(MPI.GROUP_EMPTY, self.GROUP) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() group = MPI.Group.Union(self.GROUP, MPI.GROUP_EMPTY) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() group = MPI.Group.Union(self.GROUP, self.GROUP) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testDifference(self): group = MPI.Group.Difference(MPI.GROUP_EMPTY, self.GROUP) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() group = MPI.Group.Difference(self.GROUP, MPI.GROUP_EMPTY) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() group = MPI.Group.Difference(self.GROUP, self.GROUP) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() def testIntersection(self): group = MPI.Group.Intersection(MPI.GROUP_EMPTY, self.GROUP) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() group = MPI.Group.Intersection(self.GROUP, MPI.GROUP_EMPTY) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() group = MPI.Group.Intersection(self.GROUP, self.GROUP) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testIncl(self): group = self.GROUP.Incl([]) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() def testExcl(self): group = self.GROUP.Excl([]) 
self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testRangeIncl(self): if self.GROUP == MPI.GROUP_EMPTY: return group = self.GROUP.Range_incl([]) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() ranges = [ (0, self.GROUP.Get_size()-1, 1), ] group = self.GROUP.Range_incl(ranges) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() def testRangeExcl(self): if self.GROUP == MPI.GROUP_EMPTY: return group = self.GROUP.Range_excl([]) self.assertEqual(MPI.Group.Compare(group, self.GROUP), MPI.IDENT) group.Free() ranges = [ (0, self.GROUP.Get_size()-1, 1), ] group = self.GROUP.Range_excl(ranges) self.assertEqual(MPI.Group.Compare(group, MPI.GROUP_EMPTY), MPI.IDENT) group.Free() def testTranslRanks(self): group1 = self.GROUP group2 = self.GROUP ranks1 = list(range(group1.Get_size())) * 3 ranks2 = MPI.Group.Translate_ranks(group1, ranks1) ranks2 = MPI.Group.Translate_ranks(group1, ranks1, group2) self.assertEqual(list(ranks1), list(ranks2)) @unittest.skipMPI('PlatformMPI') @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') def testTranslRanksProcNull(self): if self.GROUP == MPI.GROUP_EMPTY: return group1 = self.GROUP group2 = self.GROUP ranks1 = [MPI.PROC_NULL] * 10 ranks2 = MPI.Group.Translate_ranks(group1, ranks1, group2) self.assertEqual(list(ranks1), list(ranks2)) def testTranslRanksGroupEmpty(self): if self.GROUP == MPI.GROUP_EMPTY: return group1 = self.GROUP group2 = MPI.GROUP_EMPTY ranks1 = list(range(group1.Get_size())) * 2 ranks2 = MPI.Group.Translate_ranks(group1, ranks1, group2) for rank in ranks2: self.assertEqual(rank, MPI.UNDEFINED) class TestGroupNull(unittest.TestCase): def testContructor(self): group = MPI.Group() self.assertFalse(group is MPI.GROUP_NULL) self.assertEqual(group, MPI.GROUP_NULL) def testNull(self): GROUP_NULL = MPI.GROUP_NULL group_null = MPI.Group() self.assertFalse(GROUP_NULL) self.assertFalse(group_null) self.assertEqual(group_null, GROUP_NULL) class TestGroupEmpty(BaseTestGroup, unittest.TestCase): def setUp(self): self.GROUP = MPI.GROUP_EMPTY def testEmpty(self): self.assertTrue(self.GROUP) def testSize(self): size = self.GROUP.Get_size() self.assertEqual(size, 0) def testRank(self): rank = self.GROUP.Get_rank() self.assertEqual(rank, MPI.UNDEFINED) @unittest.skipMPI('MPICH1') def testTranslRanks(self): super(TestGroupEmpty, self).testTranslRanks() class TestGroupSelf(BaseTestGroup, unittest.TestCase): def setUp(self): self.GROUP = MPI.COMM_SELF.Get_group() def tearDown(self): self.GROUP.Free() def testSize(self): size = self.GROUP.Get_size() self.assertEqual(size, 1) def testRank(self): rank = self.GROUP.Get_rank() self.assertEqual(rank, 0) class TestGroupWorld(BaseTestGroup, unittest.TestCase): def setUp(self): self.GROUP = MPI.COMM_WORLD.Get_group() def tearDown(self): self.GROUP.Free() def testSize(self): size = self.GROUP.Get_size() self.assertTrue(size >= 1) def testRank(self): size = self.GROUP.Get_size() rank = self.GROUP.Get_rank() self.assertTrue(rank >= 0 and rank < size) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_info.py000066400000000000000000000154461460670727200163130ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestInfoNull(unittest.TestCase): def testTruth(self): self.assertFalse(bool(MPI.INFO_NULL)) def testPyMethods(self): inull = MPI.INFO_NULL def getitem(): return inull['k'] def setitem(): inull['k'] = 'v' def delitem(): del inull['k'] def update(): inull.update([]) def pop(): 
inull.pop('k') def popitem(): inull.popitem() self.assertEqual(len(inull), 0) self.assertFalse('key' in inull) self.assertRaises(KeyError, getitem) self.assertRaises(KeyError, setitem) self.assertRaises(KeyError, delitem) self.assertRaises(KeyError, update) self.assertRaises(KeyError, pop) self.assertRaises(KeyError, popitem) self.assertEqual(inull.get('key', None), None) self.assertEqual(inull.pop('key', None), None) self.assertEqual(inull.keys(), []) self.assertEqual(inull.values(), []) self.assertEqual(inull.items(), []) self.assertEqual(inull.copy(), inull) inull.clear() class TestInfoEnv(unittest.TestCase): def testTruth(self): self.assertTrue(bool(MPI.INFO_ENV)) def testPyMethods(self): env = MPI.INFO_ENV if env == MPI.INFO_NULL: return for key in ("command", "argv", "maxprocs", "soft", "host", "arch", "wdir", "file", "thread_level"): v = env.Get(key) class TestInfo(unittest.TestCase): def setUp(self): self.INFO = MPI.Info.Create() def tearDown(self): self.INFO.Free() self.assertEqual(self.INFO, MPI.INFO_NULL) self.INFO = None def testTruth(self): self.assertTrue(bool(self.INFO)) def testDup(self): info = self.INFO.Dup() self.assertNotEqual(self.INFO, info) self.assertEqual(info.Get_nkeys(), 0) info.Free() self.assertFalse(info) def testGet(self): value = self.INFO.Get('key') self.assertEqual(value, None) def testGetNKeys(self): self.assertEqual(self.INFO.Get_nkeys(), 0) def testGetSetDelete(self): INFO = self.INFO self.assertEqual(INFO.Get_nkeys(), 0) INFO.Set('key', 'value') nkeys = INFO.Get_nkeys() self.assertEqual(nkeys, 1) key = INFO.Get_nthkey(0) self.assertEqual(key, 'key') value = INFO.Get('key') self.assertEqual(value, 'value') INFO.Delete('key') nkeys = INFO.Get_nkeys() self.assertEqual(nkeys, 0) value = INFO.Get('key') self.assertEqual(value, None) def testPyMethods(self): INFO = self.INFO self.assertEqual(len(INFO), 0) self.assertTrue('key' not in INFO) self.assertEqual(INFO.keys(), []) self.assertEqual(INFO.values(), []) self.assertEqual(INFO.items(), []) INFO['key'] = 'value' self.assertEqual(len(INFO), 1) self.assertTrue('key' in INFO) self.assertEqual(INFO['key'], 'value') for key in INFO: self.assertEqual(key, 'key') self.assertEqual(INFO.keys(), ['key']) self.assertEqual(INFO.values(), ['value']) self.assertEqual(INFO.items(), [('key', 'value')]) self.assertEqual(key, 'key') del INFO['key'] self.assertEqual(len(INFO), 0) INFO['key'] = 'value' self.assertEqual(INFO.pop('key'), 'value') self.assertEqual(len(INFO), 0) self.assertEqual(INFO.pop('key', 'value'), 'value') self.assertRaises(KeyError, INFO.pop, 'key') INFO['key1'] = 'value1' INFO['key2'] = 'value2' self.assertEqual(INFO.pop('key1'), 'value1') self.assertEqual(len(INFO), 1) self.assertEqual(INFO.pop('key2'), 'value2') self.assertEqual(len(INFO), 0) INFO['key'] = 'value' self.assertEqual(INFO.popitem(), ('key', 'value')) self.assertEqual(len(INFO), 0) self.assertRaises(KeyError, INFO.popitem) INFO['key1'] = 'value1' INFO['key2'] = 'value2' self.assertEqual(INFO.popitem(), ('key2', 'value2')) self.assertEqual(len(INFO), 1) self.assertEqual(INFO.popitem(), ('key1', 'value1')) self.assertEqual(len(INFO), 0) self.assertEqual(len(INFO), 0) self.assertTrue('key' not in INFO) self.assertEqual(INFO.keys(), []) self.assertEqual(INFO.values(), []) self.assertEqual(INFO.items(), []) def getitem(): INFO['key'] self.assertRaises(KeyError, getitem) def delitem(): del INFO['key'] self.assertRaises(KeyError, delitem) INFO.clear() INFO.update([('key1','value1')]) self.assertEqual(len(INFO), 1) self.assertEqual(INFO['key1'], 
'value1') self.assertEqual(INFO.get('key1'), 'value1') self.assertEqual(INFO.get('key2'), None) self.assertEqual(INFO.get('key2', 'value2'), 'value2') INFO.update(key2='value2') self.assertEqual(len(INFO), 2) self.assertEqual(INFO['key1'], 'value1') self.assertEqual(INFO['key2'], 'value2') self.assertEqual(INFO.get('key1'), 'value1') self.assertEqual(INFO.get('key2'), 'value2') self.assertEqual(INFO.get('key3'), None) self.assertEqual(INFO.get('key3', 'value3'), 'value3') INFO.update([('key1', 'newval1')], key2='newval2') self.assertEqual(len(INFO), 2) self.assertEqual(INFO['key1'], 'newval1') self.assertEqual(INFO['key2'], 'newval2') self.assertEqual(INFO.get('key1'), 'newval1') self.assertEqual(INFO.get('key2'), 'newval2') self.assertEqual(INFO.get('key3'), None) self.assertEqual(INFO.get('key3', 'newval3'), 'newval3') INFO.update(dict(key1='val1', key2='val2', key3='val3')) self.assertEqual(len(INFO), 3) self.assertEqual(INFO['key1'], 'val1') self.assertEqual(INFO['key2'], 'val2') self.assertEqual(INFO['key3'], 'val3') dupe = INFO.copy() self.assertEqual(INFO.items(), dupe.items()) dupe.Free() INFO.clear() self.assertEqual(len(INFO), 0) self.assertEqual(INFO.get('key1'), None) self.assertEqual(INFO.get('key2'), None) self.assertEqual(INFO.get('key3'), None) self.assertEqual(INFO.get('key1', 'value1'), 'value1') self.assertEqual(INFO.get('key2', 'value2'), 'value2') self.assertEqual(INFO.get('key3', 'value3'), 'value3') try: MPI.Info.Create().Free() except NotImplementedError: unittest.disable(TestInfo, 'mpi-info') unittest.disable(TestInfoNull, 'mpi-info') if (MPI.VERSION < 3 and MPI.INFO_ENV == MPI.INFO_NULL): unittest.disable(TestInfoEnv, 'mpi-info-env') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_io.py000066400000000000000000000337221460670727200157640ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl import sys, os, tempfile def subTestIO(case, *args, **kwargs): for array, typecode in arrayimpl.subTest(case, *args, **kwargs): if unittest.is_mpi_gpu('mvapich2', array): continue yield array, typecode class BaseTestIO(object): COMM = MPI.COMM_NULL FILE = MPI.FILE_NULL prefix = 'mpi4py-' def setUp(self): comm = self.COMM fname = None if comm.Get_rank() == 0: fd, fname = tempfile.mkstemp(prefix=self.prefix) os.close(fd) fname = comm.bcast(fname, 0) amode = MPI.MODE_RDWR | MPI.MODE_CREATE amode |= MPI.MODE_DELETE_ON_CLOSE amode |= MPI.MODE_UNIQUE_OPEN info = MPI.INFO_NULL try: self.FILE = MPI.File.Open(comm, fname, amode, info) except Exception: if comm.Get_rank() == 0: os.remove(fname) raise def tearDown(self): if self.FILE: self.FILE.Close() self.COMM.Barrier() # non-collective def testReadWriteAt(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Write_at(count*rank, wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Read_at(count*rank, rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testIReadIWriteAt(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Iwrite_at(count*rank, wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() 
rbuf = array(-1, typecode, count+1) fh.Iread_at(count*rank, rbuf.as_mpi_c(count)).Wait() for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testReadWrite(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) for r in range(size): if r == rank: fh.Seek(0, MPI.SEEK_SET) fh.Write(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() for n in range(0, len(wbuf)): rbuf = array(-1, typecode, n+1) fh.Seek(0, MPI.SEEK_SET) fh.Read(rbuf.as_mpi_c(n)) for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testIReadIWrite(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) for r in range(size): if r == rank: fh.Seek(0, MPI.SEEK_SET) fh.Iwrite(wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() for n in range(0, len(wbuf)): rbuf = array(-1, typecode, n+1) fh.Seek(0, MPI.SEEK_SET) fh.Iread(rbuf.as_mpi_c(n)).Wait() for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testReadWriteShared(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Write_shared(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Read_shared(rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertTrue(0<=value<42) self.assertEqual(value, rbuf[0]) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testIReadIWriteShared(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Iwrite_shared(wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Iread_shared(rbuf.as_mpi_c(count)).Wait() for value in rbuf[:-1]: self.assertTrue(0<=value<42) self.assertEqual(value, rbuf[0]) self.assertEqual(rbuf[-1], -1) comm.Barrier() # collective def testReadWriteAtAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Write_at_all(count*rank, wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Read_at_all(count*rank, rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() @unittest.skipMPI('SpectrumMPI') def testIReadIWriteAtAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE try: # MPI 3.1 for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Iwrite_at_all(count*rank, wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() rbuf = 
array(-1, typecode, count+1) fh.Iread_at_all(count*rank, rbuf.as_mpi_c(count)).Wait() for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() except NotImplementedError: if MPI.Get_version() >= (3, 1): raise self.skipTest('mpi-iwrite_at_all') def testReadWriteAtAllBeginEnd(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Write_at_all_begin(count*rank, wbuf.as_raw()) fh.Write_at_all_end(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Read_at_all_begin(count*rank, rbuf.as_mpi_c(count)) fh.Read_at_all_end(rbuf.as_raw()) for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testReadWriteAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Seek(count*rank, MPI.SEEK_SET) fh.Write_all(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek(count*rank, MPI.SEEK_SET) fh.Read_all(rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() @unittest.skipMPI('SpectrumMPI') def testIReadIWriteAll(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE try: # MPI 3.1 for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Seek(count*rank, MPI.SEEK_SET) fh.Iwrite_all(wbuf.as_raw()).Wait() fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek(count*rank, MPI.SEEK_SET) fh.Iread_all(rbuf.as_mpi_c(count)).Wait() for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() except NotImplementedError: if MPI.Get_version() >= (3, 1): raise self.skipTest('mpi-iwrite_all') def testReadWriteAllBeginEnd(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(42, typecode, count) fh.Seek(count*rank, MPI.SEEK_SET) fh.Write_all_begin(wbuf.as_raw()) fh.Write_all_end(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek(count*rank, MPI.SEEK_SET) fh.Read_all_begin(rbuf.as_mpi_c(count)) fh.Read_all_end(rbuf.as_raw()) for value in rbuf[:-1]: self.assertEqual(value, 42) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testReadWriteOrdered(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Write_ordered(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Read_ordered(rbuf.as_mpi_c(count)) for value in rbuf[:-1]: self.assertEqual(value, rank%42) self.assertEqual(rbuf[-1], -1) comm.Barrier() def testReadWriteOrderedBeginEnd(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() fh = self.FILE for array, typecode in 
subTestIO(self): etype = array.TypeMap[typecode] fh.Set_size(0) fh.Set_view(0, etype) count = 13 wbuf = array(rank%42, typecode, count) fh.Seek_shared(0, MPI.SEEK_SET) fh.Write_ordered_begin(wbuf.as_raw()) fh.Write_ordered_end(wbuf.as_raw()) fh.Sync() comm.Barrier() fh.Sync() rbuf = array(-1, typecode, count+1) fh.Seek_shared(0, MPI.SEEK_SET) fh.Read_ordered_begin(rbuf.as_mpi_c(count)) fh.Read_ordered_end(rbuf.as_raw()) for value in rbuf[:-1]: self.assertEqual(value, rank%42) self.assertEqual(rbuf[-1], -1) comm.Barrier() @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') class TestIOSelf(BaseTestIO, unittest.TestCase): COMM = MPI.COMM_SELF prefix = BaseTestIO.prefix + ('%d-' % MPI.COMM_WORLD.Get_rank()) @unittest.skipMPI('openmpi(<2.2.0)') @unittest.skipMPI('msmpi') @unittest.skipMPI('MPICH2') @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') class TestIOWorld(BaseTestIO, unittest.TestCase): COMM = MPI.COMM_WORLD def have_feature(): case = BaseTestIO() case.COMM = TestIOSelf.COMM case.prefix = TestIOSelf.prefix case.setUp() case.tearDown() try: have_feature() except NotImplementedError: unittest.disable(BaseTestIO, 'mpi-io') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_memory.py000066400000000000000000000264041460670727200166640ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys try: from array import array except ImportError: array = None pypy_lt_58 = (hasattr(sys, 'pypy_version_info') and sys.pypy_version_info < (5, 8)) class TestMemory(unittest.TestCase): def testNewEmpty(self): memory = MPI.memory mem = memory() self.assertEqual(mem.address, 0) self.assertEqual(mem.obj, None) self.assertEqual(mem.nbytes, 0) self.assertEqual(mem.readonly, False) self.assertEqual(mem.format, 'B') self.assertEqual(mem.itemsize, 1) self.assertEqual(len(mem), 0) mem[:] = 0 mem[:] = memory() if sys.version_info < (3,0): b = buffer(mem) self.assertEqual(len(b), 0) if sys.version_info >= (2,7): m = memoryview(mem) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) if not pypy_lt_58: self.assertEqual(m.readonly, False) self.assertEqual(m.shape, (0,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"") self.assertEqual(m.tolist(), []) mem.release() self.assertEqual(mem.address, 0) self.assertEqual(mem.nbytes, 0) self.assertEqual(mem.readonly, False) def testNewBad(self): memory = MPI.memory for obj in (None, 0, 0.0, [], (), []): self.assertRaises(TypeError, memory, obj) def testNewBytes(self): memory = MPI.memory obj = b"abc" mem = memory(obj) self.assertEqual(mem.obj, obj) self.assertEqual(mem.nbytes, len(obj)) self.assertEqual(mem.readonly, True) def testNewBytearray(self): memory = MPI.memory obj = bytearray([1,2,3]) mem = memory(obj) self.assertEqual(mem.obj, obj) self.assertEqual(mem.nbytes, len(obj)) self.assertEqual(mem.readonly, False) @unittest.skipIf(array is None, 'array') def testNewArray(self): memory = MPI.memory obj = array('i', [1,2,3]) mem = memory(obj) self.assertEqual(mem.obj, obj) self.assertEqual(mem.nbytes, len(obj)*obj.itemsize) self.assertEqual(mem.readonly, False) def testAllocate(self): memory = MPI.memory for size in (0, 1, 2): mem = memory.allocate(size) self.assertEqual(mem.nbytes, size) self.assertNotEqual(mem.address, 0) for clear in (False, True): mem = memory.allocate(1024, clear) self.assertEqual(mem.nbytes, 1024) self.assertNotEqual(mem.address, 0) if clear: self.assertEqual(mem[0], 0) self.assertEqual(mem[-1], 0) self.assertRaises(TypeError, 
memory.allocate, None) self.assertRaises(ValueError, memory.allocate, -1) def testFromBufferBad(self): memory = MPI.memory for obj in (None, 0, 0.0, [], (), []): self.assertRaises(TypeError, memory.frombuffer, obj) def testFromBufferBytes(self): memory = MPI.memory mem = memory.frombuffer(b"abc", readonly=True) self.assertNotEqual(mem.address, 0) self.assertEqual(type(mem.obj), bytes) self.assertEqual(mem.obj, b"abc") self.assertEqual(mem.nbytes, 3) self.assertEqual(mem.readonly, True) self.assertEqual(mem.format, 'B') self.assertEqual(mem.itemsize, 1) self.assertEqual(len(mem), 3) if sys.version_info < (3,0): b = buffer(mem) self.assertEqual(len(b), 3) if sys.version_info >= (2,7): m = memoryview(mem) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) if not pypy_lt_58: self.assertEqual(m.readonly, True) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"abc") self.assertEqual(m.tolist(), [ord(c) for c in "abc"]) mem.release() self.assertEqual(mem.address, 0) self.assertEqual(mem.nbytes, 0) self.assertEqual(mem.readonly, False) @unittest.skipIf(array is None, 'array') def testFromBufferArrayRO(self): memory = MPI.memory obj = array('B', [1,2,3]) mem = memory.frombuffer(obj, readonly=True) self.assertNotEqual(mem.address, 0) self.assertEqual(type(mem.obj), array) self.assertEqual(mem.nbytes, 3) self.assertEqual(mem.readonly, True) self.assertEqual(mem.format, 'B') self.assertEqual(mem.itemsize, 1) self.assertEqual(len(mem), 3) if sys.version_info < (3,0): b = buffer(mem) self.assertEqual(len(b), 3) if sys.version_info >= (2,7): m = memoryview(mem) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) if not pypy_lt_58: self.assertEqual(m.readonly, True) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"\1\2\3") self.assertEqual(m.tolist(), [1,2,3]) mem.release() self.assertEqual(mem.address, 0) self.assertEqual(mem.nbytes, 0) self.assertEqual(mem.readonly, False) @unittest.skipIf(array is None, 'array') def testFromBufferArrayRW(self): memory = MPI.memory obj = array('B', [1,2,3]) mem = memory.frombuffer(obj, readonly=False) self.assertNotEqual(mem.address, 0) self.assertEqual(mem.nbytes, 3) self.assertEqual(mem.readonly, False) self.assertEqual(len(mem), 3) if sys.version_info < (3,0): b = buffer(mem) self.assertEqual(len(b), 3) if sys.version_info >= (2,7): m = memoryview(mem) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) if not pypy_lt_58: self.assertEqual(m.readonly, False) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"\1\2\3") self.assertEqual(m.tolist(), [1,2,3]) mem[:] = 1 self.assertEqual(obj, array('B', [1]*3)) mem[1:] = array('B', [7]*2) self.assertEqual(obj, array('B', [1,7,7])) mem[1:2] = array('B', [8]*1) self.assertEqual(obj, array('B', [1,8,7])) mem.release() self.assertEqual(mem.address, 0) self.assertEqual(mem.nbytes, 0) self.assertEqual(mem.readonly, False) @unittest.skipIf(array is None, 'array') def testFromAddress(self): memory = MPI.memory obj = array('B', [1,2,3]) addr, size = obj.buffer_info() nbytes = size * obj.itemsize mem = memory.fromaddress(addr, nbytes, readonly=False) self.assertNotEqual(mem.address, 0) self.assertEqual(mem.nbytes, 3) self.assertEqual(mem.readonly, False) self.assertEqual(len(mem), 3) if sys.version_info < (3,0): b = buffer(mem) self.assertEqual(len(b), 3) if 
sys.version_info >= (2,7): m = memoryview(mem) self.assertEqual(m.format, 'B') self.assertEqual(m.itemsize, 1) self.assertEqual(m.ndim, 1) if not pypy_lt_58: self.assertEqual(m.readonly, False) self.assertEqual(m.shape, (3,)) self.assertEqual(m.strides, (1,)) self.assertEqual(m.tobytes(), b"\1\2\3") self.assertEqual(m.tolist(), [1,2,3]) mem[:] = 1 self.assertEqual(obj, array('B', [1]*3)) mem[1:] = array('B', [7]*2) self.assertEqual(obj, array('B', [1,7,7])) mem[1:2] = array('B', [8]*1) self.assertEqual(obj, array('B', [1,8,7])) mem.release() self.assertEqual(mem.address, 0) self.assertEqual(mem.nbytes, 0) self.assertEqual(mem.readonly, False) def testToReadonly(self): memory = MPI.memory obj = bytearray(b"abc") mem1 = memory.frombuffer(obj) mem2 = mem1.toreadonly() self.assertEqual(mem1.readonly, False) self.assertEqual(mem2.readonly, True) self.assertEqual(mem1.address, mem2.address) self.assertEqual(mem1.obj, mem2.obj) self.assertEqual(type(mem1.obj), type(mem2.obj)) self.assertEqual(mem1.nbytes, mem2.nbytes) def testSequence(self): n = 16 try: mem = MPI.Alloc_mem(n, MPI.INFO_NULL) except NotImplementedError: self.skipTest('mpi-alloc_mem') try: self.assertTrue(type(mem) is MPI.memory) self.assertTrue(mem.address != 0) self.assertEqual(mem.nbytes, n) self.assertEqual(mem.readonly, False) self.assertEqual(len(mem), n) def delitem(): del mem[n] def getitem1(): return mem[n] def getitem2(): return mem[::2] def getitem3(): return mem[None] def setitem1(): mem[n] = 0 def setitem2(): mem[::2] = 0 def setitem3(): mem[None] = 0 self.assertRaises(Exception, delitem) self.assertRaises(IndexError, getitem1) self.assertRaises(IndexError, getitem2) self.assertRaises(TypeError, getitem3) self.assertRaises(IndexError, setitem1) self.assertRaises(IndexError, setitem2) self.assertRaises(TypeError, setitem3) for i in range(n): mem[i] = i for i in range(n): self.assertEqual(mem[i], i) mem[:] = 0 for i in range(-n, 0): mem[i] = abs(i) for i in range(-n, 0): self.assertEqual(mem[i], abs(i)) mem[:] = 0 for i in range(n): self.assertEqual(mem[i], 0) mem[:] = 255 for i in range(n): self.assertEqual(mem[i], 255) mem[:n//2] = 1 mem[n//2:] = 0 for i in range(n//2): self.assertEqual(mem[i], 1) for i in range(n//2, n): self.assertEqual(mem[i], 0) mem[:] = 0 mem[1:5] = b"abcd" mem[10:13] = b"xyz" self.assertEqual(mem[0], 0) for i, c in enumerate("abcd"): self.assertEqual(mem[1+i], ord(c)) for i in range(5, 10): self.assertEqual(mem[i], 0) for i, c in enumerate("xyz"): self.assertEqual(mem[10+i], ord(c)) for i in range(13, n): self.assertEqual(mem[i], 0) self.assertEqual(mem[1:5].tobytes(), b"abcd") self.assertEqual(mem[10:13].tobytes(), b"xyz") finally: MPI.Free_mem(mem) self.assertEqual(mem.address, 0) self.assertEqual(mem.nbytes, 0) self.assertEqual(mem.readonly, False) try: MPI.memory except AttributeError: unittest.disable(TestMemory, 'mpi4py-memory') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_mpimem.py000066400000000000000000000013711460670727200166340ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestMemory(unittest.TestCase): def testMemory1(self): for size in range(0, 10000, 100): try: mem1 = MPI.Alloc_mem(size) self.assertEqual(len(mem1), size) MPI.Free_mem(mem1) except NotImplementedError: self.skipTest('mpi-alloc_mem') def testMemory2(self): for size in range(0, 10000, 100): try: mem2 = MPI.Alloc_mem(size, MPI.INFO_NULL) self.assertEqual(len(mem2), size) MPI.Free_mem(mem2) except NotImplementedError: self.skipTest('mpi-alloc_mem') if __name__ 
== '__main__': unittest.main() mpi4py-3.1.6/test/test_msgspec.py000066400000000000000000001313211460670727200170100ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest from arrayimpl import allclose from arrayimpl import typestr import sys typemap = MPI._typedict try: import array except ImportError: array = None try: import numpy except ImportError: numpy = None try: import cupy except ImportError: cupy = None try: import numba import numba.cuda from distutils.version import StrictVersion numba_version = StrictVersion(numba.__version__).version if numba_version < (0, 48): import warnings warnings.warn('To test Numba GPU arrays, use Numba v0.48.0+.', RuntimeWarning) numba = None except ImportError: numba = None py2 = sys.version_info[0] == 2 py3 = sys.version_info[0] >= 3 pypy = hasattr(sys, 'pypy_version_info') pypy2 = pypy and py2 pypy_lt_53 = pypy and sys.pypy_version_info < (5, 3) # --- class BaseBuf(object): def __init__(self, typecode, initializer): self._buf = array.array(typecode, initializer) def __eq__(self, other): return self._buf == other._buf def __ne__(self, other): return self._buf != other._buf def __len__(self): return len(self._buf) def __getitem__(self, item): return self._buf[item] def __setitem__(self, item, value): self._buf[item] = value._buf # --- try: import dlpackimpl as dlpack except ImportError: dlpack = None class DLPackCPUBuf(BaseBuf): def __init__(self, typecode, initializer): super(DLPackCPUBuf, self).__init__(typecode, initializer) self.managed = dlpack.make_dl_managed_tensor(self._buf) def __del__(self): self.managed = None if not pypy and sys.getrefcount(self._buf) > 2: raise RuntimeError('dlpack: possible reference leak') def __dlpack_device__(self): device = self.managed.dl_tensor.device return (device.device_type, device.device_id) def __dlpack__(self, stream=None): managed = self.managed if managed.dl_tensor.device.device_type == \ dlpack.DLDeviceType.kDLCPU: assert stream == None capsule = dlpack.make_py_capsule(managed) return capsule if cupy is not None: class DLPackGPUBuf(BaseBuf): has_dlpack = None dev_type = None def __init__(self, typecode, initializer): self._buf = cupy.array(initializer, dtype=typecode) self.has_dlpack = hasattr(self._buf, '__dlpack_device__') # TODO(leofang): test CUDA managed memory? 
if cupy.cuda.runtime.is_hip: self.dev_type = dlpack.DLDeviceType.kDLROCM else: self.dev_type = dlpack.DLDeviceType.kDLCUDA def __del__(self): if not pypy and sys.getrefcount(self._buf) > 2: raise RuntimeError('dlpack: possible reference leak') def __dlpack_device__(self): if self.has_dlpack: return self._buf.__dlpack_device__() else: return (self.dev_type, self._buf.device.id) def __dlpack__(self, stream=None): cupy.cuda.get_current_stream().synchronize() if self.has_dlpack: return self._buf.__dlpack__(stream=-1) else: return self._buf.toDlpack() # --- class CAIBuf(BaseBuf): def __init__(self, typecode, initializer, readonly=False): super(CAIBuf, self).__init__(typecode, initializer) address = self._buf.buffer_info()[0] typecode = self._buf.typecode itemsize = self._buf.itemsize self.__cuda_array_interface__ = dict( version = 0, data = (address, readonly), typestr = typestr(typecode, itemsize), shape = (len(self._buf), 1, 1), strides = (itemsize,) * 3, descr = [('', typestr(typecode, itemsize))], ) cupy_issue_2259 = False if cupy is not None: cupy_issue_2259 = not isinstance( cupy.zeros((2,2)).T.__cuda_array_interface__['strides'], tuple ) # --- def Sendrecv(smsg, rmsg): MPI.COMM_SELF.Sendrecv(sendbuf=smsg, dest=0, sendtag=0, recvbuf=rmsg, source=0, recvtag=0, status=MPI.Status()) class TestMessageSimple(unittest.TestCase): def testMessageBad(self): buf = MPI.Alloc_mem(5) empty = [None, 0, "B"] def f(): Sendrecv([buf, 0, 0, "i", None], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, "\0"], empty) self.assertRaises(KeyError, f) def f(): Sendrecv([buf, -1, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, -1, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, +2, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([None, 1, 0, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, None, 0, "i"], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, 1, MPI.DATATYPE_NULL], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, None, 0, MPI.DATATYPE_NULL], empty) self.assertRaises(ValueError, f) try: t = MPI.INT.Create_resized(0, -4).Commit() def f(): Sendrecv([buf, None, t], empty) self.assertRaises(ValueError, f) def f(): Sendrecv([buf, 0, 1, t], empty) self.assertRaises(ValueError, f) t.Free() except NotImplementedError: pass MPI.Free_mem(buf) buf = [1,2,3,4] def f(): Sendrecv([buf, 4, 0, "i"], empty) self.assertRaises(TypeError, f) buf = {1:2,3:4} def f(): Sendrecv([buf, 4, 0, "i"], empty) self.assertRaises(TypeError, f) def f(): Sendrecv(b"abc", b"abc") self.assertRaises((BufferError, TypeError, ValueError), f) def testMessageNone(self): empty = [None, 0, "B"] Sendrecv(empty, empty) empty = [None, "B"] Sendrecv(empty, empty) def testMessageBottom(self): empty = [MPI.BOTTOM, 0, "B"] Sendrecv(empty, empty) empty = [MPI.BOTTOM, "B"] Sendrecv(empty, empty) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytes(self): sbuf = b"abc" rbuf = bytearray(3) Sendrecv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytearray(self): sbuf = bytearray(b"abc") rbuf = bytearray(3) Sendrecv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) @unittest.skipIf(py3, 'python3') @unittest.skipIf(pypy2, 'pypy2') @unittest.skipIf(hasattr(MPI, 'ffi'), 'mpi4py-cffi') def testMessageUnicode(self): # Test for Issue #120 sbuf = unicode("abc") rbuf = bytearray(len(buffer(sbuf))) Sendrecv([sbuf, MPI.BYTE], [rbuf, MPI.BYTE]) 
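    # --- Editor's note: illustrative sketch, not part of the original
    # mpi4py test suite.  The surrounding tests exercise mpi4py's
    # buffer-spec message format [buffer, count, displacement, datatype],
    # where the trailing elements are optional.  The hypothetical method
    # below shows a minimal use of that format, reusing the module-level
    # Sendrecv helper defined earlier in this file.
    def testMessageSpecSketch(self):
        sbuf = bytearray(b"xyz")
        rbuf = bytearray(3)
        # explicit count and MPI datatype
        Sendrecv([sbuf, 3, MPI.CHAR], [rbuf, 3, MPI.CHAR])
        self.assertEqual(sbuf, rbuf)
        # count and datatype inferred from the buffer object itself
        rbuf[:] = bytearray(3)
        Sendrecv(sbuf, rbuf)
        self.assertEqual(sbuf, rbuf)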
@unittest.skipIf(py3, 'python3') @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBuffer(self): sbuf = buffer(b"abc") rbuf = bytearray(3) Sendrecv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) self.assertRaises((BufferError, TypeError, ValueError), Sendrecv, [rbuf, "c"], [sbuf, "c"]) @unittest.skipIf(pypy2, 'pypy2') @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageMemoryView(self): sbuf = memoryview(b"abc") rbuf = bytearray(3) Sendrecv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) self.assertRaises((BufferError, TypeError, ValueError), Sendrecv, [rbuf, "c"], [sbuf, "c"]) @unittest.skipMPI('msmpi(<8.0.0)') class TestMessageBlock(unittest.TestCase): @unittest.skipIf(MPI.COMM_WORLD.Get_size() < 2, 'mpi-world-size<2') def testMessageBad(self): comm = MPI.COMM_WORLD buf = MPI.Alloc_mem(4) empty = [None, 0, "B"] def f(): comm.Alltoall([buf, None, "i"], empty) self.assertRaises(ValueError, f) MPI.Free_mem(buf) class BaseTestMessageSimpleArray(object): TYPECODES = "bhil"+"BHIL"+"fd" def array(self, typecode, initializer): raise NotImplementedError def check1(self, z, s, r, typecode): r[:] = z Sendrecv(s, r) for a, b in zip(s, r): self.assertEqual(a, b) def check2(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): r[:] = z Sendrecv([s, type], [r, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check3(self, z, s, r, typecode): size = len(r) for count in range(size): r[:] = z Sendrecv([s, count], [r, count]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for count in range(size): r[:] = z Sendrecv([s, (count, None)], [r, (count, None)]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for disp in range(size): r[:] = z Sendrecv([s, (None, disp)], [r, (None, disp)]) for i in range(disp): self.assertEqual(r[i], z[0]) for i in range(disp, size): self.assertEqual(r[i], s[i]) for disp in range(size): for count in range(size-disp): r[:] = z Sendrecv([s, (count, disp)], [r, (count, disp)]) for i in range(0, disp): self.assertEqual(r[i], z[0]) for i in range(disp, disp+count): self.assertEqual(r[i], s[i]) for i in range(disp+count, size): self.assertEqual(r[i], z[0]) def check4(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): for count in (None, len(s)): r[:] = z Sendrecv([s, count, type], [r, count, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check5(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): for p in range(0, len(s)): r[:] = z Sendrecv([s, (p, None), type], [r, (p, None), type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Sendrecv([s, (count, displ), type], [r, (count, displ), type]) for a, b in zip(r[:p], z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check6(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): for p in range(0, len(s)): r[:] = z Sendrecv([s, p, None, type], [r, p, None, type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Sendrecv([s, count, displ, type], [r, count, displ, type]) for a, b in zip(r[:p], z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, 
b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check(self, test): for t in tuple(self.TYPECODES): for n in range(1, 10): z = self.array(t, [0]*n) s = self.array(t, list(range(n))) r = self.array(t, [0]*n) test(z, s, r, t) def testArray1(self): self.check(self.check1) def testArray2(self): self.check(self.check2) def testArray3(self): self.check(self.check3) def testArray4(self): self.check(self.check4) def testArray5(self): self.check(self.check5) def testArray6(self): self.check(self.check6) @unittest.skipIf(array is None, 'array') class TestMessageSimpleArray(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return array.array(typecode, initializer) @unittest.skipIf(numpy is None, 'numpy') class TestMessageSimpleNumPy(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return numpy.array(initializer, dtype=typecode) def testOrderC(self): sbuf = numpy.ones([3,2]) rbuf = numpy.zeros([3,2]) Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) def testOrderFortran(self): sbuf = numpy.ones([3,2]).T rbuf = numpy.zeros([3,2]).T Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) def testReadonly(self): sbuf = numpy.ones([3]) rbuf = numpy.zeros([3]) sbuf.flags.writeable = False Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) def testNotWriteable(self): sbuf = numpy.ones([3]) rbuf = numpy.zeros([3]) rbuf.flags.writeable = False self.assertRaises( (BufferError, ValueError, TypeError), Sendrecv, sbuf, rbuf ) def testNotContiguous(self): sbuf = numpy.ones([3,2])[:,0] rbuf = numpy.zeros([3]) self.assertRaises( (BufferError, ValueError, TypeError), Sendrecv, sbuf, rbuf, ) @unittest.skipIf(array is None, 'array') @unittest.skipIf(dlpack is None, 'dlpack') class TestMessageSimpleDLPackCPUBuf(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return DLPackCPUBuf(typecode, initializer) @unittest.skipIf(cupy is None, 'cupy') class TestMessageSimpleDLPackGPUBuf(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return DLPackGPUBuf(typecode, initializer) @unittest.skipIf(array is None, 'array') class TestMessageSimpleCAIBuf(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return CAIBuf(typecode, initializer) @unittest.skipIf(cupy is None, 'cupy') class TestMessageSimpleCuPy(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): return cupy.array(initializer, dtype=typecode) def testOrderC(self): sbuf = cupy.ones([3,2]) rbuf = cupy.zeros([3,2]) Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(cupy_issue_2259, 'cupy-issue-2259') def testOrderFortran(self): sbuf = cupy.ones([3,2]).T rbuf = cupy.zeros([3,2]).T Sendrecv(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(cupy_issue_2259, 'cupy-issue-2259') def testNotContiguous(self): sbuf = cupy.ones([3,2])[:,0] rbuf = cupy.zeros([3]) self.assertRaises((BufferError, ValueError), Sendrecv, sbuf, rbuf) @unittest.skipIf(numba is None, 'numba') class TestMessageSimpleNumba(unittest.TestCase, BaseTestMessageSimpleArray): def array(self, typecode, initializer): n = len(initializer) arr = numba.cuda.device_array((n,), dtype=typecode) arr[:] = initializer return arr def testOrderC(self): sbuf = numba.cuda.device_array((6,)) sbuf[:] = 1 sbuf = sbuf.reshape(3,2) rbuf = numba.cuda.device_array((6,)) rbuf[:] = 0 rbuf = sbuf.reshape(3,2) Sendrecv(sbuf, rbuf) # numba arrays do not have the 
.all() method for i in range(3): for j in range(2): self.assertTrue(sbuf[i,j] == rbuf[i,j]) def testOrderFortran(self): sbuf = numba.cuda.device_array((6,)) sbuf[:] = 1 sbuf = sbuf.reshape(3,2,order='F') rbuf = numba.cuda.device_array((6,)) rbuf[:] = 0 rbuf = sbuf.reshape(3,2,order='F') Sendrecv(sbuf, rbuf) # numba arrays do not have the .all() method for i in range(3): for j in range(2): self.assertTrue(sbuf[i,j] == rbuf[i,j]) def testNotContiguous(self): sbuf = numba.cuda.device_array((6,)) sbuf[:] = 1 sbuf = sbuf.reshape(3,2)[:,0] rbuf = numba.cuda.device_array((3,)) rbuf[:] = 0 self.assertRaises((BufferError, ValueError), Sendrecv, sbuf, rbuf) # --- @unittest.skipIf(array is None, 'array') @unittest.skipIf(dlpack is None, 'dlpack') class TestMessageDLPackCPUBuf(unittest.TestCase): def testDevice(self): buf = DLPackCPUBuf('i', [0,1,2,3]) buf.__dlpack_device__ = None self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: None self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (None, 0) self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (1, None) self.assertRaises(TypeError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (1,) self.assertRaises(ValueError, MPI.Get_address, buf) buf.__dlpack_device__ = lambda: (1, 0, 1) self.assertRaises(ValueError, MPI.Get_address, buf) del buf.__dlpack_device__ MPI.Get_address(buf) def testCapsule(self): buf = DLPackCPUBuf('i', [0,1,2,3]) # capsule = buf.__dlpack__() MPI.Get_address(buf) MPI.Get_address(buf) del capsule # capsule = buf.__dlpack__() retvals = [capsule] * 2 buf.__dlpack__ = lambda *args, **kwargs: retvals.pop() MPI.Get_address(buf) self.assertRaises(BufferError, MPI.Get_address, buf) del buf.__dlpack__ del capsule # buf.__dlpack__ = lambda *args, **kwargs: None self.assertRaises(BufferError, MPI.Get_address, buf) del buf.__dlpack__ def testNdim(self): buf = DLPackCPUBuf('i', [0,1,2,3]) dltensor = buf.managed.dl_tensor # for ndim in (2, 1, 0): dltensor.ndim = ndim MPI.Get_address(buf) # dltensor.ndim = -1 self.assertRaises(BufferError, MPI.Get_address, buf) # del dltensor def testShape(self): buf = DLPackCPUBuf('i', [0,1,2,3]) dltensor = buf.managed.dl_tensor # dltensor.ndim = 1 dltensor.shape[0] = -1 self.assertRaises(BufferError, MPI.Get_address, buf) # dltensor.ndim = 0 dltensor.shape = None MPI.Get_address(buf) # dltensor.ndim = 1 dltensor.shape = None self.assertRaises(BufferError, MPI.Get_address, buf) # del dltensor def testStrides(self): buf = DLPackCPUBuf('i', range(8)) dltensor = buf.managed.dl_tensor # for order in ('C', 'F'): dltensor.ndim, dltensor.shape, dltensor.strides = \ dlpack.make_dl_shape([2, 2, 2], order=order) MPI.Get_address(buf) dltensor.strides[0] = -1 self.assertRaises(BufferError, MPI.Get_address, buf) # del dltensor def testContiguous(self): buf = DLPackCPUBuf('i', range(8)) dltensor = buf.managed.dl_tensor # dltensor.ndim, dltensor.shape, dltensor.strides = \ dlpack.make_dl_shape([2, 2, 2], order='C') s = dltensor.strides strides = [s[i] for i in range(dltensor.ndim)] s[0], s[1], s[2] = [strides[i] for i in [0, 1, 2]] MPI.Get_address(buf) s[0], s[1], s[2] = [strides[i] for i in [2, 1, 0]] MPI.Get_address(buf) s[0], s[1], s[2] = [strides[i] for i in [0, 2, 1]] self.assertRaises(BufferError, MPI.Get_address, buf) s[0], s[1], s[2] = [strides[i] for i in [1, 0, 2]] self.assertRaises(BufferError, MPI.Get_address, buf) del s # dltensor.ndim, dltensor.shape, dltensor.strides = \ dlpack.make_dl_shape([1, 3, 1], order='C') 
s = dltensor.strides MPI.Get_address(buf) for i in range(4): for j in range(4): s[0], s[2] = i, j MPI.Get_address(buf) s[1] = 0 self.assertRaises(BufferError, MPI.Get_address, buf) del s # del dltensor def testByteOffset(self): buf = DLPackCPUBuf('B', [0,1,2,3]) dltensor = buf.managed.dl_tensor # dltensor.ndim = 1 for i in range(len(buf)): dltensor.byte_offset = i mem = MPI.memory(buf) self.assertEqual(mem[0], buf[i]) # del dltensor # --- @unittest.skipIf(array is None, 'array') class TestMessageCAIBuf(unittest.TestCase): def testNonReadonly(self): smsg = CAIBuf('i', [1,2,3], readonly=True) rmsg = CAIBuf('i', [0,0,0], readonly=True) self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testNonContiguous(self): smsg = CAIBuf('i', [1,2,3]) rmsg = CAIBuf('i', [0,0,0]) Sendrecv(smsg, rmsg) strides = rmsg.__cuda_array_interface__['strides'] good_strides = strides[:-2] + (0, 7) rmsg.__cuda_array_interface__['strides'] = good_strides Sendrecv(smsg, rmsg) bad_strides = (7,) + strides[1:] rmsg.__cuda_array_interface__['strides'] = bad_strides self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testAttrNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__ = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testAttrEmpty(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__ = dict() self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testAttrType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) items = list(rmsg.__cuda_array_interface__.items()) rmsg.__cuda_array_interface__ = items self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDataMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del rmsg.__cuda_array_interface__['data'] self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testDataNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['data'] = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDataType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['data'] = 0 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDataValue(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) dev_ptr = rmsg.__cuda_array_interface__['data'][0] rmsg.__cuda_array_interface__['data'] = (dev_ptr, ) self.assertRaises(ValueError, Sendrecv, smsg, rmsg) rmsg.__cuda_array_interface__['data'] = ( ) self.assertRaises(ValueError, Sendrecv, smsg, rmsg) rmsg.__cuda_array_interface__['data'] = (dev_ptr, False, None) self.assertRaises(ValueError, Sendrecv, smsg, rmsg) def testTypestrMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del rmsg.__cuda_array_interface__['typestr'] self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testTypestrNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['typestr'] = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testTypestrType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['typestr'] = 42 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testTypestrItemsize(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) typestr = rmsg.__cuda_array_interface__['typestr'] rmsg.__cuda_array_interface__['typestr'] = typestr[:2]+'X' self.assertRaises(ValueError, Sendrecv, smsg, rmsg) def testShapeMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del rmsg.__cuda_array_interface__['shape'] 
self.assertRaises(KeyError, Sendrecv, smsg, rmsg) def testShapeNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['shape'] = None self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testShapeType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['shape'] = 3 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testShapeValue(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['shape'] = (3, -1) rmsg.__cuda_array_interface__['strides'] = None self.assertRaises(BufferError, Sendrecv, smsg, rmsg) def testStridesMissing(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) del rmsg.__cuda_array_interface__['strides'] Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testStridesNone(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['strides'] = None Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testStridesType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['strides'] = 42 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDescrMissing(self): smsg = CAIBuf('d', [1,2,3]) rmsg = CAIBuf('d', [0,0,0]) del rmsg.__cuda_array_interface__['descr'] Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testDescrNone(self): smsg = CAIBuf('d', [1,2,3]) rmsg = CAIBuf('d', [0,0,0]) rmsg.__cuda_array_interface__['descr'] = None Sendrecv(smsg, rmsg) self.assertEqual(smsg, rmsg) def testDescrType(self): smsg = CAIBuf('B', [1,2,3]) rmsg = CAIBuf('B', [0,0,0]) rmsg.__cuda_array_interface__['descr'] = 42 self.assertRaises(TypeError, Sendrecv, smsg, rmsg) def testDescrWarning(self): m, n = 5, 3 smsg = CAIBuf('d', list(range(m*n))) rmsg = CAIBuf('d', [0]*(m*n)) typestr = rmsg.__cuda_array_interface__['typestr'] itemsize = int(typestr[2:]) new_typestr = "|V"+str(itemsize*n) new_descr = [('', typestr)]*n rmsg.__cuda_array_interface__['shape'] = (m,) rmsg.__cuda_array_interface__['strides'] = (itemsize*n,) rmsg.__cuda_array_interface__['typestr'] = new_typestr rmsg.__cuda_array_interface__['descr'] = new_descr import warnings with warnings.catch_warnings(): warnings.simplefilter("error") self.assertRaises(RuntimeWarning, Sendrecv, smsg, rmsg) try: # Python 3.2+ self.assertWarns(RuntimeWarning, Sendrecv, smsg, rmsg) except AttributeError: # Python 2 with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") Sendrecv(smsg, rmsg) self.assertEqual(len(w), 1) self.assertEqual(w[-1].category, RuntimeWarning) self.assertEqual(smsg, rmsg) # --- def Alltoallv(smsg, rmsg): comm = MPI.COMM_SELF comm.Alltoallv(smsg, rmsg) @unittest.skipMPI('msmpi(<8.0.0)') class TestMessageVector(unittest.TestCase): def testMessageBad(self): buf = MPI.Alloc_mem(5) empty = [None, 0, [0], "B"] def f(): Alltoallv([buf, 0, [0], "i", None], empty) self.assertRaises(ValueError, f) def f(): Alltoallv([buf, 0, [0], "\0"], empty) self.assertRaises(KeyError, f) def f(): Alltoallv([buf, None, [0], MPI.DATATYPE_NULL], empty) self.assertRaises(ValueError, f) def f(): Alltoallv([buf, None, [0], "i"], empty) self.assertRaises(ValueError, f) try: t = MPI.INT.Create_resized(0, -4).Commit() def f(): Alltoallv([buf, None, [0], t], empty) self.assertRaises(ValueError, f) t.Free() except NotImplementedError: pass MPI.Free_mem(buf) buf = [1,2,3,4] def f(): Alltoallv([buf, 0, 0, "i"], empty) self.assertRaises(TypeError, f) buf = {1:2,3:4} def f(): Alltoallv([buf, 0, 0, "i"], empty) 
self.assertRaises(TypeError, f) def testMessageNone(self): empty = [None, 0, "B"] Alltoallv(empty, empty) empty = [None, "B"] Alltoallv(empty, empty) def testMessageBottom(self): empty = [MPI.BOTTOM, 0, [0], "B"] Alltoallv(empty, empty) empty = [MPI.BOTTOM, 0, "B"] Alltoallv(empty, empty) empty = [MPI.BOTTOM, "B"] Alltoallv(empty, empty) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytes(self): sbuf = b"abc" rbuf = bytearray(3) Alltoallv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytearray(self): sbuf = bytearray(b"abc") rbuf = bytearray(3) Alltoallv([sbuf, "c"], [rbuf, MPI.CHAR]) self.assertEqual(sbuf, rbuf) @unittest.skipMPI('msmpi(<8.0.0)') class BaseTestMessageVectorArray(object): TYPECODES = "bhil"+"BHIL"+"fd" def array(self, typecode, initializer): raise NotImplementedError def check1(self, z, s, r, typecode): r[:] = z Alltoallv(s, r) for a, b in zip(s, r): self.assertEqual(a, b) def check2(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): r[:] = z Alltoallv([s, type], [r, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check3(self, z, s, r, typecode): size = len(r) for count in range(size): r[:] = z Alltoallv([s, count], [r, count]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for count in range(size): r[:] = z Alltoallv([s, (count, None)], [r, (count, None)]) for i in range(count): self.assertEqual(r[i], s[i]) for i in range(count, size): self.assertEqual(r[i], z[0]) for disp in range(size): for count in range(size-disp): r[:] = z Alltoallv([s, ([count], [disp])], [r, ([count], [disp])]) for i in range(0, disp): self.assertEqual(r[i], z[0]) for i in range(disp, disp+count): self.assertEqual(r[i], s[i]) for i in range(disp+count, size): self.assertEqual(r[i], z[0]) def check4(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): for count in (None, len(s)): r[:] = z Alltoallv([s, count, type], [r, count, type]) for a, b in zip(s, r): self.assertEqual(a, b) def check5(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): for p in range(len(s)): r[:] = z Alltoallv([s, (p, None), type], [r, (p, None), type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Alltoallv([s, (count, [displ]), type], [r, (count, [displ]), type]) for a, b in zip(r[:p], z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check6(self, z, s, r, typecode): datatype = typemap[typecode] for type in (None, typecode, datatype): for p in range(0, len(s)): r[:] = z Alltoallv([s, p, None, type], [r, p, None, type]) for a, b in zip(s[:p], r[:p]): self.assertEqual(a, b) for q in range(p, len(s)): count, displ = q-p, p r[:] = z Alltoallv([s, count, [displ], type], [r, count, [displ], type]) for a, b in zip(r[:p], z[:p]): self.assertEqual(a, b) for a, b in zip(r[p:q], s[p:q]): self.assertEqual(a, b) for a, b in zip(r[q:], z[q:]): self.assertEqual(a, b) def check(self, test): for t in tuple(self.TYPECODES): for n in range(1, 10): z = self.array(t, [0]*n) s = self.array(t, list(range(n))) r = self.array(t, [0]*n) test(z, s, r, t) def testArray1(self): self.check(self.check1) def testArray2(self): self.check(self.check2) def testArray3(self): self.check(self.check3) def testArray4(self): 
self.check(self.check4) def testArray5(self): self.check(self.check5) def testArray6(self): self.check(self.check6) @unittest.skipIf(array is None, 'array') class TestMessageVectorArray(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): return array.array(typecode, initializer) @unittest.skipIf(numpy is None, 'numpy') class TestMessageVectorNumPy(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): return numpy.array(initializer, dtype=typecode) @unittest.skipIf(array is None, 'array') class TestMessageVectorCAIBuf(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): return CAIBuf(typecode, initializer) @unittest.skipIf(cupy is None, 'cupy') class TestMessageVectorCuPy(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): return cupy.array(initializer, dtype=typecode) @unittest.skipIf(numba is None, 'numba') class TestMessageVectorNumba(unittest.TestCase, BaseTestMessageVectorArray): def array(self, typecode, initializer): n = len(initializer) arr = numba.cuda.device_array((n,), dtype=typecode) arr[:] = initializer return arr # --- def Alltoallw(smsg, rmsg): try: MPI.COMM_SELF.Alltoallw(smsg, rmsg) except NotImplementedError: if isinstance(smsg, (list, tuple)): smsg = smsg[0] if isinstance(rmsg, (list, tuple)): rmsg = rmsg[0] try: rmsg[:] = smsg except: pass class TestMessageVectorW(unittest.TestCase): def testMessageBad(self): sbuf = MPI.Alloc_mem(4) rbuf = MPI.Alloc_mem(4) def f(): Alltoallw([sbuf],[rbuf]) self.assertRaises(ValueError, f) def f(): Alltoallw([sbuf, [0], [0], [MPI.BYTE], None], [rbuf, [0], [0], [MPI.BYTE]]) self.assertRaises(ValueError, f) def f(): Alltoallw([sbuf, [0], [0], [MPI.BYTE]], [rbuf, [0], [0], [MPI.BYTE], None]) self.assertRaises(ValueError, f) MPI.Free_mem(sbuf) MPI.Free_mem(rbuf) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBottom(self): sbuf = b"abcxyz" rbuf = bytearray(6) saddr = MPI.Get_address(sbuf) raddr = MPI.Get_address(rbuf) stype = MPI.Datatype.Create_struct([6], [saddr], [MPI.CHAR]).Commit() rtype = MPI.Datatype.Create_struct([6], [raddr], [MPI.CHAR]).Commit() smsg = [MPI.BOTTOM, [1], [0] , [stype]] rmsg = [MPI.BOTTOM, ([1], [0]), [rtype]] try: Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) finally: stype.Free() rtype.Free() @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytes(self): sbuf = b"abc" rbuf = bytearray(3) smsg = [sbuf, [3], [0], [MPI.CHAR]] rmsg = [rbuf, ([3], [0]), [MPI.CHAR]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytearray(self): sbuf = bytearray(b"abc") rbuf = bytearray(3) smsg = [sbuf, [3], [0], [MPI.CHAR]] rmsg = [rbuf, ([3], [0]), [MPI.CHAR]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) sbuf = bytearray(b"abc") rbuf = bytearray(3) smsg = [sbuf, None, None, [MPI.CHAR]] rmsg = [rbuf, [MPI.CHAR]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf[0], rbuf[0]) self.assertEqual(bytearray(2), rbuf[1:]) @unittest.skipIf(array is None, 'array') def testMessageArray(self): sbuf = array.array('i', [1,2,3]) rbuf = array.array('i', [0,0,0]) smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) @unittest.skipIf(numpy is None, 'numpy') def testMessageNumPy(self): sbuf = numpy.array([1,2,3], dtype='i') rbuf = numpy.array([0,0,0], dtype='i') smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) 
self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(array is None, 'array') def testMessageCAIBuf(self): sbuf = CAIBuf('i', [1,2,3], readonly=True) rbuf = CAIBuf('i', [0,0,0], readonly=False) smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) self.assertEqual(sbuf, rbuf) @unittest.skipIf(cupy is None, 'cupy') def testMessageCuPy(self): sbuf = cupy.array([1,2,3], 'i') rbuf = cupy.array([0,0,0], 'i') smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) self.assertTrue((sbuf == rbuf).all()) @unittest.skipIf(numba is None, 'numba') def testMessageNumba(self): sbuf = numba.cuda.device_array((3,), 'i') sbuf[:] = [1,2,3] rbuf = numba.cuda.device_array((3,), 'i') rbuf[:] = [0,0,0] smsg = [sbuf, [3], [0], [MPI.INT]] rmsg = [rbuf, ([3], [0]), [MPI.INT]] Alltoallw(smsg, rmsg) # numba arrays do not have the .all() method for i in range(3): self.assertTrue(sbuf[i] == rbuf[i]) # --- def PutGet(smsg, rmsg, target=None): try: win = MPI.Win.Allocate(256, 1, MPI.INFO_NULL, MPI.COMM_SELF) except NotImplementedError: win = MPI.WIN_NULL try: try: win.Fence() except NotImplementedError: pass try: win.Put(smsg, 0, target) except NotImplementedError: pass try: win.Fence() except NotImplementedError: pass try: win.Get(rmsg, 0, target) except NotImplementedError: if isinstance(smsg, (list, tuple)): smsg = smsg[0] if isinstance(rmsg, (list, tuple)): rmsg = rmsg[0] try: rmsg[:] = smsg except: pass try: win.Fence() except NotImplementedError: pass finally: if win != MPI.WIN_NULL: win.Free() class TestMessageRMA(unittest.TestCase): def testMessageBad(self): sbuf = [None, 0, 0, "B", None] rbuf = [None, 0, 0, "B"] target = (0, 0, MPI.BYTE) def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) sbuf = [None, 0, 0, "B"] rbuf = [None, 0, 0, "B", None] target = (0, 0, MPI.BYTE) def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) sbuf = [None, 0, "B"] rbuf = [None, 0, "B"] target = (0, 0, MPI.BYTE, None) def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) sbuf = [None, 0, "B"] rbuf = [None, 0, "B"] target = {1:2,3:4} def f(): PutGet(sbuf, rbuf, target) self.assertRaises(ValueError, f) def testMessageNone(self): for empty in ([None, 0, 0, MPI.BYTE], [None, 0, MPI.BYTE], [None, MPI.BYTE]): for target in (None, 0, [0, 0, MPI.BYTE]): PutGet(empty, empty, target) def testMessageBottom(self): for empty in ([MPI.BOTTOM, 0, 0, MPI.BYTE], [MPI.BOTTOM, 0, MPI.BYTE], [MPI.BOTTOM, MPI.BYTE]): for target in (None, 0, [0, 0, MPI.BYTE]): PutGet(empty, empty, target) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytes(self): for target in (None, 0, [0, 3, MPI.BYTE]): sbuf = b"abc" rbuf = bytearray(3) PutGet(sbuf, rbuf, target) self.assertEqual(sbuf, rbuf) @unittest.skipIf(pypy_lt_53, 'pypy(<5.3)') def testMessageBytearray(self): for target in (None, 0, [0, 3, MPI.BYTE]): sbuf = bytearray(b"abc") rbuf = bytearray(3) PutGet(sbuf, rbuf, target) self.assertEqual(sbuf, rbuf) @unittest.skipIf(py3, 'python3') @unittest.skipIf(pypy2, 'pypy2') @unittest.skipIf(hasattr(MPI, 'ffi'), 'mpi4py-cffi') def testMessageUnicode(self): # Test for Issue #120 sbuf = unicode("abc") rbuf = bytearray(len(buffer(sbuf))) PutGet([sbuf, MPI.BYTE], [rbuf, MPI.BYTE], None) @unittest.skipMPI('msmpi') @unittest.skipIf(array is None, 'array') def testMessageArray(self): sbuf = array.array('i', [1,2,3]) rbuf = array.array('i', [0,0,0]) PutGet(sbuf, rbuf) self.assertEqual(sbuf, rbuf) @unittest.skipMPI('msmpi') @unittest.skipIf(numpy 
is None, 'numpy') def testMessageNumPy(self): sbuf = numpy.array([1,2,3], dtype='i') rbuf = numpy.array([0,0,0], dtype='i') PutGet(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipMPI('msmpi') @unittest.skipIf(array is None, 'array') def testMessageCAIBuf(self): sbuf = CAIBuf('i', [1,2,3], readonly=True) rbuf = CAIBuf('i', [0,0,0], readonly=False) PutGet(sbuf, rbuf) self.assertEqual(sbuf, rbuf) @unittest.skipMPI('msmpi') @unittest.skipMPI('mvapich2') @unittest.skipIf(cupy is None, 'cupy') def testMessageCuPy(self): sbuf = cupy.array([1,2,3], 'i') rbuf = cupy.array([0,0,0], 'i') PutGet(sbuf, rbuf) self.assertTrue((sbuf == rbuf).all()) @unittest.skipMPI('msmpi') @unittest.skipMPI('mvapich2') @unittest.skipIf(numba is None, 'numba') def testMessageNumba(self): sbuf = numba.cuda.device_array((3,), 'i') sbuf[:] = [1,2,3] rbuf = numba.cuda.device_array((3,), 'i') rbuf[:] = [0,0,0] PutGet(sbuf, rbuf) # numba arrays do not have the .all() method for i in range(3): self.assertTrue(sbuf[i] == rbuf[i]) # --- if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_msgzero.py000066400000000000000000000034231460670727200170360ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class BaseTestMessageZero(object): null_b = [None, MPI.INT] null_v = [None, (0, None), MPI.INT] def testPointToPoint(self): comm = self.COMM comm.Sendrecv(sendbuf=self.null_b, dest=comm.rank, recvbuf=self.null_b, source=comm.rank) r2 = comm.Irecv(self.null_b, comm.rank) r1 = comm.Isend(self.null_b, comm.rank) MPI.Request.Waitall([r1, r2]) def testCollectivesBlock(self): comm = self.COMM comm.Bcast(self.null_b) comm.Gather(self.null_b, self.null_b) comm.Scatter(self.null_b, self.null_b) comm.Allgather(self.null_b, self.null_b) comm.Alltoall(self.null_b, self.null_b) def testCollectivesVector(self): comm = self.COMM comm.Gatherv(self.null_b, self.null_v) comm.Scatterv(self.null_v, self.null_b) comm.Allgatherv(self.null_b, self.null_v) comm.Alltoallv(self.null_v, self.null_v) @unittest.skipMPI('openmpi') @unittest.skipMPI('SpectrumMPI') def testReductions(self): comm = self.COMM comm.Reduce(self.null_b, self.null_b) comm.Allreduce(self.null_b, self.null_b) comm.Reduce_scatter_block(self.null_b, self.null_b) rcnt = [0]*comm.Get_size() comm.Reduce_scatter(self.null_b, self.null_b, rcnt) try: comm.Scan(self.null_b, self.null_b) except NotImplementedError: pass try: comm.Exscan(self.null_b, self.null_b) except NotImplementedError: pass class TestMessageZeroSelf(BaseTestMessageZero, unittest.TestCase): COMM = MPI.COMM_SELF class TestMessageZeroWorld(BaseTestMessageZero, unittest.TestCase): COMM = MPI.COMM_WORLD if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_objmodel.py000066400000000000000000000077401460670727200171510ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys import weakref class TestObjModel(unittest.TestCase): objects = [ MPI.Status(), MPI.DATATYPE_NULL, MPI.REQUEST_NULL, MPI.INFO_NULL, MPI.ERRHANDLER_NULL, MPI.GROUP_NULL, MPI.WIN_NULL, MPI.OP_NULL, MPI.FILE_NULL, MPI.MESSAGE_NULL, MPI.COMM_NULL, ] def testEq(self): for i, obj1 in enumerate(self.objects): objects = self.objects[:] obj2 = objects[i] self.assertTrue(obj1 == obj2) self.assertFalse(obj1 != obj2) del objects[i] for obj2 in objects: self.assertTrue(obj1 != obj2) self.assertTrue(obj2 != obj1) self.assertFalse(obj1 == obj2) self.assertFalse(obj2 == obj1) self.assertFalse(None == obj1 ) self.assertFalse(obj1 == None ) self.assertFalse(obj1 == True ) 
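# --- Illustrative sketch (not part of the original test suite) ---
# A minimal, standalone look at the handle semantics exercised by the
# object-model tests around here: a default-constructed handle equals the
# predefined null handle of its class, and null handles are falsy while
# real handles are truthy. Importing mpi4py is assumed to initialize MPI.
from mpi4py import MPI

assert MPI.Comm() == MPI.COMM_NULL      # default constructor yields the null handle
assert not MPI.COMM_NULL                # null handles evaluate to False
assert MPI.COMM_WORLD != MPI.COMM_NULL  # a live communicator is distinct
assert bool(MPI.COMM_WORLD)             # and evaluates to True
# --- end sketch ---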
self.assertFalse(obj1 == False) self.assertFalse(obj1 == 12345) self.assertFalse(obj1 == "abc") self.assertFalse(obj1 == [123]) self.assertFalse(obj1 == (1,2)) self.assertFalse(obj1 == {0:0}) self.assertFalse(obj1 == set()) def testNe(self): for i, obj1 in enumerate(self.objects): objects = self.objects[:] obj2 = objects[i] self.assertFalse(obj1 != obj2) del objects[i] for obj2 in objects: self.assertTrue (obj1 != obj2) self.assertTrue(None != obj1 ) self.assertTrue(obj1 != None ) self.assertTrue(obj1 != True ) self.assertTrue(obj1 != False) self.assertTrue(obj1 != 12345) self.assertTrue(obj1 != "abc") self.assertTrue(obj1 != [123]) self.assertTrue(obj1 != (1,2)) self.assertTrue(obj1 != {0:0}) self.assertTrue(obj1 != set()) def testBool(self): for obj in self.objects[1:]: self.assertFalse(not not obj) self.assertTrue(not obj) self.assertFalse(obj) def testHash(self): try: hash(MPI.COMM_NULL) except TypeError: pass else: if hasattr(sys, 'pypy_version_info'): self.skipTest('pypy') for obj in self.objects: ob_hash = lambda: hash(obj) self.assertRaises(TypeError, ob_hash) def testInit(self): for i, obj in enumerate(self.objects): klass = type(obj) new = klass() self.assertEqual(new, obj) new = klass(obj) self.assertEqual(new, obj) objects = self.objects[:] del objects[i] for other in objects: ob_init = lambda: klass(other) self.assertRaises(TypeError, ob_init) ob_init = lambda: klass(1234) self.assertRaises(TypeError, ob_init) ob_init = lambda: klass("abc") self.assertRaises(TypeError, ob_init) def testWeakRef(self): for obj in self.objects: wr = weakref.ref(obj) self.assertTrue(wr() is obj) self.assertTrue(wr in weakref.getweakrefs(obj)) wr = weakref.proxy(obj) self.assertTrue(wr in weakref.getweakrefs(obj)) def testSizeOf(self): for obj in self.objects: n1 = MPI._sizeof(obj) n2 = MPI._sizeof(type(obj)) self.assertEqual(n1, n2) def testAddressOf(self): for obj in self.objects: addr = MPI._addressof(obj) def testAHandleOf(self): for obj in self.objects: if isinstance(obj, MPI.Status): hdl = lambda: MPI._handleof(obj) self.assertRaises(NotImplementedError, hdl) continue hdl = MPI._handleof(obj) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_op.py000066400000000000000000000166501460670727200157740ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys MPI_ERR_OP = MPI.ERR_OP try: import array except ImportError: array = None def asarray(typecode, data): try: memoryview _tobytes = lambda s: memoryview(s).tobytes() except NameError: _tobytes = lambda s: buffer(s)[:] try: _frombytes = array.array.frombytes except AttributeError: _frombytes = array.array.fromstring a = array.array(typecode, []) _frombytes(a, _tobytes(data)) return a def mysum_obj(a, b): for i in range(len(a)): b[i] = a[i] + b[i] return b def mysum_buf(a, b, dt): assert dt == MPI.INT assert len(a) == len(b) b[:] = mysum_obj(asarray('i', a), asarray('i', b)) def mysum(ba, bb, dt): if dt is None: return mysum_obj(ba, bb) else: return mysum_buf(ba, bb, dt) class TestOp(unittest.TestCase): def testConstructor(self): op = MPI.Op() self.assertFalse(op) self.assertEqual(op, MPI.OP_NULL) @unittest.skipIf(array is None, 'array') def testCreate(self): for comm in [MPI.COMM_SELF, MPI.COMM_WORLD]: for commute in [True, False]: for N in range(4): myop = MPI.Op.Create(mysum, commute) self.assertFalse(myop.is_predefined) if (hasattr(sys, 'pypy_version_info') and comm.size > 1): myop.Free() continue try: # buffer(empty_array) returns # the same non-NULL pointer !!! 
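# --- Illustrative sketch (not part of the original test suite) ---
# Standalone use of a user-defined reduction in the spirit of `mysum`
# above: the callback receives the read-only input buffer, the in/out
# buffer, and the MPI datatype, and must combine them element-wise in
# place. Assumes NumPy is available and that a C int is 32 bits here;
# run with e.g. `mpiexec -n 2 python example.py`.
from mpi4py import MPI
import numpy as np

def elementwise_sum(inbuf, inoutbuf, datatype):
    # View both raw buffers as int32 arrays and accumulate into the in/out one.
    a = np.frombuffer(inbuf, dtype='i')
    b = np.frombuffer(inoutbuf, dtype='i')
    b += a

op = MPI.Op.Create(elementwise_sum, commute=True)
try:
    comm = MPI.COMM_WORLD
    sendbuf = np.arange(4, dtype='i')
    recvbuf = np.zeros(4, dtype='i')
    comm.Allreduce([sendbuf, MPI.INT], [recvbuf, MPI.INT], op=op)
    # Every rank now holds comm.size * [0, 1, 2, 3] in recvbuf.
finally:
    op.Free()  # user-defined operations must be freed explicitly
# --- end sketch ---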
if N == 0: continue size = comm.Get_size() rank = comm.Get_rank() a = array.array('i', [i*(rank+1) for i in range(N)]) b = array.array('i', [0]*len(a)) comm.Allreduce([a, MPI.INT], [b, MPI.INT], myop) scale = sum(range(1,size+1)) for i in range(N): self.assertEqual(b[i], scale*i) ret = myop(a, b) self.assertTrue(ret is b) for i in range(N): self.assertEqual(b[i], a[i]+scale*i) myop2 = MPI.Op(myop) a = array.array('i', [1]*N) b = array.array('i', [2]*N) ret = myop2(a, b) self.assertTrue(ret is b) for i in range(N): self.assertEqual(b[i], 3) myop2 = None finally: myop.Free() def testCreateMany(self): N = 32 # max user-defined operations # ops = [] for i in range(N): o = MPI.Op.Create(mysum) ops.append(o) for o in ops: o.Free() # cleanup # another round ops = [] for i in range(N): o = MPI.Op.Create(mysum) ops.append(o) for o in ops: o.Free() # cleanup def _test_call(self, op, args, res): self.assertEqual(op(*args), res) def testCall(self): self._test_call(MPI.MIN, (2,3), 2) self._test_call(MPI.MAX, (2,3), 3) self._test_call(MPI.SUM, (2,3), 5) self._test_call(MPI.PROD, (2,3), 6) def xor(x,y): return bool(x) ^ bool(y) for x, y in ((0, 0), (0, 1), (1, 0), (1, 1)): self._test_call(MPI.LAND, (x,y), x and y) self._test_call(MPI.LOR, (x,y), x or y) self._test_call(MPI.LXOR, (x,y), xor(x, y)) self._test_call(MPI.BAND, (x,y), x & y) self._test_call(MPI.BOR, (x,y), x | y) self._test_call(MPI.BXOR, (x,y), x ^ y) if MPI.REPLACE: self._test_call(MPI.REPLACE, (2,3), 3) self._test_call(MPI.REPLACE, (3,2), 2) if MPI.NO_OP: self._test_call(MPI.NO_OP, (2,3), 2) self._test_call(MPI.NO_OP, (3,2), 3) def testMinMax(self): x = [1]; y = [1] res = MPI.MIN(x, y) self.assertTrue(res is x) res = MPI.MAX(x, y) self.assertTrue(res is x) def testMinMaxLoc(self): x = [1]; i = [2]; u = [x, i] y = [2]; j = [1]; v = [y, j] res = MPI.MINLOC(u, v) self.assertTrue(res[0] is x) self.assertTrue(res[1] is i) res = MPI.MINLOC(v, u) self.assertTrue(res[0] is x) self.assertTrue(res[1] is i) res = MPI.MAXLOC(u, v) self.assertTrue(res[0] is y) self.assertTrue(res[1] is j) res = MPI.MAXLOC(v, u) self.assertTrue(res[0] is y) self.assertTrue(res[1] is j) # x = [1]; i = 0; u = [x, i] y = [1]; j = 1; v = [y, j] res = MPI.MINLOC(u, v) self.assertTrue(res[0] is x) self.assertTrue(res[1] is i) res = MPI.MAXLOC(u, v) self.assertTrue(res[0] is x) self.assertTrue(res[1] is i) # x = [1]; i = 1; u = [x, i] y = [1]; j = 0; v = [y, j] res = MPI.MINLOC(u, v) self.assertTrue(res[0] is y) self.assertTrue(res[1] is j) res = MPI.MAXLOC(u, v) self.assertTrue(res[0] is y) self.assertTrue(res[1] is j) # x = [1]; i = [0]; u = [x, i] y = [1]; j = [1]; v = [y, j] res = MPI.MINLOC(u, v) self.assertTrue(res[0] is x) self.assertTrue(res[1] is i) res = MPI.MAXLOC(u, v) self.assertTrue(res[0] is x) self.assertTrue(res[1] is i) # x = [1]; i = [1]; u = [x, i] y = [1]; j = [0]; v = [y, j] res = MPI.MINLOC(u, v) self.assertTrue(res[0] is y) self.assertTrue(res[1] is j) res = MPI.MAXLOC(u, v) self.assertTrue(res[0] is y) self.assertTrue(res[1] is j) @unittest.skipMPI('openmpi(<=1.8.1)') def testIsCommutative(self): try: MPI.SUM.Is_commutative() except NotImplementedError: self.skipTest('mpi-op-is_commutative') ops = [MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC,] for op in ops: flag = op.Is_commutative() self.assertEqual(flag, op.is_commutative) self.assertTrue(flag) @unittest.skipMPI('openmpi(<=1.8.1)') @unittest.skipMPI('mpich(==3.4.1)') def testIsCommutativeExtra(self): try: 
MPI.SUM.Is_commutative() except NotImplementedError: self.skipTest('mpi-op-is_commutative') ops = [MPI.REPLACE, MPI.NO_OP] for op in ops: if not op: continue flag = op.Is_commutative() self.assertEqual(flag, op.is_commutative) #self.assertFalse(flag) def testIsPredefined(self): self.assertTrue(MPI.OP_NULL.is_predefined) ops = [MPI.MAX, MPI.MIN, MPI.SUM, MPI.PROD, MPI.LAND, MPI.BAND, MPI.LOR, MPI.BOR, MPI.LXOR, MPI.BXOR, MPI.MAXLOC, MPI.MINLOC,] for op in ops: self.assertTrue(op.is_predefined) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_p2p_buf.py000066400000000000000000000333721460670727200167130ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl class BaseTestP2PBuf(object): COMM = MPI.COMM_NULL def testSendrecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.subTest(self): for s in range(0, size): sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+1) self.COMM.Sendrecv(sbuf.as_mpi(), dest, 0, rbuf.as_mpi(), source, 0) for value in rbuf[:-1]: self.assertEqual(value, s) self.assertEqual(rbuf[-1], -1) def testSendrecvReplace(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.subTest(self): for s in range(0, size): buf = array(rank, typecode, s); self.COMM.Sendrecv_replace(buf.as_mpi(), dest, 0, source, 0) for value in buf: self.assertEqual(value, source) def testSendRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue for s in range(0, size): # sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s) mem = array( 0, typecode, 2*(s+MPI.BSEND_OVERHEAD)).as_raw() if size == 1: MPI.Attach_buffer(mem) rbuf = sbuf MPI.Detach_buffer() elif rank == 0: MPI.Attach_buffer(mem) self.COMM.Ibsend(sbuf.as_mpi(), 1, 0).Wait() self.COMM.Bsend(sbuf.as_mpi(), 1, 0) MPI.Detach_buffer() self.COMM.Send(sbuf.as_mpi(), 1, 0) self.COMM.Ssend(sbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) self.COMM.Recv(rbuf.as_mpi(), 1, 0) elif rank == 1: self.COMM.Recv(rbuf.as_mpi(), 0, 0) self.COMM.Recv(rbuf.as_mpi(), 0, 0) self.COMM.Recv(rbuf.as_mpi(), 0, 0) self.COMM.Recv(rbuf.as_mpi(), 0, 0) MPI.Attach_buffer(mem) self.COMM.Ibsend(sbuf.as_mpi(), 0, 0).Wait() self.COMM.Bsend(sbuf.as_mpi(), 0, 0) MPI.Detach_buffer() self.COMM.Send(sbuf.as_mpi(), 0, 0) self.COMM.Ssend(sbuf.as_mpi(), 0, 0) else: rbuf = sbuf for value in rbuf: self.assertEqual(value, s) # rank = self.COMM.Get_rank() sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s) rreq = self.COMM.Irecv(rbuf.as_mpi(), rank, 0) self.COMM.Rsend(sbuf.as_mpi(), rank, 0) rreq.Wait() for value in rbuf: self.assertEqual(value, s) rbuf = array(-1, typecode, s) rreq = self.COMM.Irecv(rbuf.as_mpi(), rank, 0) self.COMM.Irsend(sbuf.as_mpi(), rank, 0).Wait() rreq.Wait() for value in rbuf: self.assertEqual(value, s) def testProcNull(self): comm = self.COMM # comm.Sendrecv(None, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0) comm.Sendrecv_replace(None, MPI.PROC_NULL, 0, MPI.PROC_NULL, 0) # comm.Send (None, MPI.PROC_NULL) comm.Isend (None, MPI.PROC_NULL).Wait() # comm.Ssend(None, MPI.PROC_NULL) comm.Issend(None, MPI.PROC_NULL).Wait() # buf = MPI.Alloc_mem(MPI.BSEND_OVERHEAD) 
MPI.Attach_buffer(buf) comm.Bsend(None, MPI.PROC_NULL) comm.Ibsend(None, MPI.PROC_NULL).Wait() MPI.Detach_buffer() MPI.Free_mem(buf) # comm.Rsend(None, MPI.PROC_NULL) comm.Irsend(None, MPI.PROC_NULL).Wait() # comm.Recv (None, MPI.PROC_NULL) comm.Irecv(None, MPI.PROC_NULL).Wait() @unittest.skipMPI('mpich(==3.4.1)') def testProcNullPersistent(self): comm = self.COMM # req = comm.Send_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() # req = comm.Ssend_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() # buf = MPI.Alloc_mem(MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) req = comm.Bsend_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() MPI.Detach_buffer() MPI.Free_mem(buf) # req = comm.Rsend_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() # req = comm.Recv_init(None, MPI.PROC_NULL) req.Start(); req.Wait(); req.Free() def testPersistent(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue for s in range(size): for xs in range(3): # sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) sendreq = self.COMM.Send_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) sendreq.Start() recvreq.Start() sendreq.Wait() recvreq.Wait() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) for value in rbuf[:s]: self.assertEqual(value, s) for value in rbuf[s:]: self.assertEqual(value, -1) # sbuf = array(s, typecode, s) rbuf = array(-1, typecode, s+xs) sendreq = self.COMM.Send_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) reqlist = [sendreq, recvreq] MPI.Prequest.Startall(reqlist) index1 = MPI.Prequest.Waitany(reqlist) self.assertTrue(index1 in [0, 1]) self.assertNotEqual(reqlist[index1], MPI.REQUEST_NULL) index2 = MPI.Prequest.Waitany(reqlist) self.assertTrue(index2 in [0, 1]) self.assertNotEqual(reqlist[index2], MPI.REQUEST_NULL) self.assertTrue(index1 != index2) index3 = MPI.Prequest.Waitany(reqlist) self.assertEqual(index3, MPI.UNDEFINED) for preq in reqlist: self.assertNotEqual(preq, MPI.REQUEST_NULL) preq.Free() self.assertEqual(preq, MPI.REQUEST_NULL) for value in rbuf[:s]: self.assertEqual(value, s) for value in rbuf[s:]: self.assertEqual(value, -1) # sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) sendreq = self.COMM.Ssend_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) sendreq.Start() recvreq.Start() sendreq.Wait() recvreq.Wait() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) for value in rbuf[:s]: self.assertEqual(value, s) for value in rbuf[s:]: self.assertEqual(value, -1) # mem = array( 0, typecode, s+MPI.BSEND_OVERHEAD).as_raw() sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) MPI.Attach_buffer(mem) sendreq = self.COMM.Bsend_init(sbuf.as_mpi(), dest, 0) recvreq = self.COMM.Recv_init(rbuf.as_mpi(), source, 0) sendreq.Start() recvreq.Start() sendreq.Wait() recvreq.Wait() MPI.Detach_buffer() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) 
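# --- Illustrative sketch (not part of the original test suite) ---
# The persistent point-to-point pattern behind testPersistent: build the
# requests once, Start/Wait them as many times as needed, then Free them.
# Run with e.g. `mpiexec -n 2 python example.py`.
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
peer_dst = (rank + 1) % size
peer_src = (rank - 1) % size

sendbuf = bytearray(b"ping")
recvbuf = bytearray(4)
sendreq = comm.Send_init([sendbuf, MPI.BYTE], peer_dst, tag=7)
recvreq = comm.Recv_init([recvbuf, MPI.BYTE], peer_src, tag=7)
for _ in range(3):  # reuse the same requests repeatedly
    MPI.Prequest.Startall([sendreq, recvreq])
    MPI.Request.Waitall([sendreq, recvreq])
sendreq.Free()
recvreq.Free()
# --- end sketch ---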
sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) for value in rbuf[:s]: self.assertEqual(value, s) for value in rbuf[s:]: self.assertEqual(value, -1) # rank = self.COMM.Get_rank() sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s+xs) recvreq = self.COMM.Recv_init (rbuf.as_mpi(), rank, 0) sendreq = self.COMM.Rsend_init(sbuf.as_mpi(), rank, 0) recvreq.Start() sendreq.Start() recvreq.Wait() sendreq.Wait() self.assertNotEqual(sendreq, MPI.REQUEST_NULL) self.assertNotEqual(recvreq, MPI.REQUEST_NULL) sendreq.Free() recvreq.Free() self.assertEqual(sendreq, MPI.REQUEST_NULL) self.assertEqual(recvreq, MPI.REQUEST_NULL) for value in rbuf[:s]: self.assertEqual(value, s) for value in rbuf[s:]: self.assertEqual(value, -1) def testProbe(self): comm = self.COMM.Dup() try: request = comm.Issend([None, 0, MPI.BYTE], comm.rank, 123) self.assertTrue(request) status = MPI.Status() comm.Probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(request) flag = request.Test() self.assertTrue(request) self.assertFalse(flag) comm.Recv([None, 0, MPI.BYTE], comm.rank, 123) self.assertTrue(request) flag = False while not flag: flag = request.Test() self.assertFalse(request) self.assertTrue(flag) finally: comm.Free() @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') def testProbeCancel(self): comm = self.COMM.Dup() try: request = comm.Issend([None, 0, MPI.BYTE], comm.rank, 123) status = MPI.Status() comm.Probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) request.Cancel() self.assertTrue(request) status = MPI.Status() request.Get_status(status) cancelled = status.Is_cancelled() if not cancelled: comm.Recv([None, 0, MPI.BYTE], comm.rank, 123) request.Wait() else: request.Free() finally: comm.Free() def testIProbe(self): comm = self.COMM.Dup() try: f = comm.Iprobe() self.assertFalse(f) f = comm.Iprobe(MPI.ANY_SOURCE) self.assertFalse(f) f = comm.Iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG) self.assertFalse(f) status = MPI.Status() f = comm.Iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertFalse(f) self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) finally: comm.Free() class TestP2PBufSelf(BaseTestP2PBuf, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PBufWorld(BaseTestP2PBuf, unittest.TestCase): COMM = MPI.COMM_WORLD class TestP2PBufSelfDup(TestP2PBufSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestP2PBufWorldDup(TestP2PBufWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_p2p_buf_matched.py000066400000000000000000000150021460670727200203660ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class TestMessage(unittest.TestCase): def testMessageNull(self): null = MPI.MESSAGE_NULL self.assertFalse(null) null2 = MPI.Message() self.assertEqual(null, null2) null3 = MPI.Message(null) self.assertEqual(null, null3) def testMessageNoProc(self): # noproc = MPI.MESSAGE_NO_PROC self.assertTrue(noproc) noproc.Recv(None) self.assertTrue(noproc) 
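# --- Illustrative sketch (not part of the original test suite) ---
# The matched-probe pattern behind MPI.Message: Mprobe removes a pending
# message from the unexpected queue and returns a Message handle, which is
# then received with Message.Recv. Requires an MPI-3 library with matched
# probes; a self-send on COMM_SELF keeps the example single-process.
from mpi4py import MPI

comm = MPI.COMM_SELF
sendbuf = bytearray(b"abc")
recvbuf = bytearray(3)
req = comm.Isend([sendbuf, MPI.BYTE], dest=0, tag=11)
msg = comm.Mprobe(source=0, tag=11)  # returns an MPI.Message handle
msg.Recv([recvbuf, MPI.BYTE])        # consumes the matched message
req.Wait()
assert recvbuf == b"abc"
# --- end sketch ---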
noproc.Irecv(None).Wait() self.assertTrue(noproc) # noproc2 = MPI.Message(MPI.MESSAGE_NO_PROC) self.assertTrue(noproc2) self.assertEqual(noproc2, noproc) self.assertNotEqual(noproc, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) message.Recv(None) self.assertEqual(message, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) request = message.Irecv(None) self.assertEqual(message, MPI.MESSAGE_NULL) self.assertNotEqual(request, MPI.REQUEST_NULL) request.Wait() self.assertEqual(request, MPI.REQUEST_NULL) @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class BaseTestP2PMatched(object): COMM = MPI.COMM_NULL def testIMProbe(self): comm = self.COMM.Dup() try: m = comm.Improbe() self.assertEqual(m, None) m = comm.Improbe(MPI.ANY_SOURCE) self.assertEqual(m, None) m = comm.Improbe(MPI.ANY_SOURCE, MPI.ANY_TAG) self.assertEqual(m, None) status = MPI.Status() m = comm.Improbe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(m, None) self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) m = MPI.Message.Iprobe(comm) self.assertEqual(m, None) buf = [None, 0, MPI.BYTE] s = comm.Isend(buf, comm.rank, 0) r = comm.Mprobe(comm.rank, 0).Irecv(buf) MPI.Request.Waitall([s,r]) finally: comm.Free() def testProbeRecv(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for array, typecode in arrayimpl.subTest(self): for s in range(0, size+1): sbuf = array( s, typecode, s) rbuf = array(-1, typecode, s) if size == 1: n = comm.Improbe(0, 0) self.assertEqual(n, None) sr = comm.Isend(sbuf.as_mpi(), 0, 0) m = comm.Mprobe(0, 0) self.assertTrue(isinstance(m, MPI.Message)) self.assertTrue(m) rr = m.Irecv(rbuf.as_raw()) self.assertFalse(m) self.assertTrue(sr) self.assertTrue(rr) MPI.Request.Waitall([sr,rr]) self.assertFalse(sr) self.assertFalse(rr) # n = comm.Improbe(0, 0) self.assertEqual(n, None) r = comm.Isend(sbuf.as_mpi(), 0, 0) m = MPI.Message.Probe(comm, 0, 0) self.assertTrue(isinstance(m, MPI.Message)) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) r.Wait() # n = MPI.Message.Iprobe(comm, 0, 0) self.assertEqual(n, None) r = comm.Isend(sbuf.as_mpi(), 0, 0) comm.Probe(0, 0) m = MPI.Message.Iprobe(comm, 0, 0) self.assertTrue(isinstance(m, MPI.Message)) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) r.Wait() # n = MPI.Message.Iprobe(comm, 0, 0) self.assertEqual(n, None) r = comm.Isend(sbuf.as_mpi(), 0, 0) m = comm.Mprobe(0, 0) self.assertTrue(isinstance(m, MPI.Message)) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) r.Wait() elif rank == 0: n = comm.Improbe(0, 0) self.assertEqual(n, None) # comm.Send(sbuf.as_mpi(), 1, 0) m = comm.Mprobe(1, 0) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) # n = comm.Improbe(0, 0) self.assertEqual(n, None) comm.Send(sbuf.as_mpi(), 1, 1) m = None while not m: m = comm.Improbe(1, 1) m.Irecv(rbuf.as_raw()).Wait() elif rank == 1: n = comm.Improbe(1, 0) self.assertEqual(n, None) # m = comm.Mprobe(0, 0) self.assertTrue(m) m.Recv(rbuf.as_raw()) self.assertFalse(m) # n = comm.Improbe(1, 0) self.assertEqual(n, None) comm.Send(sbuf.as_mpi(), 0, 0) m = None while not m: m = comm.Improbe(0, 1) m.Irecv(rbuf.as_mpi()).Wait() comm.Send(sbuf.as_mpi(), 0, 1) else: rbuf = sbuf for value in rbuf: self.assertEqual(value, s) class TestP2PMatchedSelf(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PMatchedWorld(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_WORLD class 
TestP2PMatchedSelfDup(TestP2PMatchedSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() class TestP2PMatchedWorldDup(TestP2PMatchedWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_p2p_obj.py000066400000000000000000000644531460670727200167150ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys pypy_lt_53 = (hasattr(sys, 'pypy_version_info') and sys.pypy_version_info < (5, 3)) def allocate(n): if pypy_lt_53: try: import array return array.array('B', [0]) * n except ImportError: return None return bytearray(n) _basic = [None, True, False, -7, 0, 7, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = list(_basic) messages += [ list(_basic), tuple(_basic), set(_basic), frozenset(_basic), dict([('k%d' % key, val) for key, val in enumerate(_basic)]) ] class BaseTestP2PObj(object): COMM = MPI.COMM_NULL def testSendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.send(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) if size == 1: return for smess in messages: if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = smess elif rank == size - 1: rmess = self.COMM.recv(None, rank-1, 0) else: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(rmess, rank+1, 0) self.assertEqual(rmess, smess) def testISendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() buf = None for smess in messages: req = self.COMM.isend(smess, MPI.PROC_NULL) self.assertTrue(req) req.Wait() self.assertFalse(req) rmess = self.COMM.recv(buf, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) for smess in messages: req = self.COMM.isend(smess, rank, 0) self.assertTrue(req) rmess = self.COMM.recv(buf, rank, 0) self.assertTrue(req) flag = False while not flag: flag = req.Test() self.assertTrue(flag) self.assertFalse(req) self.assertEqual(rmess, smess) for smess in messages: dst = (rank+1)%size src = (rank-1)%size req = self.COMM.isend(smess, dst, 0) self.assertTrue(req) rmess = self.COMM.recv(buf, src, 0) req.Wait() self.assertFalse(req) self.assertEqual(rmess, smess) def testIRecvAndSend(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for smess in messages: req = comm.irecv(0, MPI.PROC_NULL) self.assertTrue(req) comm.send(smess, MPI.PROC_NULL) rmess = req.wait() self.assertFalse(req) self.assertEqual(rmess, None) for smess in messages: buf = allocate(512) req = comm.irecv(buf, rank, 0) self.assertTrue(req) flag, rmess = req.test() self.assertTrue(req) self.assertFalse(flag) self.assertEqual(rmess, None) comm.send(smess, rank, 0) self.assertTrue(req) flag, rmess = req.test() while not flag: flag, rmess = req.test() self.assertTrue(flag) self.assertFalse(req) self.assertEqual(rmess, smess) tmp = allocate(1024) for buf in (None, 1024, tmp): for smess in messages + [messages]: dst = (rank+1)%size src = (rank-1)%size req = comm.irecv(buf, src, 0) self.assertTrue(req) comm.send(smess, dst, 0) rmess = req.wait() self.assertFalse(req) self.assertEqual(rmess, smess) for smess in messages: src = dst = rank rreq1 = comm.irecv(None, src, 1) rreq2 = comm.irecv(None, src, 2) rreq3 = comm.irecv(None, src, 3) rreqs = [rreq1, rreq2, rreq3] for i in range(len(rreqs)): self.assertTrue(rreqs[i]) comm.send(smess, dst, i+1) index, obj = MPI.Request.waitany(rreqs) self.assertEqual(index, i) 
self.assertEqual(obj, smess) self.assertFalse(rreqs[index]) index, obj = MPI.Request.waitany(rreqs) self.assertEqual(index, MPI.UNDEFINED) self.assertEqual(obj, None) for smess in messages: src = dst = rank rreq1 = comm.irecv(None, src, 1) rreq2 = comm.irecv(None, src, 2) rreq3 = comm.irecv(None, src, 3) rreqs = [rreq1, rreq2, rreq3] index, flag, obj = MPI.Request.testany(rreqs) self.assertEqual(index, MPI.UNDEFINED) self.assertEqual(flag, False) self.assertEqual(obj, None) for i in range(len(rreqs)): self.assertTrue(rreqs[i]) comm.send(smess, dst, i+1) index, flag, obj = MPI.Request.testany(rreqs) while not flag: index, flag, obj = MPI.Request.testany(rreqs) self.assertEqual(index, i) self.assertEqual(flag, True) self.assertEqual(obj, smess) self.assertFalse(rreqs[i]) index, flag, obj = MPI.Request.testany(rreqs) self.assertEqual(index, MPI.UNDEFINED) self.assertEqual(flag, True) self.assertEqual(obj, None) def testIRecvAndISend(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() tmp = allocate(512) for smess in messages: dst = (rank+1)%size src = (rank-1)%size rreq = comm.irecv(None, src, 0) self.assertTrue(rreq) sreq = comm.isend(smess, dst, 0) self.assertTrue(sreq) index1, mess1 = MPI.Request.waitany([sreq,rreq]) self.assertTrue(index1 in (0, 1)) if index1 == 0: self.assertFalse(sreq) self.assertTrue (rreq) self.assertEqual(mess1, None) else: self.assertTrue (sreq) self.assertFalse(rreq) self.assertEqual(mess1, smess) index2, mess2 = MPI.Request.waitany([sreq,rreq]) self.assertTrue(index2 in (0, 1)) self.assertNotEqual(index2, index1) self.assertFalse(sreq) self.assertFalse(rreq) if index2 == 0: self.assertEqual(mess2, None) else: self.assertEqual(mess2, smess) for smess in messages: dst = (rank+1)%size src = (rank-1)%size rreq = comm.irecv(None, src, 0) self.assertTrue(rreq) sreq = comm.isend(smess, dst, 0) self.assertTrue(sreq) index1, flag1, mess1 = MPI.Request.testany([sreq,rreq]) while not flag1: index1, flag1, mess1 = MPI.Request.testany([sreq,rreq]) self.assertTrue(index1 in (0, 1)) if index1 == 0: self.assertFalse(sreq) self.assertTrue (rreq) self.assertEqual(mess1, None) else: self.assertTrue (sreq) self.assertFalse(rreq) self.assertEqual(mess1, smess) index2, flag2, mess2 = MPI.Request.testany([sreq,rreq]) while not flag2: index2, flag2, mess2 = MPI.Request.testany([sreq,rreq]) self.assertTrue(index2 in (0, 1)) self.assertNotEqual(index2, index1) self.assertFalse(sreq) self.assertFalse(rreq) if index2 == 0: self.assertEqual(mess2, None) else: self.assertEqual(mess2, smess) for buf in (None, 512, tmp): for smess in messages: dst = (rank+1)%size src = (rank-1)%size rreq = comm.irecv(buf, src, 0) self.assertTrue(rreq) sreq = comm.isend(smess, dst, 0) self.assertTrue(sreq) dummy, rmess = MPI.Request.waitall([sreq,rreq], []) self.assertFalse(sreq) self.assertFalse(rreq) self.assertEqual(dummy, None) self.assertEqual(rmess, smess) for buf in (None, 512, tmp): for smess in messages: src = dst = rank rreq = comm.irecv(buf, src, 1) flag, msg = MPI.Request.testall([rreq]) self.assertEqual(flag, False) self.assertEqual(msg, None) sreq = comm.isend(smess, dst, 1) while True: flag, msg = MPI.Request.testall([sreq,rreq], []) if not flag: self.assertEqual(msg, None) continue (dummy, rmess) = msg self.assertEqual(dummy, None) self.assertEqual(rmess, smess) break def testManyISendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: reqs = [] for k in range(6): r = self.COMM.isend(smess, rank, 0) reqs.append(r) flag = 
MPI.Request.Testall(reqs) if not flag: index, flag = MPI.Request.Testany(reqs) indices = MPI.Request.Testsome(reqs) if indices is None: count = MPI.UNDEFINED indices = [] else: count = len(indices) self.assertTrue(count in [0, MPI.UNDEFINED]) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) flag = MPI.Request.Testall(reqs) if not flag: index, flag = MPI.Request.Testany(reqs) self.assertEqual(index, 0) self.assertTrue(flag) indices = MPI.Request.Testsome(reqs) if indices is None: count = MPI.UNDEFINED indices = [] else: count = len(indices) self.assertTrue(count >= 2) indices = list(indices) indices.sort() self.assertTrue(indices[:2] == [1, 2]) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) flag = MPI.Request.Testall(reqs) self.assertTrue(flag) for smess in messages: reqs = [] for k in range(6): r = self.COMM.isend(smess, rank, 0) reqs.append(r) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) index = MPI.Request.Waitany(reqs) self.assertTrue(index == 0) self.assertTrue(flag) indices1 = MPI.Request.Waitsome(reqs) if indices1 is None: count1 = MPI.UNDEFINED indices1 = [] else: count1 = len(indices1) for k in range(3): rmess = self.COMM.recv(None, rank, 0) self.assertEqual(rmess, smess) indices2 = MPI.Request.Waitsome(reqs) if indices2 is None: count2 = MPI.UNDEFINED indices2 = [] else: count2 = len(indices2) if count1 == MPI.UNDEFINED: count1 = 0 if count2 == MPI.UNDEFINED: count2 = 0 self.assertEqual(6, 1+count1+count2) indices = [0]+list(indices1)+list(indices2) indices.sort() self.assertEqual(indices, list(range(6))) MPI.Request.Waitall(reqs) def testSSendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.ssend(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) if size == 1: return for smess in messages: if rank == 0: self.COMM.ssend(smess, rank+1, 0) rmess = smess elif rank == size - 1: rmess = self.COMM.recv(None, rank-1, 0) else: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.ssend(rmess, rank+1, 0) self.assertEqual(rmess, smess) def testISSendAndRecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: req = self.COMM.issend(smess, MPI.PROC_NULL) self.assertTrue(req) req.Wait() self.assertFalse(req) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) for smess in messages: req = self.COMM.issend(smess, rank, 0) self.assertTrue(req) flag = req.Test() self.assertFalse(flag) self.assertTrue(req) rmess = self.COMM.recv(None, rank, 0) self.assertTrue(req) flag = False while not flag: flag = req.Test() self.assertTrue(flag) self.assertFalse(req) self.assertEqual(rmess, smess) for smess in messages: dst = (rank+1)%size src = (rank-1)%size req = self.COMM.issend(smess, dst, 0) self.assertTrue(req) rmess = self.COMM.recv(None, src, 0) req.Wait() self.assertFalse(req) self.assertEqual(rmess, smess) def testCancel(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() status = MPI.Status() for smess in messages: req = self.COMM.issend(smess, rank) self.assertTrue(req) req.cancel() flag = req.get_status(status) cancelled = status.Is_cancelled() self.assertTrue(req) if cancelled: self.assertTrue(flag) req.Free() self.assertFalse(req) else: self.assertFalse(flag) rmess = self.COMM.recv(None, rank, 0) flag = req.get_status() self.assertTrue(flag) flag, _ = req.test() self.assertTrue(flag) self.assertEqual(rmess, 
smess) def testIRecvAndBSend(self): comm = self.COMM rank = comm.Get_rank() buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) try: for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) comm.bsend(smess, dst, 3) comm.bsend(smess, dst, 2) comm.bsend(smess, dst, 1) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.bsend(smess, MPI.PROC_NULL, 3) finally: MPI.Detach_buffer() MPI.Free_mem(buf) def testIRecvAndIBSend(self): comm = self.COMM rank = comm.Get_rank() buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD) MPI.Attach_buffer(buf) try: for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) req4 = comm.ibsend(smess, dst, 3) req5 = comm.ibsend(smess, dst, 2) req6 = comm.ibsend(smess, dst, 1) MPI.Request.waitall([req4, req5, req6]) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.ibsend(smess, MPI.PROC_NULL, 3).wait() finally: MPI.Detach_buffer() MPI.Free_mem(buf) def testIRecvAndSSend(self): comm = self.COMM rank = comm.Get_rank() for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) comm.ssend(smess, dst, 3) comm.ssend(smess, dst, 2) comm.ssend(smess, dst, 1) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.ssend(smess, MPI.PROC_NULL, 3) def testIRecvAndISSend(self): comm = self.COMM rank = comm.Get_rank() for smess in messages: src = dst = rank req1 = comm.irecv(None, src, 1) req2 = comm.irecv(None, src, 2) req3 = comm.irecv(None, src, 3) req4 = comm.issend(smess, dst, 3) req5 = comm.issend(smess, dst, 2) req6 = comm.issend(smess, dst, 1) MPI.Request.waitall([req4, req5, req6]) self.assertEqual(smess, req3.wait()) self.assertEqual(smess, req2.wait()) self.assertEqual(smess, req1.wait()) comm.issend(smess, MPI.PROC_NULL, 3).wait() def testSendrecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: dest = (rank + 1) % size source = (rank - 1) % size rmess = self.COMM.sendrecv(smess, dest, 0, None, source, 0) continue self.assertEqual(rmess, smess) rmess = self.COMM.sendrecv(None, dest, 0, None, source, 0) self.assertEqual(rmess, None) rmess = self.COMM.sendrecv(smess, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) def testMixed(self): comm = self.COMM rank = comm.Get_rank() # sreq = comm.Isend([None, 0, 'B'], rank) obj = comm.recv(None, rank) sreq.Wait() self.assertTrue(obj is None) for smess in messages: buf = MPI.pickle.dumps(smess) sreq = comm.Isend([buf, 'B'], rank) rmess = comm.recv(None, rank) sreq.Wait() self.assertTrue(rmess == smess) # sreq = comm.Isend([None, 0, 'B'], rank) rreq = comm.irecv(None, rank) sreq.Wait() obj = rreq.wait() self.assertTrue(obj is None) for smess in messages: buf = MPI.pickle.dumps(smess) sreq = comm.Isend([buf, 'B'], rank) rreq = comm.irecv(None, rank) sreq.Wait() rmess = rreq.wait() self.assertTrue(rmess == smess) def testPingPong01(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.send(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) if size == 1: return smess = None if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 
0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) for smess in messages: if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) @unittest.skipMPI('MPICH1') def testProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() flag = comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertFalse(flag) for smess in messages: request = comm.issend(smess, comm.rank, 123) self.assertTrue(request) while not comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status): pass self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) comm.probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(request) flag, obj = request.test() self.assertTrue(request) self.assertFalse(flag) self.assertEqual(obj, None) obj = comm.recv(None, comm.rank, 123) self.assertEqual(obj, smess) self.assertTrue(request) obj = request.wait() self.assertFalse(request) self.assertEqual(obj, None) finally: comm.Free() def testWaitSomeRecv(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.irecv(source=rank, tag=i) for i in range(6)] for indexlist in ([5], [3,1,2], [0,4]): for i in indexlist: comm.ssend("abc", dest=rank, tag=i) statuses = [] idxs, objs = MPI.Request.waitsome(reqs, statuses) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, ["abc"]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) self.assertEqual(len(statuses), len(idxs)) self.assertTrue(all(s.source == rank for s in statuses)) self.assertTrue(all(s.tag in indexlist for s in statuses)) self.assertTrue(all(s.error == MPI.SUCCESS for s in statuses)) idxs, objs = MPI.Request.waitsome(reqs) self.assertEqual(idxs, None) self.assertEqual(objs, None) self.assertFalse(any(reqs)) comm.Free() def testTestSomeRecv(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.irecv(source=rank, tag=i) for i in range(6)] statuses = [] idxs, objs = MPI.Request.testsome(reqs, statuses) self.assertEqual(idxs, []) self.assertEqual(objs, []) self.assertTrue(all(reqs)) self.assertEqual(statuses, []) for indexlist in ([5], [], [3,1,2], [], [0,4]): for i in indexlist: comm.ssend("abc", dest=rank, tag=i) statuses = [] idxs, objs = MPI.Request.testsome(reqs, statuses) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, ["abc"]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) self.assertEqual(len(statuses), len(idxs)) self.assertTrue(all(s.source == rank for s in statuses)) self.assertTrue(all(s.tag in indexlist for s in statuses)) self.assertTrue(all(s.error == MPI.SUCCESS for s in statuses)) idxs, objs = MPI.Request.testsome(reqs) self.assertEqual(idxs, None) self.assertEqual(objs, None) self.assertFalse(any(reqs)) comm.Free() def testWaitSomeSend(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.issend("abc", dest=rank, tag=i) for i in range(6)] for indexlist in ([5], [3,1,2], [0,4]): for i in indexlist: msg = comm.recv(source=rank, tag=i) self.assertEqual(msg, "abc") idxs, objs = MPI.Request.waitsome(reqs) while sorted(idxs) != sorted(indexlist): i, o = MPI.Request.waitsome(reqs) idxs.extend(i) objs.extend(o) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, [None]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) 
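# --- Illustrative sketch (not part of the original test suite) ---
# Object-mode waitsome, as exercised by the tests above: post several
# irecv's, complete whichever subset is ready, and collect the unpickled
# payloads; the remaining requests stay pending until more sends arrive.
# Single-process via self-sends on COMM_SELF.
from mpi4py import MPI

comm = MPI.COMM_SELF
reqs = [comm.irecv(source=0, tag=t) for t in range(4)]
for t in (2, 0):
    comm.send({"tag": t}, dest=0, tag=t)
indices, objects = MPI.Request.waitsome(reqs)
# `indices` lists the completed request slots, `objects` their messages.
for t in (1, 3):
    comm.send({"tag": t}, dest=0, tag=t)
MPI.Request.waitall(reqs)  # drain whatever is still outstanding
# --- end sketch ---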
idxs, objs = MPI.Request.waitsome(reqs) self.assertEqual(idxs, None) self.assertEqual(objs, None) self.assertFalse(any(reqs)) comm.Free() def testTestSomeSend(self): comm = self.COMM.Dup() rank = comm.Get_rank() reqs = [comm.issend("abc", dest=rank, tag=i) for i in range(6)] idxs, objs = MPI.Request.testsome(reqs) self.assertEqual(idxs, []) self.assertEqual(objs, []) self.assertTrue(all(reqs)) for indexlist in ([5], [], [3,1,2], [], [0,4]): for i in indexlist: msg = comm.recv(source=rank, tag=i) self.assertEqual(msg, "abc") idxs, objs = MPI.Request.testsome(reqs) while sorted(idxs) != sorted(indexlist): i, o = MPI.Request.testsome(reqs) idxs.extend(i) objs.extend(o) self.assertEqual(sorted(idxs), sorted(indexlist)) self.assertEqual(objs, [None]*len(idxs)) self.assertFalse(any(reqs[i] for i in idxs)) idxs, objs = MPI.Request.testsome(reqs) self.assertEqual(idxs, None) self.assertEqual(objs, None) self.assertFalse(any(reqs)) comm.Free() class TestP2PObjSelf(BaseTestP2PObj, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PObjWorld(BaseTestP2PObj, unittest.TestCase): COMM = MPI.COMM_WORLD class TestP2PObjSelfDup(TestP2PObjSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE) class TestP2PObjWorldDup(TestP2PObjWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_p2p_obj_matched.py000066400000000000000000000125471460670727200203770ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest _basic = [None, True, False, -7, 0, 7, -2**63+1, 2**63-1, -2.17, 0.0, 3.14, 1+2j, 2-3j, 'mpi4py', ] messages = list(_basic) messages += [ list(_basic), tuple(_basic), dict([('k%d' % key, val) for key, val in enumerate(_basic)]) ] @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class TestMessage(unittest.TestCase): def testMessageNull(self): null = MPI.MESSAGE_NULL self.assertFalse(null) null2 = MPI.Message() self.assertEqual(null, null2) null3 = MPI.Message(null) self.assertEqual(null, null3) def testMessageNoProc(self): # noproc = MPI.MESSAGE_NO_PROC self.assertTrue(noproc) noproc.recv() self.assertTrue(noproc) noproc.irecv().wait() self.assertTrue(noproc) # noproc2 = MPI.Message(MPI.MESSAGE_NO_PROC) self.assertTrue(noproc2) self.assertEqual(noproc2, noproc) self.assertNotEqual(noproc, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) message.recv() self.assertEqual(message, MPI.MESSAGE_NULL) # message = MPI.Message(MPI.MESSAGE_NO_PROC) request = message.irecv() self.assertEqual(message, MPI.MESSAGE_NULL) self.assertNotEqual(request, MPI.REQUEST_NULL) request.wait() self.assertEqual(request, MPI.REQUEST_NULL) # comm = MPI.COMM_SELF message = comm.mprobe(MPI.PROC_NULL) self.assertNotEqual(message, MPI.MESSAGE_NULL) self.assertEqual(message, MPI.MESSAGE_NO_PROC) noproc = comm.improbe(MPI.PROC_NULL) self.assertNotEqual(message, MPI.MESSAGE_NULL) self.assertEqual(message, MPI.MESSAGE_NO_PROC) @unittest.skipIf(MPI.MESSAGE_NULL == MPI.MESSAGE_NO_PROC, 'mpi-message') class BaseTestP2PMatched(object): COMM = MPI.COMM_NULL def testIMProbe(self): comm = self.COMM.Dup() try: m = comm.improbe() self.assertEqual(m, None) m = comm.improbe(MPI.ANY_SOURCE) self.assertEqual(m, None) m = comm.improbe(MPI.ANY_SOURCE, MPI.ANY_TAG) self.assertEqual(m, None) status = MPI.Status() m = comm.improbe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) 
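# --- Illustrative sketch (not part of the original test suite) ---
# Object-mode matched probe: improbe polls without blocking and returns
# None until a message is queued; the Message handle then yields the
# unpickled object. Requires an MPI-3 library with matched probes.
from mpi4py import MPI

comm = MPI.COMM_SELF
sreq = comm.isend({"answer": 42}, dest=0, tag=5)
msg = None
while msg is None:  # poll until the self-send becomes visible
    msg = comm.improbe(source=0, tag=5)
obj = msg.irecv().wait()
sreq.wait()
assert obj == {"answer": 42}
# --- end sketch ---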
self.assertEqual(m, None) self.assertEqual(status.source, MPI.ANY_SOURCE) self.assertEqual(status.tag, MPI.ANY_TAG) self.assertEqual(status.error, MPI.SUCCESS) m = MPI.Message.iprobe(comm) self.assertEqual(m, None) s = comm.isend(None, comm.rank, 0) r = comm.mprobe(comm.rank, 0).irecv() MPI.Request.waitall([s,r]) finally: comm.Free() def testProbeRecv(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() for smsg in messages: if size == 1: sr = comm.isend(smsg, 0, 0) m = comm.mprobe(0, 0) self.assertTrue(isinstance(m, MPI.Message)) self.assertTrue(m) rr = m.irecv() self.assertFalse(m) self.assertTrue(sr) self.assertTrue(rr) MPI.Request.Waitall([sr,rr]) self.assertFalse(sr) self.assertFalse(rr) # r = comm.isend(smsg, 0, 0) m = MPI.Message.probe(comm, 0, 0) self.assertTrue(isinstance(m, MPI.Message)) self.assertTrue(m) rmsg = m.recv() self.assertFalse(m) r.wait() elif rank == 0: comm.send(smsg, 1, 0) m = comm.mprobe(1, 0) self.assertTrue(m) rmsg = m.recv() self.assertFalse(m) # comm.send(smsg, 1, 1) m = None while not m: m = MPI.Message.iprobe(comm, 1, 1) rmsg = m.irecv().wait() elif rank == 1: m = comm.mprobe(0, 0) self.assertTrue(m) rmsg = m.recv() self.assertFalse(m) comm.send(rmsg, 0, 0) # m = None while not m: m = MPI.Message.iprobe(comm, 0, 1) rmsg = m.irecv().wait() comm.send(smsg, 0, 1) else: rmsg = smsg self.assertEqual(smsg, rmsg) class TestP2PMatchedSelf(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_SELF class TestP2PMatchedWorld(BaseTestP2PMatched, unittest.TestCase): COMM = MPI.COMM_WORLD @unittest.skipMPI('openmpi(<1.8.5)', MPI.COMM_WORLD.Get_size() > 1) class TestP2PMatchedSelfDup(TestP2PMatchedSelf): def setUp(self): self.COMM = MPI.COMM_SELF.Dup() def tearDown(self): self.COMM.Free() @unittest.skipMPI('openmpi(<1.8.5)', MPI.COMM_WORLD.Get_size() > 1) class TestP2PMatchedWorldDup(TestP2PMatchedWorld): def setUp(self): self.COMM = MPI.COMM_WORLD.Dup() def tearDown(self): self.COMM.Free() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_pack.py000066400000000000000000000132711460670727200162700ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest from arrayimpl import allclose import arrayimpl class BaseTestPack(object): COMM = MPI.COMM_NULL skipdtype = [] def testPackSize(self): for array, typecode in arrayimpl.subTest(self): if typecode in self.skipdtype: continue datatype = array.TypeMap[typecode] itemsize = datatype.Get_size() overhead = datatype.Pack_size(0, self.COMM) for count in range(10): pack_size = datatype.Pack_size(count, self.COMM) self.assertEqual(pack_size - overhead, count*itemsize) def testPackUnpack(self): for array, typecode1 in arrayimpl.subTest(self): if typecode1 in self.skipdtype: continue for typecode2 in array.TypeMap: if typecode2 in self.skipdtype: continue datatype1 = array.TypeMap[typecode1] datatype2 = array.TypeMap[typecode2] for items in range(10): # input and output arrays iarray1 = array(range(items), typecode1).as_raw() iarray2 = array(range(items), typecode2).as_raw() oarray1 = array(items, typecode1, items).as_raw() oarray2 = array(items, typecode2, items).as_raw() # temp array for packing size1 = datatype1.Pack_size(len(iarray1), self.COMM) size2 = datatype2.Pack_size(len(iarray2), self.COMM) tmpbuf = array(0, 'b', size1 + size2 + 1).as_raw() # pack input arrays position = 0 position = datatype1.Pack(iarray1, tmpbuf, position, self.COMM) position = datatype2.Pack(iarray2, tmpbuf, position, self.COMM) # unpack output arrays position = 0 position = 
datatype1.Unpack(tmpbuf, position, oarray1, self.COMM) position = datatype2.Unpack(tmpbuf, position, oarray2, self.COMM) # test self.assertTrue(allclose(iarray1, oarray1)) self.assertTrue(allclose(iarray2, oarray2)) EXT32 = 'external32' class BaseTestPackExternal(object): skipdtype = [] def testPackSize(self): for array, typecode in arrayimpl.subTest(self): if typecode in self.skipdtype: continue datatype = array.TypeMap[typecode] itemsize = datatype.Get_size() overhead = datatype.Pack_external_size(EXT32, 0) for count in range(10): pack_size = datatype.Pack_external_size(EXT32, count) real_size = pack_size - overhead def testPackUnpackExternal(self): for array, typecode1 in arrayimpl.subTest(self): if unittest.is_mpi_gpu('mpich', array): continue if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue if typecode1 in self.skipdtype: continue for typecode2 in array.TypeMap: if typecode2 in self.skipdtype: continue datatype1 = array.TypeMap[typecode1] datatype2 = array.TypeMap[typecode2] for items in range(1, 10): # input and output arrays if typecode1 == 'b': iarray1 = array(127, typecode1, items).as_raw() else: iarray1 = array(255, typecode1, items).as_raw() iarray2 = array(range(items), typecode2).as_raw() oarray1 = array(-1, typecode1, items).as_raw() oarray2 = array(-1, typecode2, items).as_raw() # temp array for packing size1 = datatype1.Pack_external_size(EXT32, len(iarray1)) size2 = datatype2.Pack_external_size(EXT32, len(iarray2)) tmpbuf = array(0, 'b', size1 + size2 + 1).as_raw() # pack input arrays position = 0 position = datatype1.Pack_external(EXT32, iarray1, tmpbuf, position) position = datatype2.Pack_external(EXT32, iarray2, tmpbuf, position) # unpack output arrays position = 0 position = datatype1.Unpack_external(EXT32, tmpbuf, position, oarray1) position = datatype2.Unpack_external(EXT32, tmpbuf, position, oarray2) # test result self.assertTrue(allclose(iarray1, oarray1)) self.assertTrue(allclose(iarray2, oarray2)) class TestPackSelf(BaseTestPack, unittest.TestCase): COMM = MPI.COMM_SELF class TestPackWorld(BaseTestPack, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('openmpi(<3.0.0)') class TestPackExternal(BaseTestPackExternal, unittest.TestCase): pass name, version = MPI.get_vendor() if name == 'MPICH': if version < (4, 0, 0): BaseTestPackExternal.skipdtype += 'ldgFDG' elif name == 'Open MPI': BaseTestPackExternal.skipdtype += 'gG' # XXX TODO if version < (5, 0, 0): BaseTestPackExternal.skipdtype += 'gG' elif name == 'Intel MPI': BaseTestPackExternal.skipdtype += 'ldgFDG' elif name == 'Microsoft MPI': BaseTestPackExternal.skipdtype += 'gFDG' elif name == 'MVAPICH2': BaseTestPackExternal.skipdtype += 'ldgFDG' elif name =='MPICH2': BaseTestPackExternal.skipdtype += 'ldgFDG' else: try: MPI.BYTE.Pack_external_size(EXT32, 0) except NotImplementedError: unittest.disable(BaseTestPackExternal, 'mpi-ext32') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_pickle.py000066400000000000000000000113611460670727200166170ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys try: import cPickle except ImportError: cPickle = None try: import pickle as pyPickle except ImportError: pyPickle = None try: import dill except ImportError: dill = None try: import marshal except ImportError: marshal = None try: import json except ImportError: json = None try: import yaml yaml.dump(None) except ImportError: yaml = None OBJS = [ None, True, False, 7, 1<<32, 3.14, 1+2j, 'qwerty', (0, 1, 
2), [0, 1, 2], {'a':0, 'b':1}, ] try: memoryview tobytes = lambda s: memoryview(s).tobytes() except NameError: tobytes = lambda s: buffer(s)[:] class TestPickle(unittest.TestCase): def setUp(self): self.pickle = MPI.pickle def tearDown(self): self.pickle.__init__() def do_pickle(self, obj, pickle): comm = MPI.COMM_SELF o = comm.sendrecv(obj, 0, 0, None, 0, 0) self.assertEqual(obj, o) s = pickle.dumps(obj) o = pickle.loads(s) self.assertEqual(obj, o) def testDefault(self): pickle = self.pickle protocols = [0, 1, 2] if sys.version_info[:2] >= (3, 0): protocols.append(3) if sys.version_info[:2] >= (3, 4): protocols.append(4) protocols.append(-1) protocols.append(None) for proto in protocols: pickle.__init__(protocol=proto) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) def testCPickle(self): if cPickle is None: return pickle = self.pickle dumps = cPickle.dumps loads = cPickle.loads protocols = [0, 1, 2] if sys.version_info[:2] >= (3, 0): protocols.append(3) if sys.version_info[:2] >= (3, 4): protocols.append(4) if sys.version_info[:2] >= (3, 8): protocols.append(5) protocols.append(-1) protocols.append(None) for proto in protocols: pickle.__init__(dumps, loads, proto) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) def testPyPickle(self): pickle = self.pickle dumps = pyPickle.dumps loads = pyPickle.loads protocols = [0, 1, 2] if sys.version_info[:2] >= (3, 0): protocols.append(3) if sys.version_info[:2] >= (3, 4): protocols.append(4) if sys.version_info[:2] >= (3, 8): protocols.append(5) protocols.append(-1) protocols.append(None) for proto in protocols: pickle.__init__(dumps, loads, proto) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) @unittest.skipIf(dill is None, 'dill') def testDill(self): pickle = self.pickle dumps = dill.dumps loads = dill.loads protocols = list(range(dill.HIGHEST_PROTOCOL+1)) protocols.append(-1) protocols.append(None) for proto in protocols: pickle.__init__(dumps, loads, proto) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) @unittest.skipIf(marshal is None, 'marshal') def testMarshal(self): pickle = self.pickle dumps = marshal.dumps loads = marshal.loads protocols = [0, 1, 2] if sys.version_info[:2] >= (3, 4): protocols.append(3) protocols.append(4) protocols.append(None) for protocol in protocols: pickle.__init__(dumps, loads, protocol) for obj in OBJS: self.do_pickle(obj, pickle) self.do_pickle(OBJS, pickle) @unittest.skipIf(json is None, 'json') def testJson(self): pickle = self.pickle dumps = lambda o: json.dumps(o).encode() loads = lambda s: json.loads(tobytes(s).decode()) pickle.__init__(dumps, loads) OBJS2 = [o for o in OBJS if not isinstance(o, (float, complex, tuple))] for obj in OBJS2: self.do_pickle(obj, pickle) self.do_pickle(OBJS2, pickle) @unittest.skipIf(yaml is None, 'yaml') def testYAML(self): pickle = self.pickle dumps = lambda o: yaml.dump(o).encode() loads = lambda s: yaml.load(tobytes(s).decode(), Loader=yaml.Loader) pickle.__init__(dumps, loads) OBJS2 = [o for o in OBJS if not isinstance(o, (complex, tuple))] for obj in OBJS2: self.do_pickle(obj, pickle) self.do_pickle(OBJS2, pickle) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_rc.py000066400000000000000000000011251460670727200157510ustar00rootroot00000000000000from mpi4py import rc import mpiunittest as unittest class TestRC(unittest.TestCase): def testRC1(self): rc(initialize = rc.initialize) rc(threads = rc.threads) rc(thread_level = rc.thread_level) rc(finalize = 
rc.finalize) rc(fast_reduce = rc.fast_reduce) rc(recv_mprobe = rc.recv_mprobe) def testRC2(self): kwargs = rc.__dict__.copy() rc(**kwargs) def testRC3(self): error = lambda: rc(ABCXYZ=123456) self.assertRaises(TypeError, error) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_request.py000066400000000000000000000120041460670727200170330ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestRequest(unittest.TestCase): def setUp(self): self.REQUEST = MPI.Request() self.STATUS = MPI.Status() def testWait(self): self.REQUEST.Wait() self.REQUEST.Wait(None) self.REQUEST.Wait(self.STATUS) self.assertTrue(self.REQUEST.Wait() is True) self.REQUEST.wait() self.REQUEST.wait(None) self.REQUEST.wait(self.STATUS) self.assertTrue(self.REQUEST.wait() is None) def testTest(self): self.REQUEST.Test() self.REQUEST.Test(None) self.REQUEST.Test(self.STATUS) self.assertTrue(self.REQUEST.Test() is True) self.REQUEST.test() self.REQUEST.test(None) self.REQUEST.test(self.STATUS) self.assertTrue(self.REQUEST.test() == (True, None)) @unittest.skipMPI('MPICH1') @unittest.skipMPI('LAM/MPI') def testGetStatus(self): try: flag = self.REQUEST.Get_status() except NotImplementedError: self.skipTest('mpi-request-get_status') self.assertTrue(flag) flag = self.REQUEST.Get_status(self.STATUS) self.assertTrue(flag) self.assertEqual(self.STATUS.Get_source(), MPI.ANY_SOURCE) self.assertEqual(self.STATUS.Get_tag(), MPI.ANY_TAG) self.assertEqual(self.STATUS.Get_error(), MPI.SUCCESS) self.assertEqual(self.STATUS.Get_count(MPI.BYTE), 0) self.assertEqual(self.STATUS.Get_elements(MPI.BYTE), 0) try: self.assertFalse(self.STATUS.Is_cancelled()) except NotImplementedError: self.skipTest('mpi-status-is_cancelled') flag = self.REQUEST.get_status() self.assertTrue(flag) flag = self.REQUEST.get_status(self.STATUS) self.assertEqual(self.STATUS.source, MPI.ANY_SOURCE) self.assertEqual(self.STATUS.tag, MPI.ANY_TAG) self.assertEqual(self.STATUS.error, MPI.SUCCESS) class TestRequestArray(unittest.TestCase): def setUp(self): self.REQUESTS = [MPI.Request() for i in range(5)] self.STATUSES = [MPI.Status() for i in range(5)] def testWaitany(self): MPI.Request.Waitany(self.REQUESTS) MPI.Request.Waitany(self.REQUESTS, None) MPI.Request.Waitany(self.REQUESTS, self.STATUSES[0]) MPI.Request.waitany(self.REQUESTS) MPI.Request.waitany(self.REQUESTS, None) MPI.Request.waitany(self.REQUESTS, self.STATUSES[0]) def testTestany(self): MPI.Request.Testany(self.REQUESTS) MPI.Request.Testany(self.REQUESTS, None) MPI.Request.Testany(self.REQUESTS, self.STATUSES[0]) MPI.Request.testany(self.REQUESTS) MPI.Request.testany(self.REQUESTS, None) MPI.Request.testany(self.REQUESTS, self.STATUSES[0]) def testWaitall(self): MPI.Request.Waitall(self.REQUESTS) MPI.Request.Waitall(self.REQUESTS, None) self.assertTrue(MPI.Request.Waitall(self.REQUESTS) is True) for statuses in (tuple(self.STATUSES), (self.STATUSES[0],), ()): MPI.Request.Waitall(self.REQUESTS, statuses) for statuses in (self.STATUSES, []): MPI.Request.Waitall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) MPI.Request.waitall(self.REQUESTS) MPI.Request.waitall(self.REQUESTS, None) for statuses in (self.STATUSES, []): MPI.Request.waitall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) def testTestall(self): MPI.Request.Testall(self.REQUESTS) MPI.Request.Testall(self.REQUESTS, None) self.assertTrue(MPI.Request.Testall(self.REQUESTS) is True) for statuses in (self.STATUSES, []): 
MPI.Request.Testall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) MPI.Request.testall(self.REQUESTS) MPI.Request.testall(self.REQUESTS, None) for statuses in (self.STATUSES, []): MPI.Request.testall(self.REQUESTS, statuses) self.assertEqual(len(statuses), len(self.REQUESTS)) def testWaitsome(self): ret = MPI.Request.Waitsome(self.REQUESTS) self.assertEqual(ret, None) ret = MPI.Request.Waitsome(self.REQUESTS, None) self.assertEqual(ret, None) for statuses in (self.STATUSES, []): slen = len(statuses) ret = MPI.Request.Waitsome(self.REQUESTS, statuses) self.assertEqual(ret, None) self.assertEqual(len(statuses), slen) def testTestsome(self): ret = MPI.Request.Testsome(self.REQUESTS) self.assertEqual(ret, None) ret = MPI.Request.Testsome(self.REQUESTS, None) self.assertEqual(ret, None) for statuses in (self.STATUSES, []): slen = len(statuses) ret = MPI.Request.Testsome(self.REQUESTS, statuses) self.assertEqual(ret, None) self.assertEqual(len(statuses), slen) if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_rma.py000066400000000000000000000373361460670727200161410ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl import sys pypy_lt_53 = (hasattr(sys, 'pypy_version_info') and sys.pypy_version_info < (5, 3)) def mkzeros(n): if pypy_lt_53: return b'\0' * n return bytearray(n) def memzero(m): try: m[:] = 0 except IndexError: # cffi buffer m[0:len(m)] = b'\0'*len(m) class BaseTestRMA(object): COMM = MPI.COMM_NULL INFO = MPI.INFO_NULL def setUp(self): nbytes = 100*MPI.DOUBLE.size try: self.mpi_memory = MPI.Alloc_mem(nbytes) self.memory = self.mpi_memory memzero(self.memory) except MPI.Exception: import array self.mpi_memory = None self.memory = array.array('B',[0]*nbytes) self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM) def tearDown(self): self.WIN.Free() if self.mpi_memory: MPI.Free_mem(self.mpi_memory) def testPutGet(self): typemap = MPI._typedict group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('mvapich2', array): continue for count in range(10): for rank in range(size): sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) # self.WIN.Fence() self.WIN.Put(sbuf.as_mpi(), rank) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], i) self.assertNotEqual(rbuf[i], -1) self.assertEqual(rbuf[-1], -1) # sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) target = sbuf.itemsize self.WIN.Fence() self.WIN.Put(sbuf.as_mpi(), rank, target) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank, target) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], i) self.assertNotEqual(rbuf[i], -1) self.assertEqual(rbuf[-1], -1) # sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) datatype = typemap[typecode] target = (sbuf.itemsize, count, datatype) self.WIN.Fence() self.WIN.Put(sbuf.as_mpi(), rank, target) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank, target) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], i) self.assertNotEqual(rbuf[i], -1) self.assertEqual(rbuf[-1], -1) def testAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue if typecode in 'FDG': continue for 
count in range(10): for rank in range(size): sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN): self.WIN.Fence() self.WIN.Accumulate(sbuf.as_mpi(), rank, op=op) self.WIN.Fence() self.WIN.Get(rbuf.as_mpi_c(count), rank) self.WIN.Fence() for i in range(count): self.assertEqual(sbuf[i], i) self.assertNotEqual(rbuf[i], -1) self.assertEqual(rbuf[-1], -1) @unittest.skipMPI('openmpi(>=1.10,<1.11)') def testGetAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() self.WIN.Fence() obuf = MPI.Alloc_mem(1); memzero(obuf) rbuf = MPI.Alloc_mem(1); memzero(rbuf) try: try: self.WIN.Get_accumulate([obuf, 0, MPI.BYTE], [rbuf, 0, MPI.BYTE], rank) finally: MPI.Free_mem(obuf) MPI.Free_mem(rbuf) except NotImplementedError: self.skipTest('mpi-win-get_accumulate') self.WIN.Fence() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue if typecode in 'FDG': continue for count in range(10): for rank in range(size): ones = array([1]*count, typecode) sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) gbuf = array(-1, typecode, count+1) for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE, MPI.NO_OP): self.WIN.Lock(rank) self.WIN.Put(ones.as_mpi(), rank) self.WIN.Flush(rank) self.WIN.Get_accumulate(sbuf.as_mpi(), rbuf.as_mpi_c(count), rank, op=op) self.WIN.Flush(rank) self.WIN.Get(gbuf.as_mpi_c(count), rank) self.WIN.Flush(rank) self.WIN.Unlock(rank) # for i in range(count): self.assertEqual(sbuf[i], i) self.assertEqual(rbuf[i], 1) self.assertEqual(gbuf[i], op(1, i)) self.assertEqual(rbuf[-1], -1) self.assertEqual(gbuf[-1], -1) def testFetchAndOp(self): typemap = MPI._typedict group = self.WIN.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() self.WIN.Fence() blen = MPI.INT.Get_size() obuf = MPI.Alloc_mem(blen); memzero(obuf) rbuf = MPI.Alloc_mem(blen); memzero(rbuf) try: try: self.WIN.Fetch_and_op( [obuf, 1, MPI.INT], [rbuf, 1, MPI.INT], rank) finally: MPI.Free_mem(obuf) MPI.Free_mem(rbuf) except NotImplementedError: self.skipTest('mpi-win-fetch_and_op') self.WIN.Fence() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue if typecode in 'FDG': continue obuf = array(+1, typecode) rbuf = array(-1, typecode, 2) datatype = typemap[typecode] for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE, MPI.NO_OP): for rank in range(size): for disp in range(3): self.WIN.Lock(rank) self.WIN.Fetch_and_op(obuf.as_mpi(), rbuf.as_mpi_c(1), rank, disp * datatype.size, op=op) self.WIN.Unlock(rank) self.assertEqual(rbuf[1], -1) def testCompareAndSwap(self): typemap = MPI._typedict group = self.WIN.Get_group() size = group.Get_size() rank = group.Get_rank() group.Free() self.WIN.Fence() obuf = MPI.Alloc_mem(1); memzero(obuf) cbuf = MPI.Alloc_mem(1); memzero(cbuf) rbuf = MPI.Alloc_mem(1); memzero(rbuf) try: try: self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE], [cbuf, 1, MPI.BYTE], [rbuf, 1, MPI.BYTE], rank, 0) finally: MPI.Free_mem(obuf) MPI.Free_mem(cbuf) MPI.Free_mem(rbuf) except NotImplementedError: self.skipTest('mpi-win-compare_and_swap') self.WIN.Fence() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue if typecode in 'fdg': continue if typecode in 'FDG': continue 
obuf = array(+1, typecode) cbuf = array( 0, typecode) rbuf = array(-1, typecode, 2) datatype = typemap[typecode] for rank in range(size): for disp in range(3): self.WIN.Lock(rank) self.WIN.Compare_and_swap(obuf.as_mpi(), cbuf.as_mpi(), rbuf.as_mpi_c(1), rank, disp * datatype.size) self.WIN.Unlock(rank) self.assertEqual(rbuf[1], -1) def testPutProcNull(self): self.WIN.Fence() self.WIN.Put(None, MPI.PROC_NULL, None) self.WIN.Fence() def testGetProcNull(self): self.WIN.Fence() self.WIN.Get(None, MPI.PROC_NULL, None) self.WIN.Fence() def testAccumulateProcNullReplace(self): self.WIN.Fence() zeros = mkzeros(8) self.WIN.Fence() self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) self.WIN.Fence() self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) self.WIN.Fence() def testAccumulateProcNullSum(self): self.WIN.Fence() zeros = mkzeros(8) self.WIN.Fence() self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) self.WIN.Fence() self.WIN.Accumulate([None, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) self.WIN.Fence() def testGetAccumulateProcNull(self): obuf = [mkzeros(8), 0, MPI.INT] rbuf = [mkzeros(8), 0, MPI.INT] self.WIN.Fence() try: self.WIN.Get_accumulate(obuf, rbuf, MPI.PROC_NULL) except NotImplementedError: self.skipTest('mpi-win-get_accumulate') self.WIN.Fence() ##def testFetchAndOpProcNull(self): ## obuf = cbuf = rbuf = None ## self.WIN.Fence() ## try: ## self.WIN.Fetch_and_op(obuf, rbuf, MPI.PROC_NULL, 0) ## except NotImplementedError: ## self.skipTest('mpi-win-fetch_and_op') ## self.WIN.Fence() ##def testCompareAndSwapProcNull(self): ## obuf = cbuf = rbuf = None ## self.WIN.Fence() ## try: ## self.WIN.Compare_and_swap(obuf, cbuf, rbuf, MPI.PROC_NULL, 0) ## except NotImplementedError: ## self.skipTest('mpi-win-compare_and_swap') ## self.WIN.Fence() def testFence(self): win = self.WIN LMODE = [0, MPI.MODE_NOSTORE, MPI.MODE_NOPUT, MPI.MODE_NOSTORE|MPI.MODE_NOPUT] GMODE = [0, MPI.MODE_NOPRECEDE, MPI.MODE_NOSUCCEED] win.Fence() for lmode in LMODE: for gmode in GMODE: assertion = lmode | gmode win.Fence(assertion) win.Fence() @unittest.skipMPI('openmpi(==1.8.1)') def testFenceAll(self): win = self.WIN assertion = 0 modes = [0, MPI.MODE_NOSTORE, MPI.MODE_NOPUT, MPI.MODE_NOPRECEDE, MPI.MODE_NOSUCCEED] win.Fence() for mode in modes: win.Fence(mode) assertion |= mode win.Fence(assertion) win.Fence() @unittest.skipMPI('openmpi(==1.8.6)') def testStartComplete(self): self.WIN.Start(MPI.GROUP_EMPTY) self.WIN.Complete() @unittest.skipMPI('openmpi(==1.8.6)') def testPostWait(self): self.WIN.Post(MPI.GROUP_EMPTY) self.WIN.Wait() @unittest.skipMPI('openmpi(==1.8.7)') @unittest.skipMPI('openmpi(==1.8.6)') def testStartCompletePostWait(self): win = self.WIN wingroup = win.Get_group() size = wingroup.Get_size() rank = wingroup.Get_rank() if size < 2: return wingroup.Free() if rank == 0: group = wingroup.Excl([0]) win.Start(group) win.Complete() win.Post(group) win.Wait() group.Free() else: group = wingroup.Incl([0]) win.Post(group) win.Wait() win.Start(group) win.Complete() group.Free() wingroup.Free() @unittest.skipMPI('openmpi(==1.8.7)') @unittest.skipMPI('openmpi(==1.8.6)') def testStartCompletePostTest(self): comm = self.COMM win = self.WIN wingroup = win.Get_group() size = wingroup.Get_size() rank = wingroup.Get_rank() if size < 2: return wingroup.Free() if rank == 0: group = wingroup.Excl([0]) win.Start(group) comm.Barrier() win.Complete() comm.Barrier() group.Free() else: group = wingroup.Incl([0]) win.Post(group) flag = win.Test() self.assertFalse(flag) 
comm.Barrier() comm.Barrier() flag = win.Test() self.assertTrue(flag) group.Free() wingroup.Free() @unittest.skipMPI('MPI(<3.0)') def testSync(self): win = self.WIN comm = self.COMM rank = comm.Get_rank() win.Lock(rank) win.Sync() win.Unlock(rank) comm.Barrier() @unittest.skipMPI('MPI(<3.0)') def testFlush(self): win = self.WIN comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() # for i in range(size): win.Lock(i) win.Flush(i) win.Unlock(i) comm.Barrier() for i in range(size): if i == rank: win.Lock_all() win.Flush_all() win.Unlock_all() comm.Barrier() # for i in range(size): win.Lock(i) win.Flush_local(i) win.Unlock(i) comm.Barrier() for i in range(size): if i == rank: win.Lock_all() win.Flush_local_all() win.Unlock_all() comm.Barrier() class TestRMASelf(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_SELF class TestRMAWorld(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_WORLD SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI' try: if SpectrumMPI: raise NotImplementedError MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestRMA, 'mpi-rma') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_rma_nb.py000066400000000000000000000157761460670727200166240ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import arrayimpl import sys pypy_lt_53 = (hasattr(sys, 'pypy_version_info') and sys.pypy_version_info < (5, 3)) def mkzeros(n): if pypy_lt_53: return b'\0' * n return bytearray(n) def memzero(m): try: m[:] = 0 except IndexError: # cffi buffer m[0:len(m)] = b'\0'*len(m) class BaseTestRMA(object): COMM = MPI.COMM_NULL INFO = MPI.INFO_NULL COUNT_MIN = 0 def setUp(self): nbytes = 100*MPI.DOUBLE.size try: self.mpi_memory = MPI.Alloc_mem(nbytes) self.memory = self.mpi_memory memzero(self.memory) except MPI.Exception: import array self.mpi_memory = None self.memory = array.array('B',[0]*nbytes) self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM) def tearDown(self): self.WIN.Free() if self.mpi_memory: MPI.Free_mem(self.mpi_memory) def testPutGet(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('mvapich2', array): continue for count in range(self.COUNT_MIN, 10): for rank in range(size): sbuf = array([rank]*count, typecode) rbuf = array(-1, typecode, count+1) self.WIN.Fence() self.WIN.Lock(rank) r = self.WIN.Rput(sbuf.as_mpi(), rank) r.Wait() self.WIN.Flush(rank) r = self.WIN.Rget(rbuf.as_mpi_c(count), rank) r.Wait() self.WIN.Unlock(rank) for i in range(count): self.assertEqual(sbuf[i], rank) self.assertEqual(rbuf[i], rank) self.assertEqual(rbuf[-1], -1) @unittest.skipMPI('openmpi(>=1.10.0,<1.11.0)') def testAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue if typecode in 'FDG': continue for count in range(self.COUNT_MIN, 10): for rank in range(size): ones = array([1]*count, typecode) sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE): self.WIN.Lock(rank) self.WIN.Put(ones.as_mpi(), rank) self.WIN.Flush(rank) r = self.WIN.Raccumulate(sbuf.as_mpi(), rank, op=op) r.Wait() self.WIN.Flush(rank) r = self.WIN.Rget(rbuf.as_mpi_c(count), rank) r.Wait() self.WIN.Unlock(rank) # for i in range(count): 
self.assertEqual(sbuf[i], i) self.assertEqual(rbuf[i], op(1, i)) self.assertEqual(rbuf[-1], -1) @unittest.skipMPI('openmpi(>=1.10,<1.11)') def testGetAccumulate(self): group = self.WIN.Get_group() size = group.Get_size() group.Free() for array, typecode in arrayimpl.subTest(self): if unittest.is_mpi_gpu('openmpi', array): continue if unittest.is_mpi_gpu('mvapich2', array): continue if typecode in 'FDG': continue for count in range(self.COUNT_MIN, 10): for rank in range(size): ones = array([1]*count, typecode) sbuf = array(range(count), typecode) rbuf = array(-1, typecode, count+1) gbuf = array(-1, typecode, count+1) for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE, MPI.NO_OP): self.WIN.Lock(rank) self.WIN.Put(ones.as_mpi(), rank) self.WIN.Flush(rank) r = self.WIN.Rget_accumulate(sbuf.as_mpi(), rbuf.as_mpi_c(count), rank, op=op) r.Wait() self.WIN.Flush(rank) r = self.WIN.Rget(gbuf.as_mpi_c(count), rank) r.Wait() self.WIN.Unlock(rank) # for i in range(count): self.assertEqual(sbuf[i], i) self.assertEqual(rbuf[i], 1) self.assertEqual(gbuf[i], op(1, i)) self.assertEqual(rbuf[-1], -1) self.assertEqual(gbuf[-1], -1) def testPutProcNull(self): rank = self.COMM.Get_rank() self.WIN.Lock(rank) r = self.WIN.Rput(None, MPI.PROC_NULL, None) r.Wait() self.WIN.Unlock(rank) def testGetProcNull(self): rank = self.COMM.Get_rank() self.WIN.Lock(rank) r = self.WIN.Rget(None, MPI.PROC_NULL, None) r.Wait() self.WIN.Unlock(rank) def testAccumulateProcNullReplace(self): rank = self.COMM.Get_rank() zeros = mkzeros(8) self.WIN.Lock(rank) r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) r.Wait() r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE) r.Wait() self.WIN.Unlock(rank) def testAccumulateProcNullSum(self): rank = self.COMM.Get_rank() zeros = mkzeros(8) self.WIN.Lock(rank) r = self.WIN.Raccumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) r.Wait() r = self.WIN.Raccumulate([None, MPI.INT], MPI.PROC_NULL, None, MPI.SUM) r.Wait() self.WIN.Unlock(rank) @unittest.skipMPI('MPI(<3.0)') @unittest.skipMPI('openmpi(<1.8.1)') @unittest.skipMPI('MPICH2(<1.5.0)') class TestRMASelf(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_SELF @unittest.skipMPI('MPI(<3.0)') @unittest.skipMPI('openmpi(<1.8.1)') @unittest.skipMPI('MPICH2(<1.5.0)') class TestRMAWorld(BaseTestRMA, unittest.TestCase): COMM = MPI.COMM_WORLD SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI' try: if SpectrumMPI: raise NotImplementedError MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestRMA, 'mpi-rma-nb') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_spawn.py000066400000000000000000000230611460670727200165000ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys, os, mpi4py MPI4PYPATH = os.path.abspath(os.path.dirname(mpi4py.__path__[0])) CHILDSCRIPT = os.path.abspath( os.path.join(os.path.dirname(__file__), 'spawn_child.py') ) HAVE_MPE = 'MPE_LOGFILE_PREFIX' in os.environ HAVE_VT = 'VT_FILE_PREFIX' in os.environ def childscript(): from tempfile import mkstemp from textwrap import dedent fd, script = mkstemp(suffix='.py', prefix="mpi4py-") os.close(fd) with open(script, "w") as f: f.write(dedent("""\ #!%(python)s import sys; sys.path.insert(0, "%(path)s") import mpi4py if %(mpe)s: mpi4py.profile('mpe', logfile="%(logfile)s") if %(vt)s: mpi4py.profile('vt', logfile="%(logfile)s") from mpi4py import MPI parent = MPI.Comm.Get_parent() parent.Barrier() 
parent.Disconnect() assert parent == MPI.COMM_NULL parent = MPI.Comm.Get_parent() assert parent == MPI.COMM_NULL """ % dict(python=sys.executable, path=MPI4PYPATH, mpe=HAVE_MPE, vt=HAVE_VT, logfile="runtests-mpi4py-child"))) os.chmod(script, int("770", 8)) return script def ch4_ucx(): return 'ch4:ucx' in MPI.Get_library_version() def ch4_ofi(): return 'ch4:ofi' in MPI.Get_library_version() def appnum(): if MPI.APPNUM == MPI.KEYVAL_INVALID: return None return MPI.COMM_WORLD.Get_attr(MPI.APPNUM) def badport(): if MPI.get_vendor()[0] != 'MPICH': return False try: port = MPI.Open_port() MPI.Close_port(port) except: port = "" return port == "" def using_GPU(): # Once a CUDA context is created, the process cannot be forked. # Note: This seems to be a partial fix. Even if we are running cpu-only # tests, if MPI is built with CUDA support we can still fail. Unfortunately # there is no runtime check for us to detect if it's the case... disabled_cupy = (sys.modules.get('cupy', -1) is None) disabled_numba = (sys.modules.get('numba', -1) is None) return False if (disabled_cupy and disabled_numba) else True @unittest.skipMPI('MPI(<2.0)') @unittest.skipMPI('openmpi(<3.0.0)') @unittest.skipMPI('openmpi(==4.0.0)') @unittest.skipMPI('openmpi(==4.0.1)', sys.platform=='darwin') @unittest.skipMPI('openmpi(==4.0.2)', sys.platform=='darwin') @unittest.skipMPI('mpich(<4.1.0)', appnum() is None) @unittest.skipMPI('mpich(==4.1.2)', sys.platform=='darwin') @unittest.skipMPI('mpich', badport()) @unittest.skipMPI('msmpi(<8.1.0)') @unittest.skipMPI('msmpi', appnum() is None) @unittest.skipMPI('MVAPICH2') @unittest.skipMPI('MPICH2') @unittest.skipMPI('MPICH1') @unittest.skipMPI('PlatformMPI') @unittest.skipIf(using_GPU(), 'using CUDA') class BaseTestSpawn(object): COMM = MPI.COMM_NULL COMMAND = sys.executable ARGS = [CHILDSCRIPT, MPI4PYPATH, "mpe" if HAVE_MPE else "vt" if HAVE_VT else ""] MAXPROCS = 1 INFO = MPI.INFO_NULL ROOT = 0 def testCommSpawn(self): self.COMM.Barrier() child = self.COMM.Spawn(self.COMMAND, self.ARGS, self.MAXPROCS, info=self.INFO, root=self.ROOT) local_size = child.Get_size() remote_size = child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, self.MAXPROCS) @unittest.skipMPI('msmpi') def testErrcodes(self): self.COMM.Barrier() errcodes = [] child = self.COMM.Spawn(self.COMMAND, self.ARGS, self.MAXPROCS, info=self.INFO, root=self.ROOT, errcodes=errcodes) child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(len(errcodes), self.MAXPROCS) for errcode in errcodes: self.assertEqual(errcode, MPI.SUCCESS) @unittest.skipMPI('msmpi') @unittest.skipMPI('mpich(==3.4.1)', ch4_ofi()) def testArgsOnlyAtRoot(self): self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: child = self.COMM.Spawn(self.COMMAND, self.ARGS, self.MAXPROCS, info=self.INFO, root=self.ROOT) else: child = self.COMM.Spawn(None, None, -1, info=MPI.INFO_NULL, root=self.ROOT) child.Barrier() child.Disconnect() self.COMM.Barrier() @unittest.skipIf(os.name != 'posix', 'posix') def testNoArgs(self): self.COMM.Barrier() script = None if self.COMM.Get_rank() == self.ROOT: script = childscript() self.COMM.Barrier() script = self.COMM.bcast(script, root=self.ROOT) child = self.COMM.Spawn(script, None, self.MAXPROCS, info=self.INFO, root=self.ROOT) child.Barrier() child.Disconnect() self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: os.remove(script) self.COMM.Barrier() def testCommSpawnMultiple(self): 
self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) MAXPROCS = [self.MAXPROCS] * len(COMMAND) INFO = [self.INFO] * len(COMMAND) child = self.COMM.Spawn_multiple( COMMAND, ARGS, MAXPROCS, info=INFO, root=self.ROOT) local_size = child.Get_size() remote_size = child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, sum(MAXPROCS)) def testCommSpawnMultipleDefaults1(self): self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) child = self.COMM.Spawn_multiple(COMMAND, ARGS) local_size = child.Get_size() remote_size = child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, len(COMMAND)) def testCommSpawnMultipleDefaults2(self): self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) child = self.COMM.Spawn_multiple(COMMAND, ARGS, 1, MPI.INFO_NULL) local_size = child.Get_size() remote_size = child.Get_remote_size() child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(local_size, self.COMM.Get_size()) self.assertEqual(remote_size, len(COMMAND)) @unittest.skipMPI('msmpi') def testErrcodesMultiple(self): self.COMM.Barrier() count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS]*len(COMMAND) MAXPROCS = list(range(1, len(COMMAND)+1)) INFO = MPI.INFO_NULL errcodelist = [] child = self.COMM.Spawn_multiple( COMMAND, ARGS, MAXPROCS, info=INFO, root=self.ROOT, errcodes=errcodelist) child.Barrier() child.Disconnect() self.COMM.Barrier() self.assertEqual(len(errcodelist), len(COMMAND)) for i, errcodes in enumerate(errcodelist): self.assertEqual(len(errcodes), MAXPROCS[i]) for errcode in errcodes: self.assertEqual(errcode, MPI.SUCCESS) @unittest.skipMPI('msmpi') def testArgsOnlyAtRootMultiple(self): self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: count = 2 + (self.COMM.Get_size() == 0) COMMAND = [self.COMMAND] * count ARGS = [self.ARGS] * len(COMMAND) MAXPROCS = list(range(1, len(COMMAND)+1)) INFO = [MPI.INFO_NULL] * len(COMMAND) child = self.COMM.Spawn_multiple( COMMAND, ARGS, MAXPROCS, info=INFO, root=self.ROOT) else: child = self.COMM.Spawn_multiple( None, None, -1, info=MPI.INFO_NULL, root=self.ROOT) child.Barrier() child.Disconnect() self.COMM.Barrier() @unittest.skipIf(os.name != 'posix', 'posix') def testNoArgsMultiple(self): self.COMM.Barrier() script = None if self.COMM.Get_rank() == self.ROOT: script = childscript() self.COMM.Barrier() script = self.COMM.bcast(script, root=self.ROOT) count = 2 + (self.COMM.Get_size() == 0) COMMAND = [script] * count MAXPROCS = list(range(1, len(COMMAND)+1)) INFO = [self.INFO] * len(COMMAND) child = self.COMM.Spawn_multiple(COMMAND, None, MAXPROCS, info=INFO, root=self.ROOT) child.Barrier() child.Disconnect() self.COMM.Barrier() if self.COMM.Get_rank() == self.ROOT: os.remove(script) self.COMM.Barrier() class TestSpawnSelf(BaseTestSpawn, unittest.TestCase): COMM = MPI.COMM_SELF class TestSpawnWorld(BaseTestSpawn, unittest.TestCase): COMM = MPI.COMM_WORLD class TestSpawnSelfMany(BaseTestSpawn, unittest.TestCase): COMM = MPI.COMM_SELF MAXPROCS = MPI.COMM_WORLD.Get_size() class TestSpawnWorldMany(BaseTestSpawn, unittest.TestCase): COMM = MPI.COMM_WORLD MAXPROCS = 
MPI.COMM_WORLD.Get_size() if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_status.py000066400000000000000000000062171460670727200166770ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest class TestStatus(unittest.TestCase): def setUp(self): self.STATUS = MPI.Status() def tearDown(self): self.STATUS = None def testDefaultFieldValues(self): self.assertEqual(self.STATUS.Get_source(), MPI.ANY_SOURCE) self.assertEqual(self.STATUS.Get_tag(), MPI.ANY_TAG) self.assertEqual(self.STATUS.Get_error(), MPI.SUCCESS) def testGetCount(self): count = self.STATUS.Get_count(MPI.BYTE) self.assertEqual(count, 0) def testGetElements(self): elements = self.STATUS.Get_elements(MPI.BYTE) self.assertEqual(elements, 0) def testSetElements(self): try: self.STATUS.Set_elements(MPI.BYTE, 7) count = self.STATUS.Get_count(MPI.BYTE) self.assertEqual(count, 7) elements = self.STATUS.Get_elements(MPI.BYTE) self.assertEqual(elements, 7) except NotImplementedError: if MPI.Get_version() >= (2,0): raise self.skipTest('mpi-status-set_elements') def testIsCancelled(self): flag = self.STATUS.Is_cancelled() self.assertTrue(type(flag) is bool) self.assertFalse(flag) def testSetCancelled(self): try: self.STATUS.Set_cancelled(True) flag = self.STATUS.Is_cancelled() self.assertTrue(flag) except NotImplementedError: if MPI.Get_version() >= (2,0): raise self.skipTest('mpi-status-set_cancelled') def testPyProps(self): self.assertEqual(self.STATUS.Get_source(), self.STATUS.source) self.assertEqual(self.STATUS.Get_tag(), self.STATUS.tag) self.assertEqual(self.STATUS.Get_error(), self.STATUS.error) self.STATUS.source = 1 self.STATUS.tag = 2 self.STATUS.error = MPI.ERR_ARG self.assertEqual(self.STATUS.source, 1) self.assertEqual(self.STATUS.tag, 2) self.assertEqual(self.STATUS.error, MPI.ERR_ARG) def testConstructor(self): self.assertRaises(TypeError, MPI.Status, 123) self.assertRaises(TypeError, MPI.Status, "abc") def testCopyConstructor(self): self.STATUS.source = 1 self.STATUS.tag = 2 self.STATUS.error = MPI.ERR_ARG status = MPI.Status(self.STATUS) self.assertEqual(status.source, 1) self.assertEqual(status.tag, 2) self.assertEqual(status.error, MPI.ERR_ARG) try: self.STATUS.Set_elements(MPI.BYTE, 7) except NotImplementedError: pass try: self.STATUS.Set_cancelled(True) except NotImplementedError: pass status = MPI.Status(self.STATUS) try: count = status.Get_count(MPI.BYTE) elems = status.Get_elements(MPI.BYTE) self.assertEqual(count, 7) self.assertEqual(elems, 7) except NotImplementedError: pass try: flag = status.Is_cancelled() self.assertTrue(flag) except NotImplementedError: pass if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_subclass.py000066400000000000000000000175751460670727200172040ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys # --- class MyBaseComm(object): def free(self): if self != MPI.COMM_NULL: MPI.Comm.Free(self) class BaseTestBaseComm(object): def setUp(self): self.comm = self.CommType(self.COMM_BASE) def testSubType(self): self.assertTrue(type(self.comm) not in [ MPI.Comm, MPI.Intracomm, MPI.Cartcomm, MPI.Graphcomm, MPI.Distgraphcomm, MPI.Intercomm]) self.assertTrue(isinstance(self.comm, self.CommType)) def testCloneFree(self): if self.COMM_BASE != MPI.COMM_NULL: comm = self.comm.Clone() else: comm = self.CommType() self.assertTrue(isinstance(comm, MPI.Comm)) self.assertTrue(isinstance(comm, self.CommType)) comm.free() def tearDown(self): self.comm.free() # --- class MyComm(MPI.Comm, MyBaseComm): def 
__new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: comm = comm.Clone() return super(MyComm, cls).__new__(cls, comm) class BaseTestMyComm(BaseTestBaseComm): CommType = MyComm class TestMyCommNULL(BaseTestMyComm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyCommSELF(BaseTestMyComm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyCommWORLD(BaseTestMyComm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyIntracomm(MPI.Intracomm, MyBaseComm): def __new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: comm = comm.Dup() return super(MyIntracomm, cls).__new__(cls, comm) class BaseTestMyIntracomm(BaseTestBaseComm): CommType = MyIntracomm class TestMyIntracommNULL(BaseTestMyIntracomm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyIntracommSELF(BaseTestMyIntracomm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyIntracommWORLD(BaseTestMyIntracomm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyCartcomm(MPI.Cartcomm, MyBaseComm): def __new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: dims = [comm.size] comm = comm.Create_cart(dims) return super(MyCartcomm, cls).__new__(cls, comm) class BaseTestMyCartcomm(BaseTestBaseComm): CommType = MyCartcomm class TestMyCartcommNULL(BaseTestMyCartcomm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyCartcommSELF(BaseTestMyCartcomm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyCartcommWORLD(BaseTestMyCartcomm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyGraphcomm(MPI.Graphcomm, MyBaseComm): def __new__(cls, comm=None): if comm is not None: if comm != MPI.COMM_NULL: index = list(range(0, comm.size+1)) edges = list(range(0, comm.size)) comm = comm.Create_graph(index, edges) return super(MyGraphcomm, cls).__new__(cls, comm) class BaseTestMyGraphcomm(BaseTestBaseComm): CommType = MyGraphcomm class TestMyGraphcommNULL(BaseTestMyGraphcomm, unittest.TestCase): COMM_BASE = MPI.COMM_NULL class TestMyGraphcommSELF(BaseTestMyGraphcomm, unittest.TestCase): COMM_BASE = MPI.COMM_SELF class TestMyGraphcommWORLD(BaseTestMyGraphcomm, unittest.TestCase): COMM_BASE = MPI.COMM_WORLD # --- class MyRequest(MPI.Request): def __new__(cls, request=None): return super(MyRequest, cls).__new__(cls, request) def test(self): return super(type(self), self).Test() def wait(self): return super(type(self), self).Wait() class MyPrequest(MPI.Prequest): def __new__(cls, request=None): return super(MyPrequest, cls).__new__(cls, request) def test(self): return super(type(self), self).Test() def wait(self): return super(type(self), self).Wait() def start(self): return super(type(self), self).Start() class MyGrequest(MPI.Grequest): def __new__(cls, request=None): return super(MyGrequest, cls).__new__(cls, request) def test(self): return super(type(self), self).Test() def wait(self): return super(type(self), self).Wait() class BaseTestMyRequest(object): def setUp(self): self.req = self.MyRequestType(MPI.REQUEST_NULL) def testSubType(self): self.assertTrue(type(self.req) is not self.MPIRequestType) self.assertTrue(isinstance(self.req, self.MPIRequestType)) self.assertTrue(isinstance(self.req, self.MyRequestType)) self.req.test() class TestMyRequest(BaseTestMyRequest, unittest.TestCase): MPIRequestType = MPI.Request MyRequestType = MyRequest class TestMyPrequest(BaseTestMyRequest, unittest.TestCase): MPIRequestType = MPI.Prequest MyRequestType = MyPrequest class TestMyGrequest(BaseTestMyRequest, unittest.TestCase): MPIRequestType = 
MPI.Grequest MyRequestType = MyGrequest class TestMyRequest2(TestMyRequest): def setUp(self): req = MPI.COMM_SELF.Isend( [MPI.BOTTOM, 0, MPI.BYTE], dest=MPI.PROC_NULL, tag=0) self.req = MyRequest(req) @unittest.skipMPI('mpich(==3.4.1)') class TestMyPrequest2(TestMyPrequest): def setUp(self): req = MPI.COMM_SELF.Send_init( [MPI.BOTTOM, 0, MPI.BYTE], dest=MPI.PROC_NULL, tag=0) self.req = MyPrequest(req) def tearDown(self): self.req.Free() def testStart(self): for i in range(5): self.req.start() self.req.test() self.req.start() self.req.wait() # --- class MyWin(MPI.Win): def __new__(cls, win=None): return MPI.Win.__new__(cls, win) def free(self): if self != MPI.WIN_NULL: MPI.Win.Free(self) class BaseTestMyWin(object): def setUp(self): w = MPI.Win.Create(MPI.BOTTOM) self.win = MyWin(w) def tearDown(self): self.win.free() def testSubType(self): self.assertTrue(type(self.win) is not MPI.Win) self.assertTrue(isinstance(self.win, MPI.Win)) self.assertTrue(isinstance(self.win, MyWin)) def testFree(self): self.assertTrue(self.win) self.win.free() self.assertFalse(self.win) class TestMyWin(BaseTestMyWin, unittest.TestCase): pass SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI' try: if SpectrumMPI: raise NotImplementedError MPI.Win.Create(MPI.BOTTOM).Free() except (NotImplementedError, MPI.Exception): unittest.disable(BaseTestMyWin, 'mpi-win') # --- import os, tempfile class MyFile(MPI.File): def __new__(cls, file=None): return MPI.File.__new__(cls, file) def close(self): if self != MPI.FILE_NULL: MPI.File.Close(self) class BaseTestMyFile(object): def openfile(self): fd, fname = tempfile.mkstemp(prefix='mpi4py') os.close(fd) amode = MPI.MODE_RDWR | MPI.MODE_CREATE | MPI.MODE_DELETE_ON_CLOSE try: self.file = MPI.File.Open(MPI.COMM_SELF, fname, amode, MPI.INFO_NULL) return self.file except Exception: os.remove(fname) raise def setUp(self): f = self.openfile() self.file = MyFile(f) def tearDown(self): self.file.close() def testSubType(self): self.assertTrue(type(self.file) is not MPI.File) self.assertTrue(isinstance(self.file, MPI.File)) self.assertTrue(isinstance(self.file, MyFile)) def testFree(self): self.assertTrue(self.file) self.file.close() self.assertFalse(self.file) class TestMyFile(BaseTestMyFile, unittest.TestCase): pass try: BaseTestMyFile().openfile().Close() except NotImplementedError: unittest.disable(BaseTestMyFile, 'mpi-file') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_threads.py000066400000000000000000000040751460670727200170060ustar00rootroot00000000000000import sys try: import threading HAVE_THREADING = True except ImportError: import dummy_threading as threading HAVE_THREADING = False VERBOSE = False #VERBOSE = True import mpi4py.rc mpi4py.rc.thread_level = 'multiple' from mpi4py import MPI import mpiunittest as unittest pypy3_lt_50 = (hasattr(sys, 'pypy_version_info') and sys.version_info[0] == 3 and sys.pypy_version_info < (5, 0)) class TestMPIThreads(unittest.TestCase): def testThreadLevels(self): levels = [MPI.THREAD_SINGLE, MPI.THREAD_FUNNELED, MPI.THREAD_SERIALIZED, MPI.THREAD_MULTIPLE] for i in range(len(levels)-1): self.assertTrue(levels[i] < levels[i+1]) try: provided = MPI.Query_thread() self.assertTrue(provided in levels) except NotImplementedError: self.skipTest('mpi-query_thread') def testIsThreadMain(self): try: flag = MPI.Is_thread_main() except NotImplementedError: self.skipTest('mpi-is_thread_main') name = threading.current_thread().name main = (name == 'MainThread') or not HAVE_THREADING self.assertEqual(flag, main) if VERBOSE: log = lambda 
m: sys.stderr.write(m+'\n') log("%s: MPI.Is_thread_main() -> %s" % (name, flag)) @unittest.skipIf(pypy3_lt_50, 'pypy3(<5.0)') def testIsThreadMainInThread(self): try: provided = MPI.Query_thread() except NotImplementedError: self.skipTest('mpi-query_thread') self.testIsThreadMain() T = [threading.Thread(target=self.testIsThreadMain) for _ in range(5)] if provided == MPI.THREAD_MULTIPLE: for t in T: t.start() for t in T: t.join() elif provided == MPI.THREAD_SERIALIZED: for t in T: t.start() t.join() else: self.skipTest('mpi-thread_level') if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_util_dtlib.py000066400000000000000000000346771460670727200175220ustar00rootroot00000000000000from mpi4py import MPI from mpi4py.util.dtlib import from_numpy_dtype as fromnumpy from mpi4py.util.dtlib import to_numpy_dtype as tonumpy import sys, os import itertools try: import mpiunittest as unittest except ImportError: sys.path.append( os.path.abspath( os.path.dirname(__file__))) import mpiunittest as unittest try: import numpy np_dtype = numpy.dtype np_version = tuple(map(int, numpy.__version__.split('.', 2)[:2])) except ImportError: numpy = None np_dtype = None np_version = None typecodes = list("?cbhilqpBHILQfdgFDG") typecodes += ['b{:d}'.format(n) for n in (1,)] typecodes += ['i{:d}'.format(n) for n in (1,2,4,8)] typecodes += ['u{:d}'.format(n) for n in (1,2,4,8)] typecodes += ['f{:d}'.format(n) for n in (4,8)] np_lt_117 = (np_version and np_version < (1, 17)) if np_lt_117 or sys.version_info[0] == 2: typecodes.remove('L') if np_lt_117: typecodes.remove('F') typecodes.remove('D') typecodes.remove('G') name, version = MPI.get_vendor() mpich_lt_400 = (name == 'MPICH') and version < (4, 0, 0) if mpich_lt_400: typecodes = [t for t in typecodes if t not in 'FDG'] datatypes = [MPI._typedict[t] for t in typecodes] class TestUtilDTLib(unittest.TestCase): def check(self, arg, *args): if numpy is None: if isinstance(arg, MPI.Datatype): mt1 = arg.Dup() dt1 = tonumpy(mt1) mt1.Free() return if isinstance(arg, MPI.Datatype): mt1 = arg.Dup() dt1 = tonumpy(mt1) else: dt1 = np_dtype(arg, *args) mt1 = fromnumpy(dt1) dt2 = tonumpy(mt1) mt2 = fromnumpy(dt2) dt3 = tonumpy(mt2) mt3 = fromnumpy(dt3) try: self.assertEqual(dt1, dt2) self.assertEqual(dt2, dt3) if isinstance(arg, MPI.Datatype): if arg.combiner not in ( MPI.COMBINER_INDEXED, MPI.COMBINER_HINDEXED, MPI.COMBINER_INDEXED_BLOCK, MPI.COMBINER_HINDEXED_BLOCK, ): self.assertEqual(dt1.itemsize, mt1.extent) self.assertEqual(dt2.itemsize, mt2.extent) self.assertEqual(dt3.itemsize, mt3.extent) finally: mt1.Free() mt2.Free() mt3.Free() def testBasic(self): for spec in typecodes: with self.subTest(spec=spec): self.check(spec) for mpit in datatypes: with self.subTest(name=mpit.name): self.check(mpit) def testSubarray1(self): shapes = [(1,), (1, 1), (1, 1, 1), (3,), (3, 4), (2, 3, 4),] for dt, shape in itertools.product(typecodes, shapes): spec = "{}{}".format(shape, dt) with self.subTest(spec=spec): self.check(spec) def testSubarray2(self): shapes = [(1,), (1, 1), (1, 1, 1), (3,), (3, 4), (2, 3, 4),] orders = [MPI.ORDER_C, MPI.ORDER_FORTRAN] for mt, shape, order in itertools.product(datatypes, shapes, orders): with self.subTest(name=mt.name, shape=shape, order=order): starts = (0,) * len(shape) mt1 = mt.Create_subarray(shape, shape, starts, order) self.check(mt1) mt1.Free() @unittest.skipMPI('msmpi') def testStruct1(self): shapes = [(), (1,), (3,), (3, 5),] iter1 = itertools.product(shapes, typecodes) iter2 = itertools.product(shapes, typecodes) iterN = 
itertools.product(iter1, iter2) iterA = iter([False, True]) for nt, align in itertools.product(iterN, iterA): spec = "{}{},{}{}".format(*sum(nt, ())) with self.subTest(spec=spec, align=align): self.check(spec, align) @unittest.skipMPI('msmpi') def testStruct2(self): iter1 = iter(typecodes) iter2 = iter(typecodes) iter3 = iter(typecodes) iterN = itertools.product(iter1, iter2, iter3) iterA = iter([False, True]) for tp, align in itertools.product(iterN, iterA): spec = "{},{},{}".format(*tp) with self.subTest(spec=spec, align=align): self.check(spec, align) @unittest.skipMPI('msmpi') def testStruct3(self): blens = [1, 2, 3] disps = [1, 27, 71] types = [MPI.INT, MPI.DOUBLE, MPI.INT] mt1 = MPI.Datatype.Create_struct(blens, disps, types) mt2 = MPI.Datatype.Create_struct([1], [0], [mt1]) self.check(mt1) self.check(mt2) mt1.Free() mt2.Free() def makeStruct(self, dt, mt): dt = numpy.dtype(dt).str stp = numpy.dtype(",".join(['B', dt, 'B']), align=True) off = lambda i: stp.fields[stp.names[i]][1] blens = [1, 1, 1] disps = [0, off(1), off(2)] types = [MPI.BYTE, mt, MPI.BYTE] mtp = MPI.Datatype.Create_struct(blens, disps, types) return stp, mtp @unittest.skipMPI('msmpi') @unittest.skipIf(numpy is None, 'numpy') def testStruct4(self): for t in typecodes: with self.subTest(typecode=t): dt0 = np_dtype(t) mt0 = fromnumpy(dt0) stp, mt1 = self.makeStruct(t, mt0) ex1 = stp.itemsize for n, mt in ( (1, mt1), (1, mt1.Dup()), (1, mt1.Create_resized(0, 1*ex1)), (3, mt1.Create_resized(0, 3*ex1)), (3, mt1.Create_contiguous(3)), (5, mt1.Create_subarray([5], [5], [0])), (7, MPI.Datatype.Create_struct([7], [0], [mt1])), ): dt = tonumpy(mt) self.assertEqual(mt.extent, n*ex1) self.assertEqual(dt.itemsize, n*ex1) self.assertTrue(dt.isalignedstruct) self.check(mt) self.check(dt) if mt != mt1: mt.Free() mt0.Free() mt1.Free() def testVector(self): for mt in datatypes: with self.subTest(name=mt.name): mt1 = mt.Create_vector(3, 4, 6) mt2 = mt.Create_hvector(3, 4, 6*mt.extent) self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_vector(2, 3, 4) mt4 = mt2.Create_hvector(2, 3, 4*mt2.extent) self.check(mt3) self.check(mt4) dt3 = tonumpy(mt3) dt4 = tonumpy(mt4) self.check(dt3) self.check(dt4) self.assertEqual(dt3, dt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() def testHVector(self): for mt in datatypes: with self.subTest(name=mt.name): mt1 = mt.Create_hvector(3, 4, 6*mt.extent+1) mt2 = mt1.Dup() self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_hvector(2, 3, 4*mt1.extent+1) mt4 = mt2.Create_hvector(2, 3, 4*mt2.extent+1) self.check(mt3) self.check(mt4) dt3 = tonumpy(mt3) dt4 = tonumpy(mt4) self.check(dt3) self.check(dt4) self.assertEqual(dt3, dt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() def testIndexed(self): disps = [1, 6, 12] for mt in datatypes: with self.subTest(name=mt.name): mt1 = mt.Create_indexed([4]*3, disps) mt2 = mt.Create_indexed_block(4, disps) self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_indexed([1], [0]) mt4 = mt2.Create_indexed_block(1, [0]) self.check(mt3) self.check(mt4) dt3 = tonumpy(mt3) dt4 = tonumpy(mt4) self.check(dt3) self.check(dt4) self.assertEqual(dt3, dt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() def testHIndexed(self): disps = [0, 6, 12] for mt in datatypes: with self.subTest(name=mt.name): mt1 = 
mt.Create_hindexed([4]*3, [d*mt.extent+1 for d in disps]) mt2 = mt.Create_hindexed_block(4, [d*mt.extent+1 for d in disps]) self.check(mt1) self.check(mt2) dt1 = tonumpy(mt1) dt2 = tonumpy(mt2) self.check(dt1) self.check(dt2) self.assertEqual(dt1, dt2) mt3 = mt1.Create_hindexed([1], [0]) mt4 = mt2.Create_hindexed_block(1, [0]) self.check(mt3) self.check(mt4) mt3.Free() mt4.Free() mt1.Free() mt2.Free() @unittest.skipMPI('msmpi') def testF77(self): mpif77types = [ MPI.CHARACTER, #MPI.LOGICAL, MPI.INTEGER, MPI.REAL, MPI.DOUBLE_PRECISION, MPI.COMPLEX, MPI.DOUBLE_COMPLEX, ] for mt in mpif77types: if mt == MPI.DATATYPE_NULL: continue if mt.Get_size() == 0: continue dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.itemsize, mt.extent) @unittest.skipMPI('msmpi') def testF90(self): mpif90types = ( MPI.INTEGER1, MPI.INTEGER2, MPI.INTEGER4, MPI.INTEGER8, MPI.INTEGER16, MPI.REAL4, MPI.REAL8, MPI.COMPLEX8, MPI.COMPLEX16, ) for mt in mpif90types: if mt == MPI.DATATYPE_NULL: continue if mt.Get_size() == 0: continue dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.itemsize, mt.extent) @unittest.skipMPI('msmpi') def testF90Integer(self): try: mt = MPI.Datatype.Create_f90_integer(1) if mt == MPI.DATATYPE_NULL or mt.Get_size() == 0: raise NotImplementedError except NotImplementedError: self.skipTest('mpi-type-create-f90-integer') for r in range(1, 19): with self.subTest(r=r): mt = MPI.Datatype.Create_f90_integer(r) dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.kind, 'i') self.assertEqual(dt.itemsize, mt.extent) tstr = 'i{}'.format(mt.Get_size()) stp, mtp = self.makeStruct(tstr, mt) self.assertEqual(stp.itemsize, mtp.extent) self.check(mtp) mtp.Free() @unittest.skipMPI('msmpi') def testF90Real(self): try: mt = MPI.Datatype.Create_f90_real(7, MPI.UNDEFINED) if mt == MPI.DATATYPE_NULL or mt.Get_size() == 0: raise NotImplementedError except NotImplementedError: self.skipTest('mpi-type-create-f90-real') for p in (6, 7, 14, 15): with self.subTest(p=p): mt = MPI.Datatype.Create_f90_real(p, MPI.UNDEFINED) dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.kind, 'f') self.assertEqual(dt.itemsize, mt.extent) tstr = 'f{}'.format(mt.Get_size()) stp, mtp = self.makeStruct(tstr, mt) self.assertEqual(stp.itemsize, mtp.extent) self.check(mtp) mtp.Free() @unittest.skipMPI('msmpi') def testF90Complex(self): try: mt = MPI.Datatype.Create_f90_complex(7, MPI.UNDEFINED) if mt == MPI.DATATYPE_NULL or mt.Get_size() == 0: raise NotImplementedError except NotImplementedError: self.skipTest('mpi-type-create-f90-complex') for p in (6, 7, 14, 15): with self.subTest(p=p): mt = MPI.Datatype.Create_f90_complex(p, MPI.UNDEFINED) dt = tonumpy(mt) if np_dtype is not None: self.assertEqual(dt.kind, 'c') self.assertEqual(dt.itemsize, mt.extent) @unittest.skipMPI('msmpi') def testCoverage(self): from mpi4py.util import dtlib mpitypes = ( MPI.LOGICAL, ) for mt in mpitypes: if mt == MPI.DATATYPE_NULL: continue if mt.Get_size() == 0: continue dtlib._get_alignment(mt) def testAlignment(self): from mpi4py.util import dtlib complexcodes = ['c{}'.format(n) for n in (8, 16)] for t in typecodes + complexcodes: with self.subTest(typecode=t): alignment1 = dtlib._get_alignment_ctypes(t) if np_dtype is not None: alignment2 = numpy.dtype(t).alignment self.assertTrue(alignment1, alignment2) @unittest.skipIf(numpy is None, 'numpy') def testFailures(self): endian = '>' if np_dtype(' 0) comm.barrier() statuses = (MPI.Status(),) self.RequestType.waitall(requests1, statuses) self.assertEqual(statuses[0].error, 0) 
def testSendrecv(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: rmess = self.COMM.sendrecv(smess, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) if isinstance(self.COMM, pkl5.Comm): rbuf = MPI.Alloc_mem(32) else: rbuf = None for smess in messages: dest = (rank + 1) % size source = (rank - 1) % size rmess = self.COMM.sendrecv(None, dest, 0, None, source, 0) self.assertEqual(rmess, None) rmess = self.COMM.sendrecv(smess, dest, 0, None, source, 0) self.assertEqual(rmess, smess) status = MPI.Status() rmess = self.COMM.sendrecv(smess, dest, 42, rbuf, source, 42, status) self.assertEqual(status.source, source) self.assertEqual(status.tag, 42) self.assertEqual(status.error, 0) if rbuf is not None: MPI.Free_mem(rbuf) def testPingPong01(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() for smess in messages: self.COMM.send(smess, MPI.PROC_NULL) rmess = self.COMM.recv(None, MPI.PROC_NULL, 0) self.assertEqual(rmess, None) if size == 1: return smess = None if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) for smess in messages: if rank == 0: self.COMM.send(smess, rank+1, 0) rmess = self.COMM.recv(None, rank+1, 0) elif rank == 1: rmess = self.COMM.recv(None, rank-1, 0) self.COMM.send(smess, rank-1, 0) else: rmess = smess self.assertEqual(rmess, smess) def testIrecv(self): if isinstance(self.COMM, pkl5.Comm): self.assertRaises( RuntimeError, self.COMM.irecv, None, MPI.PROC_NULL, 0, ) def testProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() flag = comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertFalse(flag) for smess in messages: request = comm.issend(smess, comm.rank, 123) self.assertTrue(isinstance(request, self.RequestType)) self.assertTrue(request != MPI.REQUEST_NULL) self.assertFalse(request == MPI.REQUEST_NULL) self.assertTrue(request == self.RequestType(request)) self.assertFalse(request != self.RequestType(request)) self.assertTrue(request != None) self.assertFalse(request == None) self.assertTrue(request) while not comm.iprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status): pass self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) comm.probe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(request) flag, obj = request.test() self.assertTrue(request) self.assertFalse(flag) self.assertEqual(obj, None) obj = comm.recv(None, comm.rank, 123) self.assertEqual(obj, smess) self.assertTrue(request) obj = request.wait() self.assertFalse(request) self.assertEqual(obj, None) finally: comm.Free() def testMProbe(self): comm = self.COMM.Dup() try: message = comm.mprobe(MPI.PROC_NULL) self.assertTrue(isinstance(message, self.MessageType)) self.assertTrue(message == MPI.MESSAGE_NO_PROC) self.assertFalse(message != MPI.MESSAGE_NO_PROC) self.assertTrue(message != None) self.assertFalse(message == None) rmess = message.recv() self.assertTrue(message == MPI.MESSAGE_NULL) self.assertFalse(message != MPI.MESSAGE_NULL) self.assertTrue(rmess is None) message = comm.mprobe(MPI.PROC_NULL) self.assertTrue(isinstance(message, self.MessageType)) self.assertTrue(message == MPI.MESSAGE_NO_PROC) self.assertFalse(message != MPI.MESSAGE_NO_PROC) request = message.irecv() self.assertTrue(message == MPI.MESSAGE_NULL) self.assertFalse(message != MPI.MESSAGE_NULL) 
self.assertTrue(request != MPI.REQUEST_NULL) self.assertFalse(request == MPI.REQUEST_NULL) rmess = request.wait() self.assertTrue(request == MPI.REQUEST_NULL) self.assertFalse(request != MPI.REQUEST_NULL) self.assertTrue(rmess is None) for smess in messages: request = comm.issend(smess, comm.rank, 123) message = comm.mprobe(comm.rank, 123) self.assertTrue(isinstance(message, self.MessageType)) self.assertTrue(message == self.MessageType(message)) self.assertFalse(message != self.MessageType(message)) rmess = message.recv() self.assertEqual(rmess, smess) obj = request.wait() self.assertFalse(request) self.assertTrue(obj is None) flag, obj = request.test() self.assertTrue(flag) self.assertTrue(obj is None) for smess in messages: request = comm.issend(smess, comm.rank, 123) status = MPI.Status() message = comm.mprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) status = MPI.Status() rmess = message.recv(status) self.assertFalse(message) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() for smess in messages: request = comm.issend(smess, comm.rank, 123) status = MPI.Status() message = comm.mprobe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) rreq = message.irecv() self.assertFalse(message) self.assertTrue(rreq) status = MPI.Status() rmess = rreq.wait(status) self.assertFalse(rreq) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertEqual(rmess, smess) flag, obj = rreq.test() self.assertTrue(flag) self.assertTrue(obj is None) self.assertTrue(request) obj = request.wait() self.assertFalse(request) self.assertTrue(obj is None) flag, obj = request.test() self.assertTrue(flag) self.assertTrue(obj is None) for smess in messages: request = comm.issend(smess, comm.rank, 123) message = comm.mprobe(MPI.ANY_SOURCE, MPI.ANY_TAG) rreq = message.irecv() rreq.test() request.Free() finally: comm.Free() def testIMProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() for smess in messages: message = comm.improbe(MPI.PROC_NULL) self.assertTrue(isinstance(message, self.MessageType)) self.assertEqual(message, MPI.MESSAGE_NO_PROC) for smess in messages: message = comm.improbe(comm.rank, 123) self.assertEqual(message, None) request = comm.issend(smess, comm.rank, 123) while not comm.iprobe(comm.rank, 123): pass message = comm.improbe(comm.rank, 123) self.assertTrue(isinstance(message, self.MessageType)) rmess = message.recv() self.assertEqual(rmess, smess) request.wait() for smess in messages: message = comm.improbe(comm.rank, 123) self.assertEqual(message, None) request = comm.issend(smess, comm.rank, 123) while not comm.iprobe(comm.rank, 123): pass message = comm.improbe(MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) rmess = message.recv() self.assertFalse(message) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() self.assertFalse(request) finally: comm.Free() def testMessageProbeIProbe(self): comm = self.COMM.Dup() try: status = MPI.Status() for smess in messages: request = comm.issend(smess, comm.rank, 123) message = self.MessageType.probe(comm, MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) 
self.assertTrue(message) rmess = message.recv() self.assertFalse(message) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() self.assertFalse(request) for smess in messages: message = self.MessageType.iprobe(comm, comm.rank, 123) self.assertEqual(message, None) request = comm.issend(smess, comm.rank, 123) while not comm.iprobe(comm.rank, 123): pass message = self.MessageType.iprobe(comm, MPI.ANY_SOURCE, MPI.ANY_TAG, status) self.assertEqual(status.source, comm.rank) self.assertEqual(status.tag, 123) self.assertTrue(message) rmess = message.recv() self.assertFalse(message) self.assertEqual(rmess, smess) self.assertTrue(request) request.wait() self.assertFalse(request) finally: comm.Free() def testSSendAndMProbe(self): size = self.COMM.Get_size() rank = self.COMM.Get_rank() if size == 1: return comm = self.COMM.Dup() try: for smess in messages: if rank == 0: comm.ssend(smess, 1) message = comm.mprobe(1) rmess = message.recv() self.assertEqual(rmess, smess) if rank == 1: message = comm.mprobe(0) rmess = message.recv() comm.ssend(rmess, 0) self.assertEqual(rmess, smess) finally: comm.Free() def testRequest(self): req = self.RequestType() self.assertFalse(req) self.assertEqual(req, self.RequestType()) req = self.RequestType(MPI.REQUEST_NULL) self.assertFalse(req) self.assertEqual(req, MPI.REQUEST_NULL) self.assertEqual(req, self.RequestType()) def testMessage(self): msg = self.MessageType() self.assertFalse(msg) self.assertEqual(msg, self.MessageType()) msg = self.MessageType(MPI.MESSAGE_NULL) self.assertFalse(msg) self.assertEqual(msg, self.MessageType()) msg = self.MessageType(MPI.MESSAGE_NO_PROC) self.assertTrue(msg) self.assertEqual(msg, MPI.MESSAGE_NO_PROC) self.assertEqual(msg, self.MessageType(MPI.MESSAGE_NO_PROC)) self.assertNotEqual(msg, MPI.MESSAGE_NULL) def testBcastIntra(self, msglist=None, check=None): comm = self.COMM size = comm.Get_size() for smess in (msglist or messages): for root in range(size): rmess = comm.bcast(smess, root) if msglist and check: self.assertTrue(check(rmess)) else: self.assertEqual(rmess, smess) def testBcastInter(self, msglist=None, check=None): basecomm = self.COMM size = basecomm.Get_size() rank = basecomm.Get_rank() if size == 1: return if rank < size // 2 : COLOR = 0 local_leader = 0 remote_leader = size // 2 else: COLOR = 1 local_leader = 0 remote_leader = 0 basecomm.Barrier() intracomm = basecomm.Split(COLOR, key=0) intercomm = MPI.Intracomm.Create_intercomm( intracomm, local_leader, basecomm, remote_leader ) intracomm.Free() if isinstance(basecomm, pkl5.Intracomm): intercomm = pkl5.Intercomm(intercomm) rank = intercomm.Get_rank() size = intercomm.Get_size() rsize = intercomm.Get_remote_size() for smess in (msglist or messages)[:1]: intercomm.barrier() for color in [0, 1]: if COLOR == color: for root in range(size): if root == rank: rmess = intercomm.bcast(smess, root=MPI.ROOT) else: rmess = intercomm.bcast(None, root=MPI.PROC_NULL) self.assertEqual(rmess, None) else: for root in range(rsize): rmess = intercomm.bcast(None, root=root) if msglist and check: self.assertTrue(check(rmess)) else: self.assertEqual(rmess, smess) if isinstance(intercomm, pkl5.Intercomm): bcast = intercomm.bcast rsize = intercomm.Get_remote_size() self.assertRaises(MPI.Exception, bcast, None, root=rsize) intercomm.Free() @unittest.skipIf(numpy is None, 'numpy') def testBigMPI(self): comm = self.COMM size = comm.Get_size() rank = comm.Get_rank() dest = (rank + 1) % size source = (rank - 1) % size bigmpi = self.bigmpi blocksizes = ( 63, 64, 65, (1<<12)-1, 
(1<<12), (1<<12)+1, ) for blocksize in blocksizes: bigmpi.blocksize = blocksize a = numpy.empty(1024, dtype='i') b = numpy.empty(1024, dtype='i') c = numpy.empty(1024, dtype='i') a.fill(rank) b.fill(dest) c.fill(42) status = MPI.Status() smess = (a, b) rmess = comm.sendrecv( smess, dest, 42, None, source, 42, status, ) self.assertTrue(numpy.all(rmess[0] == source)) self.assertTrue(numpy.all(rmess[1] == rank)) self.assertTrue(status.Get_elements(MPI.BYTE) > 0) comm.barrier() status = MPI.Status() smess = (a, b) request = comm.issend(smess, dest, 123) rmess = comm.mprobe(source, 123).irecv().wait(status) self.assertTrue(numpy.all(rmess[0] == source)) self.assertTrue(numpy.all(rmess[1] == rank)) self.assertTrue(status.Get_elements(MPI.BYTE) > 0) request.Free() comm.barrier() check = lambda x: numpy.all(x == 42) self.testBcastIntra([c, c], check) self.testBcastInter([c, c], check) check2 = lambda x: check(x[0]) and check(x[1]) self.testBcastIntra([(c, c.copy())], check2) self.testBcastInter([(c, c.copy())], check2) class BaseTestPKL5(object): CommType = pkl5.Intracomm MessageType = pkl5.Message RequestType = pkl5.Request def setUp(self): super(BaseTestPKL5, self).setUp() self.pickle_prev = pkl5.pickle self.pickle = pkl5.Pickle() self.pickle.THRESHOLD = 0 pkl5.pickle = self.pickle def tearDown(self): super(BaseTestPKL5, self).tearDown() pkl5.pickle = self.pickle_prev @unittest.skipIf(numpy is None, 'numpy') def testPickle5(self): comm = self.COMM rank = comm.Get_rank() pickle = self.pickle protocols = list(range(-2, pickle.PROTOCOL+1)) for protocol in [None] + protocols: pickle.PROTOCOL = protocol for threshold in (-1, 0, 64, 256): pickle.THRESHOLD = threshold for slen in (0, 32, 64, 128, 256, 512): sobj = numpy.empty(slen, dtype='i') sobj.fill(rank) # robj = comm.sendrecv( sobj, rank, 42, None, rank, 42) self.assertTrue(numpy.all(sobj==robj)) # data, bufs = pickle.dumps(sobj) robj = pickle.loads(data, bufs) self.assertTrue(numpy.all(sobj==robj)) if protocol is None: protocol = MPI.Pickle().PROTOCOL if protocol < 0: protocol = pkl5._PROTOCOL if protocol >= 5 and sobj.nbytes >= threshold: self.assertEqual(len(bufs), 1) else: self.assertEqual(len(bufs), 0) class TestMPISelf(BaseTest, unittest.TestCase): COMM = MPI.COMM_SELF class TestMPIWorld(BaseTest, unittest.TestCase): COMM = MPI.COMM_WORLD class TestPKL5Self(BaseTestPKL5, TestMPISelf): pass class TestPKL5World(BaseTestPKL5, TestMPIWorld): pass if __name__ == '__main__': unittest.main() mpi4py-3.1.6/test/test_win.py000066400000000000000000000206271460670727200161520ustar00rootroot00000000000000from mpi4py import MPI import mpiunittest as unittest import sys try: sys.getrefcount except AttributeError: class getrefcount(object): def __init__(self, arg): pass def __eq__(self, other): return True def __add__(self, other): return self def __sub__(self, other): return self def memzero(m): try: m[:] = 0 except IndexError: # cffi buffer m[0:len(m)] = b'\0'*len(m) class BaseTestWin(object): COMM = MPI.COMM_NULL INFO = MPI.INFO_NULL CREATE_FLAVOR = MPI.UNDEFINED def testGetAttr(self): base = MPI.Get_address(self.memory) size = len(self.memory) unit = 1 self.assertEqual(size, self.WIN.Get_attr(MPI.WIN_SIZE)) self.assertEqual(unit, self.WIN.Get_attr(MPI.WIN_DISP_UNIT)) self.assertEqual(base, self.WIN.Get_attr(MPI.WIN_BASE)) def testMemory(self): memory = self.WIN.tomemory() pointer = MPI.Get_address(memory) length = len(memory) base, size, dunit = self.WIN.attrs self.assertEqual(size, length) self.assertEqual(dunit, 1) self.assertEqual(base, pointer) def 
    def testAttributes(self):
        base, size, unit = self.WIN.attrs
        self.assertEqual(base, MPI.Get_address(self.memory))
        self.assertEqual(size, len(self.memory))
        self.assertEqual(unit, 1)

    def testGetGroup(self):
        cgroup = self.COMM.Get_group()
        wgroup = self.WIN.Get_group()
        grpcmp = MPI.Group.Compare(cgroup, wgroup)
        cgroup.Free()
        wgroup.Free()
        self.assertEqual(grpcmp, MPI.IDENT)

    def testGetSetInfo(self):
        #info = MPI.INFO_NULL
        #self.WIN.Set_info(info)
        info = MPI.Info.Create()
        self.WIN.Set_info(info)
        info.Free()
        info = self.WIN.Get_info()
        self.WIN.Set_info(info)
        info.Free()

    def testGetSetErrhandler(self):
        for ERRHANDLER in [MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN,
                           MPI.ERRORS_ARE_FATAL, MPI.ERRORS_RETURN,]:
            errhdl_1 = self.WIN.Get_errhandler()
            self.assertNotEqual(errhdl_1, MPI.ERRHANDLER_NULL)
            self.WIN.Set_errhandler(ERRHANDLER)
            errhdl_2 = self.WIN.Get_errhandler()
            self.assertEqual(errhdl_2, ERRHANDLER)
            errhdl_2.Free()
            self.assertEqual(errhdl_2, MPI.ERRHANDLER_NULL)
            self.WIN.Set_errhandler(errhdl_1)
            errhdl_1.Free()
            self.assertEqual(errhdl_1, MPI.ERRHANDLER_NULL)

    def testGetSetName(self):
        try:
            name = self.WIN.Get_name()
            self.WIN.Set_name('mywin')
            self.assertEqual(self.WIN.Get_name(), 'mywin')
            self.WIN.Set_name(name)
            self.assertEqual(self.WIN.Get_name(), name)
        except NotImplementedError:
            self.skipTest('mpi-win-name')

    @unittest.skipIf(MPI.WIN_CREATE_FLAVOR == MPI.KEYVAL_INVALID, 'mpi-win-flavor')
    def testCreateFlavor(self):
        flavors = (MPI.WIN_FLAVOR_CREATE,
                   MPI.WIN_FLAVOR_ALLOCATE,
                   MPI.WIN_FLAVOR_DYNAMIC,
                   MPI.WIN_FLAVOR_SHARED,)
        flavor = self.WIN.Get_attr(MPI.WIN_CREATE_FLAVOR)
        self.assertTrue (flavor in flavors)
        self.assertEqual(flavor, self.WIN.flavor)
        self.assertEqual(flavor, self.CREATE_FLAVOR)

    @unittest.skipIf(MPI.WIN_MODEL == MPI.KEYVAL_INVALID, 'mpi-win-model')
    def testMemoryModel(self):
        models = (MPI.WIN_SEPARATE, MPI.WIN_UNIFIED)
        model = self.WIN.Get_attr(MPI.WIN_MODEL)
        self.assertTrue(model in models)
        self.assertEqual(model, self.WIN.model)

class BaseTestWinCreate(BaseTestWin):

    CREATE_FLAVOR = MPI.WIN_FLAVOR_CREATE

    def setUp(self):
        self.memory = MPI.Alloc_mem(10)
        memzero(self.memory)
        self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)

    def tearDown(self):
        self.WIN.Free()
        MPI.Free_mem(self.memory)

class BaseTestWinAllocate(BaseTestWin):

    CREATE_FLAVOR = MPI.WIN_FLAVOR_ALLOCATE

    def setUp(self):
        self.WIN = MPI.Win.Allocate(10, 1, self.INFO, self.COMM)
        self.memory = self.WIN.tomemory()
        memzero(self.memory)

    def tearDown(self):
        self.WIN.Free()

class BaseTestWinAllocateShared(BaseTestWin):

    CREATE_FLAVOR = MPI.WIN_FLAVOR_SHARED

    def setUp(self):
        self.WIN = MPI.Win.Allocate_shared(10, 1, self.INFO, self.COMM)
        self.memory = self.WIN.tomemory()
        memzero(self.memory)

    def tearDown(self):
        self.WIN.Free()

    def testSharedQuery(self):
        memory = self.WIN.tomemory()
        address = MPI.Get_address(memory)
        length = len(memory)
        memories = self.COMM.allgather((address, length))
        rank = self.COMM.Get_rank()
        size = self.COMM.Get_size()
        for i in range(size):
            mem, disp = self.WIN.Shared_query(rank)
            base = MPI.Get_address(mem)
            size = len(mem)
            if i == rank:
                self.assertEqual(base, memories[i][0])
                self.assertEqual(size, memories[i][1])
            self.assertEqual(disp, 1)

class BaseTestWinCreateDynamic(BaseTestWin):

    CREATE_FLAVOR = MPI.WIN_FLAVOR_DYNAMIC

    def setUp(self):
        self.WIN = MPI.Win.Create_dynamic(self.INFO, self.COMM)

    def tearDown(self):
        self.WIN.Free()

    def testGetAttr(self):
        base = self.WIN.Get_attr(MPI.WIN_BASE)
        size = self.WIN.Get_attr(MPI.WIN_SIZE)
        self.assertEqual(base, 0)
        self.assertEqual(size, 0)
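    # Explanatory note (added, not upstream text): a window of the dynamic
    # flavor starts out empty, so WIN_BASE and WIN_SIZE are expected to
    # report 0 until memory is attached with Win.Attach(); the
    # testAttachDetach case below exercises that path.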
    def testMemory(self):
        memory = self.WIN.tomemory()
        base = MPI.Get_address(memory)
        size = len(memory)
        self.assertEqual(base, 0)
        self.assertEqual(size, 0)

    def testAttributes(self):
        base, size, _ = self.WIN.attrs
        self.assertEqual(base, 0)
        self.assertEqual(size, 0)

    @unittest.skipMPI('msmpi(<9.1.0)')
    def testAttachDetach(self):
        mem1 = MPI.Alloc_mem(8)
        mem2 = MPI.Alloc_mem(16)
        mem3 = MPI.Alloc_mem(32)
        for mem in (mem1, mem2, mem3):
            self.WIN.Attach(mem)
            self.testMemory()
            self.WIN.Detach(mem)
        for mem in (mem1, mem2, mem3):
            self.WIN.Attach(mem)
        self.testMemory()
        for mem in (mem1, mem2, mem3):
            self.WIN.Detach(mem)
        for mem in (mem1, mem2, mem3):
            self.WIN.Attach(mem)
        self.testMemory()
        for mem in (mem3, mem2, mem1):
            self.WIN.Detach(mem)
        MPI.Free_mem(mem1)
        MPI.Free_mem(mem2)
        MPI.Free_mem(mem3)

class TestWinCreateSelf(BaseTestWinCreate, unittest.TestCase):
    COMM = MPI.COMM_SELF

@unittest.skipMPI('openmpi(<1.4.0)')
class TestWinCreateWorld(BaseTestWinCreate, unittest.TestCase):
    COMM = MPI.COMM_WORLD

class TestWinAllocateSelf(BaseTestWinAllocate, unittest.TestCase):
    COMM = MPI.COMM_SELF

@unittest.skipMPI('openmpi(<1.4.0)')
class TestWinAllocateWorld(BaseTestWinAllocate, unittest.TestCase):
    COMM = MPI.COMM_WORLD

class TestWinAllocateSharedSelf(BaseTestWinAllocateShared, unittest.TestCase):
    COMM = MPI.COMM_SELF

class TestWinAllocateSharedWorld(BaseTestWinAllocateShared, unittest.TestCase):
    COMM = MPI.COMM_WORLD

class TestWinCreateDynamicSelf(BaseTestWinCreateDynamic, unittest.TestCase):
    COMM = MPI.COMM_SELF

class TestWinCreateDynamicWorld(BaseTestWinCreateDynamic, unittest.TestCase):
    COMM = MPI.COMM_WORLD

SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI'

try:
    if SpectrumMPI: raise NotImplementedError
    MPI.Win.Create(MPI.BOTTOM, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except (NotImplementedError, MPI.Exception):
    unittest.disable(BaseTestWinCreate, 'mpi-win-create')
try:
    if SpectrumMPI: raise NotImplementedError
    MPI.Win.Allocate(1, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except (NotImplementedError, MPI.Exception):
    unittest.disable(BaseTestWinAllocate, 'mpi-win-allocate')
try:
    if SpectrumMPI: raise NotImplementedError
    MPI.Win.Allocate_shared(1, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except (NotImplementedError, MPI.Exception):
    unittest.disable(BaseTestWinAllocateShared, 'mpi-win-shared')
try:
    if SpectrumMPI: raise NotImplementedError
    MPI.Win.Create_dynamic(MPI.INFO_NULL, MPI.COMM_SELF).Free()
except (NotImplementedError, MPI.Exception):
    unittest.disable(BaseTestWinCreateDynamic, 'mpi-win-dynamic')

if __name__ == '__main__':
    unittest.main()
mpi4py-3.1.6/tox.ini000066400000000000000000000071061460670727200142750ustar00rootroot00000000000000
# Tox (https://tox.readthedocs.io) is a tool for running tests
# in multiple virtualenvs. This configuration file will run the
# test suite on all supported python versions. To use it,
# "pip install tox" and then run "tox" from this directory.
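#
# Example invocations (illustrative only, not part of the upstream file;
# environment names must match the envlist below, and a working MPI
# implementation must already be installed on the host):
#
#   tox                  # run the default envlist
#   tox -e py39          # run the test suite under CPython 3.9 only
#   tox -e lint,docs     # run the linters and build the documentation
#   MPIEXEC="mpiexec -n" tox -e py310   # override the launcher via [mpi]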
[tox]
minversion = 3.18.0
requires = virtualenv<20.22
skip_missing_interpreters=True
envlist =
    py27,
    py35,
    py36,
    py37,
    py38,
    py39,
    py310,
    py311,
    py312,
    pypy2.7
    pypy3.7
    pypy3.8
    pypy3.9
    pypy3.10

[mpi]
mpicc = {env:MPICC:mpicc}
mpicxx = {env:MPICXX:mpicxx}
mpiexec = {env:MPIEXEC:mpiexec}

[testenv]
deps =
    numpy; python_version < '3.12'
    setuptools; python_version >= '3.12'
allowlist_externals = {[mpi]mpiexec}
commands =
    {[mpi]mpiexec} -n 1 {envpython} -m mpi4py --version
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py -m mpi4py.bench helloworld
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py -m mpi4py.bench ringtest
    {[mpi]mpiexec} -n 1 {envpython} {toxinidir}/test/runtests.py --no-builddir -q -e spawn []
    {[mpi]mpiexec} -n 5 {envpython} {toxinidir}/test/runtests.py --no-builddir -q -e spawn []
    {[mpi]mpiexec} -n 1 {envpython} -m mpi4py.futures {toxinidir}/demo/futures/test_futures.py -q []
    {[mpi]mpiexec} -n 5 {envpython} -m mpi4py.futures {toxinidir}/demo/futures/test_futures.py -q []

[testenv:lint]
labels = lint
deps =
    pycodestyle
    pydocstyle
    flake8
    pylint
    mypy
setenv =
    MPICFG=nompi
    CFLAGS=-O0
commands =
    pycodestyle {toxinidir}/src/mpi4py
    pydocstyle {toxinidir}/src/mpi4py
    flake8 {toxinidir}/src/mpi4py
    pylint mpi4py
    stubtest mpi4py \
        --mypy-config-file={toxinidir}/conf/mypy.ini \
        --allowlist={toxinidir}/conf/mypy.stubtest.allow.txt
    mypy -p mpi4py \
        --config-file={toxinidir}/conf/mypy.ini

[cmd]
rst2html5 = rst2html5.py --config=conf/docutils.conf

[testenv:docs]
labels = dist,docs,sphinx
deps = -r{toxinidir}/conf/requirements-docs.txt
allowlist_externals = rm,mv
setenv =
    MPICFG=nompi
    CFLAGS=-O0
    TOPDIR=
    DOCDIR=docs/
    SRCDIR=docs/source/usrman/
    BLDDIR=build/
    OUTDIR=docs/
    LATEXMKOPTS=-quiet
commands_pre =
    rm -rf {env:OUTDIR}usrman
commands =
    {envpython} -m pip uninstall --yes sphinx-rtd-theme
    {[cmd]rst2html5} {env:TOPDIR}LICENSE.rst {env:OUTDIR}LICENSE.html
    {[cmd]rst2html5} {env:TOPDIR}CHANGES.rst {env:OUTDIR}CHANGES.html
    {[cmd]rst2html5} {env:DOCDIR}index.rst {env:OUTDIR}index.html
    sphinx-build -M html {env:SRCDIR} {env:BLDDIR} -q -W -j auto
    sphinx-build -M man {env:SRCDIR} {env:BLDDIR} -q -W -j auto
    sphinx-build -M info {env:SRCDIR} {env:BLDDIR} -q -W -j auto
    sphinx-build -M latexpdf {env:SRCDIR} {env:BLDDIR} -q -W -j auto
    rm {env:BLDDIR}html/.buildinfo
    mv {env:BLDDIR}html {env:OUTDIR}usrman
    mv {env:BLDDIR}man/mpi4py.1 {env:OUTDIR}
    mv {env:BLDDIR}texinfo/mpi4py.info {env:OUTDIR}
    mv {env:BLDDIR}latex/mpi4py.pdf {env:OUTDIR}

[testenv:epydoc]
labels = dist,docs,epydoc
basepython = python2.7
deps =
    epydoc
    docutils
allowlist_externals = rm,mv
setenv =
    MPICFG=nompi
    CFLAGS=-O0
commands_pre =
    rm -rf docs/apiref
commands =
    {envpython} ./conf/epydocify.py --html -o docs/apiref

[testenv:sdist]
labels = dist,sdist
deps =
skip_install = true
depends = docs epydoc
allowlist_externals = test
commands_pre =
    test -f docs/LICENSE.html
    test -f docs/CHANGES.html
    test -f docs/index.html
    test -d docs/usrman
    test -f docs/mpi4py.1
    test -f docs/mpi4py.info
    test -f docs/mpi4py.pdf
    test -d docs/apiref
commands =
    {envpython} setup.py -q sdist